Source: http://itnoteshare.com

Author contact: a345566462@163.com

An artificial neural network (Artificial Neural Network, ANN) has been a hot research topic in artificial intelligence since the 1980s. It abstracts the neuron network of the human brain from an information-processing perspective, builds a simple model, and forms different networks by connecting nodes in different ways. In engineering and academia it is often simply called a neural network. A neural network is a computational model made up of a large number of interconnected nodes (neurons). Each node implements a particular output function, called an activation function. Each connection between two nodes carries a weight applied to the signal passing through it; these weights act as the memory of the artificial neural network. The output of the network depends on how the nodes are connected, on the weight values, and on the activation functions. The network itself is usually an approximation of some algorithm or function found in nature, or the expression of a logical strategy.




The code below implements forward propagation for a neural network (NN). The weight and bias parameters were obtained by training with backpropagation (BPNN); backpropagation itself is not covered in this article.

The value of a neuron = tanh(weights * inputs + bias)

The output values are passed through a softmax function to obtain a predicted probability distribution.
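
As a quick illustration of these two formulas, here is a minimal sketch in Scala (the weight and bias values are made up for the example; they are not the trained parameters used in the model below):

object ForwardStepSketch extends App {
  // made-up parameters, only to illustrate value = tanh(weights * inputs + bias)
  val input  = Array(0.5, 0.1)
  val weight = Array(1.2, -0.7)
  val bias   = 0.3

  // value of a single neuron
  val neuron = math.tanh(weight.zip(input).map { case (w, x) => w * x }.sum + bias)

  // softmax turns a vector of scores into a probability distribution that sums to 1
  def softmax(scores: Array[Double]): Array[Double] = {
    val exps = scores.map(math.exp)
    exps.map(_ / exps.sum)
  }

  println(neuron)                                  // activated neuron value
  println(softmax(Array(neuron, -neuron)).toList)  // two-class probabilities
}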


package my.wxm

import scala.io.StdIn

/**
  * JaanLuis-NNTest-2017/4/12
  *
  * Neural network forward propagation: predicts whether x1 * x2 < 0.5.
  * Because the model was trained on data in the range [0,1], inputs in [0,1] work best;
  * out-of-range inputs such as 9999, 0.1 are often judged incorrectly.
  *
  * The model was trained with TensorFlow.
  *
  */
object NNTest {

  def main(args: Array[String]) {

    //    x1 * x2 < 0.5
    println(ann(Array(0.5, 0.1)).toList)
    println(ann(Array(0.6, 0.9)).toList)
    println(ann(Array(0.5, 0.6)).toList)
    println(ann(Array(0.9, 0.9)).toList)
    println(ann(Array(6, 0.1)).toList)

    val checkSize = 100000

    val error = (1 to checkSize).map {
      i =>
        val x: Double = math.random
        val y: Double = math.random
        ann(Array(x, y)).toList ->(x, y)
    }.filterNot {
      case (List(label1, label2), (x, y)) =>
        (x * y < 0.5) == (label1 > label2)
    }

    println(s"验证条数:${checkSize}条,错误:${error.length}条,错误率:${error.length * 100.0 / checkSize}%")

    while (true) {
      println("请输入两个[0,1]的数字(如0.1,0.2:")
      val Array(x1, x2) = StdIn.readLine().split(",").map(_.trim.toDouble)
      val ann1 = ann(Array(x1, x2))
      println(if (ann1(0) > ann1(1)) s"$x1 * $x2 < 0.5 ${x1 * x2 < 0.5}" else s"$x1 * $x2 >= 0.5 ${x1 * x2 >= 0.5}")
    }

  }

  /**
    * softmax: turns the elements of the array into a probability distribution summing to 1
    *
    * @param data one row of data
    * @return
    */
  def softmax(data: Array[Double]) = {
    val sum: Double = data.map(math.exp).sum
    data.map(d => math.exp(d) / sum)
  }

  /**
    * Builds a matrix (tensor) of the given shape from a flat array
    *
    * @param row  number of rows
    * @param col  number of columns
    * @param data the data
    * @return
    */
  def splitMatrix(row: Int, col: Int, data: Array[Double]) = {
    if (row * col != data.length) throw new IllegalArgumentException(s"row * col != data.length , $row * $col != ${data.length}")

    (for (r <- 0 until row) yield data.slice(r * col, r * col + col)).toArray
  }

  /**
    * Weighted sum with activation: for each output neuron computes activeFun(sum(ws(i)(j) * input(i)) + bias(j))
    *
    * @param ws        weight matrix
    * @param input     input-layer neurons
    * @param bias      bias values
    * @param activeFun activation function applied to each output neuron
    * @return
    */
  def tensor_multiplication(ws: Array[Array[Double]], input: Array[Double], bias: Array[Double], activeFun: Double => Double) = {

    if (ws.length != input.length) throw new IllegalArgumentException(s"ws.length != input.length , ${ws.length} != ${input.length}")
    if (ws.head.length != bias.length) throw new IllegalArgumentException(s"ws.col.length != bias.length , ${ws.head.length} != ${bias.length}")

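    // For each output column j: activeFun( sum_i ws(i)(j) * input(i) + bias(j) ), results ordered by column index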
    ws.zipWithIndex
      .map { case (row, row_index) => row.zipWithIndex.map { case (w, index) => w * input(row_index) -> index } }
      .flatMap(x => x).groupBy(_._2).toArray.map { case (index, arr) => activeFun(arr.map(_._1).sum + bias(index)) -> index }
      .sortWith(_._2 < _._2).map(_._1)

  }

  /**
    * Forward propagation: out = softmax(tanh(tanh(input * w1 + bias1) * w2 + bias2))
    *
    * @param inputX input-layer neurons
    * @return
    */
  def ann(inputX: Array[Double]) = {

    // First-layer weights
    val w1 = splitMatrix(2, 8, Array(
      -2.17348814, 3.60451365, -1.76843905, -3.06683207, -1.76570141, 3.14945459, -0.8447625, -1.93399012,
      -0.18026765, 2.77430797, 4.39151907, -0.67225909, 3.94808292, 2.84200525, -0.62446582, -1.83884466
    ))

    // Second-layer weights
    val w2 = splitMatrix(8, 2, Array(
      6.05634356, -5.06061459,
      1.02467227, -3.63762474,
      -5.57961321, 6.10432768,
      10.91280651, -11.34722996,
      -4.82213926, 4.05005932,
      1.6171788, -1.45705271,
      3.97176361, -1.85379755,
      20.94778061, -21.47359657
    ))

    // First-layer biases
    val bias1 = Array(1.3842231, 3.93543506, -0.9849897, 2.38518882, -1.0282445, 3.85991597, 1.50484979, 2.84501219)
    // Second-layer biases
    val bias2 = Array(2.00155926, -0.21568353)

    // Compute the first hidden layer: tanh(w1 * inputX + bias1)
    val one = tensor_multiplication(w1, inputX, bias1, math.tanh)
    // Compute the output layer: tanh(w2 * one + bias2)
    val out = tensor_multiplication(w2, one, bias2, math.tanh)

    // Return the output layer as a probability distribution
    softmax(out)
  }
}

The following code uses a model trained with the ReLU activation function over one million training iterations; its accuracy is comparatively higher.

package my.wxm

import scala.io.StdIn

/**
  * JaanLuis-NNTest-2017/4/12
  *
  * Neural network forward propagation: predicts whether x1 * x2 < 0.5.
  * Because the model was trained on data in the range [0,1], inputs in [0,1] work best;
  * out-of-range inputs such as 9999, 0.1 are often judged incorrectly.
  *
  * The error rate on random inputs is about 0.034%.
  * The model was trained with TensorFlow.
  *
  */
object NNTest2 {

  def main(args: Array[String]) {

    //    x1 * x2 < 0.5
    println(ann(Array(0.5, 0.1)).toList)
    println(ann(Array(0.6, 0.9)).toList)
    println(ann(Array(0.5, 0.6)).toList)
    println(ann(Array(0.9, 0.9)).toList)
    println(ann(Array(6, 0.1)).toList)

    val checkSize = 1000000

    val error = (1 to checkSize).par.map {
      i =>
        val x: Double = math.random
        val y: Double = math.random
        ann(Array(x, y)).toList ->(x, y)
    }.filterNot {
      case (List(label1, label2), (x, y)) =>
        (x * y < 0.5) == (label1 > label2)
    }

    println(s"验证条数:${checkSize }条,错误:${error.length }条,错误率:${error.length * 100.0 / checkSize }%")

    while (true) {
      println("请输入两个[0,1]的数字(如0.1,0.2:")
      val Array(x1, x2) = StdIn.readLine().split(",").map(_.trim.toDouble)
      val ann1 = ann(Array(x1, x2))
      println(if (ann1(0) > ann1(1)) s"$x1 * $x2 < 0.5 ${x1 * x2 < 0.5}" else s"$x1 * $x2 >= 0.5 ${x1 * x2 >= 0.5}")
    }

  }

  /**
    * Activation function: ReLU, max(0, x)
    *
    * @param double the input value
    * @return
    */
  def relu(double: Double) = math.max(0, double)

  /**
    * Activation function: tanh
    *
    * @param double the input value
    * @return
    */
  def tanh(double: Double) = math.tanh(double)

  /**
    * softmax: turns the elements of the array into a probability distribution summing to 1
    *
    * @param data one row of data
    * @return
    */
  def softmax(data: Array[Double]) = {
    val sum: Double = data.map(math.exp).sum
    // exp can overflow to infinity, which makes the division produce NaN; map NaN back to 1 as a guard
    data.map(d => math.exp(d) / sum).map(d => if (d.isNaN) 1 else d)

  }

  /**
    * Builds a matrix (tensor) of the given shape from a flat array
    *
    * @param row  number of rows
    * @param col  number of columns
    * @param data the data
    * @return
    */
  def splitMatrix(row: Int, col: Int, data: Array[Double]) = {
    if (row * col != data.length) throw new IllegalArgumentException(s"row * col != data.length , $row * $col != ${data.length}")

    (for (r <- 0 until row) yield data.slice(r * col, r * col + col)).toArray
  }

  /**
    * Weighted sum with activation: for each output neuron computes activeFun(sum(ws(i)(j) * input(i)) + bias(j))
    *
    * @param ws        weight matrix
    * @param input     input-layer neurons
    * @param bias      bias values
    * @param activeFun activation function applied to each output neuron
    * @return
    */
  def tensor_multiplication(ws: Array[Array[Double]], input: Array[Double], bias: Array[Double], activeFun: Double => Double) = {

    if (ws.length != input.length) throw new IllegalArgumentException(s"ws.length != input.length , ${ws.length} != ${input.length}")
    if (ws.head.length != bias.length) throw new IllegalArgumentException(s"ws.col.length != bias.length , ${ws.head.length} != ${bias.length}")

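    // For each output column j: activeFun( sum_i ws(i)(j) * input(i) + bias(j) ), results ordered by column index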
    ws.zipWithIndex
      .map { case (row, row_index) => row.zipWithIndex.map { case (w, index) => w * input(row_index) -> index } }
      .flatMap(x => x).groupBy(_._2).toArray.map { case (index, arr) => activeFun(arr.map(_._1).sum + bias(index)) -> index }
      .sortWith(_._2 < _._2).map(_._1)

  }

  /**
    * Forward propagation: out = softmax(relu(relu(input * w1 + bias1) * w2 + bias2))
    *
    * @param inputX input-layer neurons
    * @return
    */
  def ann(inputX: Array[Double]) = {

    /*
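    Raw trained parameters (w1, w2, bias1, bias2) from the TensorFlow training run: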
    w1 = [
     [  3.75066948,-0.46267065,5.92146683,-10.25980854,0.49942797,-0.16474821,-6.45229769,-8.39618778,-4.96623468,5.49899101]
     [  5.98210001,-7.75265026,8.36706543,1.58930385,0.41676584,-9.64230633,-1.81736887,-0.7795254,-5.03588247,7.90664864]]
    w2 = [
     [ -4.00860882,  4.84761477,]
     [ 12.67997456,-14.79246712,]
     [ -6.19306612,  7.07722855,]
     [ 13.8338213 ,-15.40096951,]
     [ -0.33767474, -0.22693524,]
     [ 18.07026482,-17.85834503,]
     [ 16.05621338,-13.4217205 ,]
     [ 15.7602253 ,-17.68614006,]
     [ 60.52432251,-59.49435043,]
     [ -5.83263302,  7.30717134 ]]
    bias1 = [-4.38971329,7.06747103,-4.04683256,6.38259506,-0.7418949,7.64808607,6.89628839,7.27732229,7.47029543,-3.75374246]
    bias2 = [ 3.56913185, -1.33394516]
    */

    // First-layer weights
    val w1 = splitMatrix(2, 10, Array(
      3.75066948, -0.46267065, 5.92146683, -10.25980854, 0.49942797, -0.16474821, -6.45229769, -8.39618778, -4.96623468, 5.49899101,
      5.98210001, -7.75265026, 8.36706543, 1.58930385, 0.41676584, -9.64230633, -1.81736887, -0.7795254, -5.03588247, 7.90664864
    ))

    // Second-layer weights
    val w2 = splitMatrix(10, 2, Array(
      -4.00860882, 4.84761477,
      12.67997456, -14.79246712,
      -6.19306612, 7.07722855,
      13.8338213, -15.40096951,
      -0.33767474, -0.22693524,
      18.07026482, -17.85834503,
      16.05621338, -13.4217205,
      15.7602253, -17.68614006,
      60.52432251, -59.49435043,
      -5.83263302, 7.30717134
    ))

    // First-layer biases
    val bias1 = Array(-4.38971329, 7.06747103, -4.04683256, 6.38259506, -0.7418949, 7.64808607, 6.89628839, 7.27732229, 7.47029543, -3.75374246)
    // Second-layer biases
    val bias2 = Array(3.56913185, -1.33394516)

    // Compute the first hidden layer: relu(w1 * inputX + bias1)
    val one = tensor_multiplication(w1, inputX, bias1, relu)
    // Compute the output layer: relu(w2 * one + bias2)
    val out = tensor_multiplication(w2, one, bias2, relu)

    // Return the output layer as a probability distribution
    softmax(out)
  }
}
