Изменения
→Классификация при помощи RNN
// Train a multilayer perceptron (2 inputs -> 10 hidden -> 2 outputs) using
// least-mean-squares error and logistic-sigmoid activations, then plot the
// resulting classifier over the data set.
val layerSizes = Array(2, 10, 2)
val mlpModel = mlp(x, y, layerSizes, ErrorFunction.LEAST_MEAN_SQUARES, ActivationFunction.LOGISTIC_SIGMOID)
plot(x, y, mlpModel)
===Классификация при помощи RNN (рекуррентная нейронная сеть)===
Пример кода с использованием библиотеки DeepLearning.scala:

<syntaxhighlight lang="scala">
// --- Layer definitions ---

// Hyperbolic tangent built from the library's exp primitive:
// tanh(x) = (e^x - e^-x) / (e^x + e^-x).
def tanh(x: INDArrayLayer): INDArrayLayer = {
  val expX = hyperparameters.exp(x)
  val expNegX = hyperparameters.exp(-x)
  (expX - expNegX) / (expX + expNegX)
}

// One step of the character-level RNN.
// x, y   — one-hot encodings of the current input and expected output chars
// hprev  — hidden state carried over from the previous step
// Returns (cross-entropy loss, softmax output distribution, next hidden state).
def charRNN(x: INDArray, y: INDArray, hprev: INDArrayLayer): (DoubleLayer, INDArrayLayer, INDArrayLayer) = {
  val hnext = tanh(wxh.dot(x) + whh.dot(hprev) + bh)
  val yraw = why.dot(hnext) + by
  // Softmax over the raw scores.
  val yrawExp = hyperparameters.exp(yraw)
  val prob = yrawExp / yrawExp.sum
  // Cross-entropy against the one-hot target: -log p(correct char).
  val loss = -hyperparameters.log((prob * y).sum)
  (loss, prob, hnext)
}

// --- Network structure ---

// Pair each character with its successor, then split into training batches
// of seqLength consecutive (input, target) pairs.
val batches = data.zip(data.tail).grouped(seqLength).toVector

type WithHiddenLayer[A] = (A, INDArrayLayer)
type Batch = IndexedSeq[(Char, Char)]
type Losses = Vector[Double]

// Accumulate the loss over one batch, threading the hidden state through
// every (input, target) pair in sequence.
def singleBatch(batch: WithHiddenLayer[Batch]): WithHiddenLayer[DoubleLayer] = {
  batch match {
    case (batchseq, hprev) =>
      batchseq.foldLeft((DoubleLayer(0.0.forward), hprev)) {
        (bstate: WithHiddenLayer[DoubleLayer], xy: (Char, Char)) =>
          (bstate, xy) match {
            case ((tot, localhprev), (x, y)) =>
              charRNN(oneOfK(x), oneOfK(y), localhprev) match {
                case (localloss, _, localhnext) =>
                  (tot + localloss, localhnext)
              }
          }
      }
  }
}

// --- One training round ---

// Fresh all-zero hidden state.
def initH = INDArrayLayer(Nd4j.zeros(hiddenSize, 1).forward)

// Run every batch once, training after each batch and recording an
// exponential moving average of the loss.
def singleRound(initprevloss: Losses): Future[Losses] =
  (batches.foldLeftM((initprevloss, initH)) {
    (bstate: WithHiddenLayer[Losses], batch: Batch) =>
      bstate match {
        case (prevloss, hprev) =>
          // NOTE: singleBatch takes a single tuple parameter, so the pair is
          // passed explicitly instead of relying on deprecated auto-tupling.
          singleBatch((batch, hprev)) match {
            case (bloss, hnext) =>
              bloss.train.map { (blossval: Double) =>
                // Smoothed loss; reuse the computed value instead of
                // duplicating the expression (the original computed nloss
                // and then left it unused).
                val nloss = prevloss.last * 0.999 + blossval * 0.001
                (prevloss :+ nloss, hnext)
              }
          }
      }
  }).map { (fstate: WithHiddenLayer[Losses]) =>
    fstate match {
      case (floss, _) => floss
    }
  }

// 2048 training rounds, seeded with the expected loss of a uniform guess.
def allRounds: Future[Losses] =
  (0 until 2048).foldLeftM(Vector(-math.log(1.0 / vocabSize) * seqLength)) {
    (ploss: Losses, round: Int) => singleRound(ploss)
  }

// --- Training the network ---

// Block until the asynchronous training completes (acceptable at the very
// edge of an example program).
def unsafePerformFuture[A](f: Future[A]): A =
  Await.result(f.toScalaFuture, Duration.Inf)

val losses = unsafePerformFuture(allRounds)
</syntaxhighlight>
== Примечания ==
<references/>