@@ -332,8 +332,8 @@} layer { -name: "fc8"+name: "fc9"type: "InnerProduct" bottom: "fc7" -top: "fc8"+top: "fc9"param { lr_mult: 1 @@ -345,5 +345,5 @@} inner_product_param { -num_output: 1000+num_output: 2weight_filler { type: "gaussian" @@ -359,5 +359,5 @@name: "accuracy" type: "Accuracy" -bottom: "fc8"+bottom: "fc9"bottom: "label" top: "accuracy" @@ -367,5 +367,5 @@name: "loss" type: "SoftmaxWithLoss" -bottom: "fc8"+bottom: "fc9"bottom: "label" top: "loss" @@ -375,5 +375,5 @@name: "softmax" type: "Softmax" -bottom: "fc8"+bottom: "fc9"top: "softmax" include { stage: "deploy" } 我已经将所有的改进文件放在 src/alexnet-customized.prototxt 里面。 这一次,我们的准确率由 60% 多先是上升到 87.5%,然后到 96% 一路到 100%,同时损失度也稳步下降。五分钟后,我们的准确率到达了 100%,损失也只有 0.0009。
测试海马图像时以前的网络会出错,现在我们看到完全相反的结果,即使是小孩画的海马,系统也 100% 确定是海马,海豚的情况也一样。
即使你认为可能很困难的图像,如多个海豚挤在一起,并且它们的身体大部分在水下,系统还是能识别。
训练尝试 3:微调 GoogLeNet 像前面我们微调 AlexNet 模型那样,同样我们也能微调 GoogLeNet。修改这个网络会有点棘手,因为它已经定义了三个全连接分类层,而不是只有一个。
在这个案例中微调 GoogLeNet,我们需要再次创建一个新的分类模型:我们需要重命名三个全连接分类层的所有引用,即 loss1/classifier、loss2/classifier 和 loss3/classifier,并重新定义结果类别数(num_output: 2)。下面的代码实现展示了如何将这三个分类层重新命名,并把输出类别数从 1000 改为 2。 @@ -917,10 +917,10 @@exclude { stage: "deploy" } } layer { -name: "loss1/classifier"+name: "loss1a/classifier"type: "InnerProduct" bottom: "loss1/fc" -top: "loss1/classifier"+top: "loss1a/classifier"param { lr_mult: 1 decay_mult: 1 @@ -930,7 +930,7 @@decay_mult: 0 } inner_product_param { -num_output: 1000+num_output: 2weight_filler { type: "xavier" std: 0.0009765625 @@ -945,7 +945,7 @@layer { name: "loss1/loss" type: "SoftmaxWithLoss" -bottom: "loss1/classifier"+bottom: "loss1a/classifier"bottom: "label" top: "loss1/loss" loss_weight: 0.3 @@ -954,7 +954,7 @@layer { name: "loss1/top-1" type: "Accuracy" -bottom: "loss1/classifier"+bottom: "loss1a/classifier"bottom: "label" top: "loss1/accuracy" include { stage: "val" } @@ -962,7 +962,7 @@layer { name: "loss1/top-5" type: "Accuracy" -bottom: "loss1/classifier"+bottom: "loss1a/classifier"bottom: "label" top: "loss1/accuracy-top5" include { stage: "val" } @@ -1705,10 +1705,10 @@exclude { stage: "deploy" } } layer { -name: "loss2/classifier"+name: "loss2a/classifier"type: "InnerProduct" bottom: "loss2/fc" -top: "loss2/classifier"+top: "loss2a/classifier"param { lr_mult: 1 decay_mult: 1 @@ -1718,7 +1718,7 @@decay_mult: 0 } inner_product_param { -num_output: 1000+num_output: 2weight_filler { type: "xavier" std: 0.0009765625 @@ -1733,7 +1733,7 @@layer { name: "loss2/loss" type: "SoftmaxWithLoss" -bottom: "loss2/classifier"+bottom: "loss2a/classifier"bottom: "label" top: "loss2/loss" loss_weight: 0.3 @@ -1742,7 +1742,7 @@layer { name: "loss2/top-1" type: "Accuracy" -bottom: "loss2/classifier"+bottom: "loss2a/classifier"bottom: "label" top: "loss2/accuracy" include { stage: "val" } @@ -1750,7 +1750,7 @@layer { name: "loss2/top-5" type: "Accuracy" -bottom: "loss2/classifier"+bottom: "loss2a/classifier"bottom: "label" top: 
"loss2/accuracy-top5" include { stage: "val" } @@ -2435,10 +2435,10 @@} } layer { -name: "loss3/classifier"+name: "loss3a/classifier"type: "InnerProduct" bottom: "pool5/7x7_s1" -top: "loss3/classifier"+top: "loss3a/classifier"param { lr_mult: 1 decay_mult: 1 @@ -2448,7 +2448,7 @@decay_mult: 0 } inner_product_param { -num_output: 1000+num_output: 2weight_filler { type: "xavier" } @@ -2461,7 +2461,7 @@layer { name: "loss3/loss" type: "SoftmaxWithLoss" -bottom: "loss3/classifier"+bottom: "loss3a/classifier"bottom: "label" top: "loss" loss_weight: 1 @@ -2470,7 +2470,7 @@layer { name: "loss3/top-1" type: "Accuracy" -bottom: "loss3/classifier"+bottom: "loss3a/classifier"bottom: "label" top: "accuracy" include { stage: "val" } @@ -2478,7 +2478,7 @@layer { name: "loss3/top-5" type: "Accuracy" -bottom: "loss3/classifier"+bottom: "loss3a/classifier"bottom: "label" top: "accuracy-top5" include { stage: "val" } @@ -2489,7 +2489,7 @@layer { name: "softmax" type: "Softmax" -bottom: "loss3/classifier"+bottom: "loss3a/classifier"top: "softmax" include { stage: "deploy" } } 我已经将完整的文件放在 src/googlenet-customized.prototxt 里面。