Analysis of the tiny-cnn Execution Process (MNIST)
The use of tiny-cnn, taking MNIST as an example, was introduced in http://blog.csdn.net/fengbingchun/article/details/50573841; the following analyzes its execution process.
Two loss functions are supported: (1) mean squared error; (2) cross entropy. MNIST uses mean squared error; code segment:
// mean-squared-error loss function for regression
class mse {
public:
    static float_t f(float_t y, float_t t) { return (y - t) * (y - t) / 2; }
    static float_t df(float_t y, float_t t) { return y - t; }
};
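For comparison, a minimal sketch of what the cross-entropy counterpart could look like, mirroring the f/df interface above (not verbatim library code; the actual implementation in tiny-cnn may differ in details):

class cross_entropy {
public:
    // per-element binary cross entropy; assumes y lies in (0, 1)
    static float_t f(float_t y, float_t t) { return -t * std::log(y) - (1.0 - t) * std::log(1.0 - y); }
    static float_t df(float_t y, float_t t) { return (y - t) / (y * (1.0 - y)); }
};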
Six activation functions are supported: (1) tanh; (2) sigmoid; (3) softmax; (4) rectified linear (relu); (5) leaky relu; (6) identity. MNIST uses tanh; code segment:
class tan_h : public function {
public:
    float_t f(const vec_t& v, size_t i) const override {
        const float_t ep = std::exp(v[i]);
        const float_t em = std::exp(-v[i]);
        return (ep - em) / (ep + em);
    }

    // fast approximation of tanh (improve 2-3% speed in LeNet-5)
    /*
    float_t f(float_t x) const {
        const float_t x2 = x * x;
        x *= 1.0 + x2 * (0.1653 + x2 * 0.0097);
        return x / std::sqrt(1.0 + x * x); // invsqrt(static_cast<float>(1.0 + x * x));
    }
    */

    float_t df(float_t y) const override { return 1.0 - sqr(y); }

    std::pair<float_t, float_t> scale() const override { return std::make_pair(-0.8, 0.8); }
};
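Note that df takes the layer output y = tanh(x) rather than the input x, because d tanh(x)/dx = 1 - tanh(x)^2. A small standalone check (illustrative only, not part of tiny-cnn):

#include <cmath>
#include <cstdio>

int main() {
    const double x = 0.5;
    const double y = std::tanh(x);                                    // forward value f(x)
    const double analytic = 1.0 - y * y;                              // df(y), as in tan_h::df
    const double h = 1e-6;
    const double numeric = (std::tanh(x + h) - std::tanh(x - h)) / (2.0 * h);
    std::printf("analytic = %.8f, numeric = %.8f\n", analytic, numeric);
    return 0;
}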
The CNN structure designed for MNIST is similar to LeNet-5, with the F6 layer removed:
Input layer: image size 32*32, number of neurons 32*32=1024; code segment:
const int width = header.num_cols + 2 * x_padding;
const int height = header.num_rows + 2 * y_padding;

std::vector<uint8_t> image_vec(header.num_rows * header.num_cols);
ifs.read((char*) &image_vec[0], header.num_rows * header.num_cols);

dst.resize(width * height, scale_min);

for (size_t y = 0; y < header.num_rows; y++)
    for (size_t x = 0; x < header.num_cols; x++)
        dst[width * (y + y_padding) + x + x_padding]
            = (image_vec[y * header.num_cols + x] / 255.0) * (scale_max - scale_min) + scale_min;
C1 layer: convolution kernel size 5*5, 6 output feature maps, 6 distinct kernels, output feature map size 28*28, trainable parameters 5*5*6+6=156, neurons 28*28*6=4704;
S2 layer: pooling window size 2*2, 6 output subsampled maps, 6 distinct windows, output map size 14*14, trainable parameters 1*6+6=12, neurons 14*14*6=1176;
C3 layer: convolution kernel size 5*5, 16 output feature maps, 16 distinct kernels, output feature map size 10*10, trainable parameters 6*16*5*5+16=2416, neurons 10*10*16=1600;
S4 layer: pooling window size 2*2, 16 output subsampled maps, 16 distinct windows, output map size 5*5, trainable parameters 1*16+16=32, neurons 5*5*16=400;
C5 layer: convolution kernel size 5*5, 120 output feature maps, 120 distinct kernels, output feature map size 1*1, trainable parameters 5*5*16*120+120=48120, neurons 1*120=120;
Output layer: 10 output feature maps, 10 distinct kernels, output feature map size 1*1, trainable parameters 120*10+10=1210, neurons 1*10=10.
The original MNIST images are 28*28; here they are 32*32, padded with 2 pixels on each side. The padded pixels take the value -1, and the remaining pixels are scaled into the range [-1, 1].
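The structure above corresponds to the network assembled in the referenced tiny-cnn MNIST sample. A sketch of that construction, written from memory of the library version the sample used, so constructor arguments and type names may differ between releases (the optional C3 connection table is also omitted here):

using namespace tiny_cnn;
using namespace tiny_cnn::activation;

network<mse, adagrad> nn;  // mse loss + the adagrad-style update() shown later

nn << convolutional_layer<tan_h>(32, 32, 5, 1, 6)   // C1: 32x32 input, 5x5 kernel, 1 -> 6 maps
   << average_pooling_layer<tan_h>(28, 28, 6, 2)    // S2: 2x2 subsampling, 6 maps
   << convolutional_layer<tan_h>(14, 14, 5, 6, 16)  // C3: 5x5 kernel, 6 -> 16 maps
   << average_pooling_layer<tan_h>(10, 10, 16, 2)   // S4: 2x2 subsampling, 16 maps
   << convolutional_layer<tan_h>(5, 5, 5, 16, 120)  // C5: 5x5 kernel, 16 -> 120 maps
   << fully_connected_layer<tan_h>(120, 10);        // Output layer: 120 -> 10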
Weight and bias initialization: weights are generated as uniform random numbers, and all biases are set to 0.
C1 layer weights: initialization range [-sqrt(6.0/(25+150)), sqrt(6.0/(25+150))];
S2 layer weights: initialization range [-sqrt(6.0/(4+1)), sqrt(6.0/(4+1))];
C3 layer weights: initialization range [-sqrt(6.0/(150+400)), sqrt(6.0/(150+400))];
S4 layer weights: initialization range [-sqrt(6.0/(4+1)), sqrt(6.0/(4+1))];
C5 layer weights: initialization range [-sqrt(6.0/(400+3000)), sqrt(6.0/(400+3000))];
Output layer weights: initialization range [-sqrt(6.0/(120+10)), sqrt(6.0/(120+10))].
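This is the Xavier-style uniform initialization with scale = sqrt(6/(fan_in+fan_out)). A minimal standalone sketch (illustrative, not tiny-cnn code), using the C1 values fan_in = 5*5 = 25 and fan_out = 5*5*6 = 150:

#include <cmath>
#include <cstdio>
#include <random>
#include <vector>

int main() {
    const double fan_in = 25.0, fan_out = 150.0;           // C1: 5*5 inputs per kernel, 5*5*6 outputs
    const double r = std::sqrt(6.0 / (fan_in + fan_out));   // about 0.185

    std::mt19937 gen(0);
    std::uniform_real_distribution<double> dist(-r, r);

    std::vector<double> W(5 * 5 * 6);
    for (auto& w : W) w = dist(gen);                        // uniform random weights in [-r, r]
    std::vector<double> b(6, 0.0);                          // biases start at zero

    std::printf("C1 init range: [%.4f, %.4f]\n", -r, r);
    return 0;
}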
Forward propagation:
C1 layer code segment:
vec_t &a = a_[worker_index];                          // w*x
vec_t &out = output_[worker_index];                   // output
const vec_t &in = *(prev_out_padded_[worker_index]);  // input

std::fill(a.begin(), a.end(), (float_t)0.0);

for_i(parallelize_, out_.depth_, [&](int o) {
    for (layer_size_t inc = 0; inc < in_.depth_; inc++) {
        if (!tbl_.is_connected(o, inc)) continue;

        const float_t *pw = &this->W_[weight_.get_index(0, 0, in_.depth_ * o + inc)];
        const float_t *pi = &in[in_padded_.get_index(0, 0, inc)];
        float_t *pa = &a[out_.get_index(0, 0, o)];

        for (layer_size_t y = 0; y < out_.height_; y++) {
            for (layer_size_t x = 0; x < out_.width_; x++) {
                const float_t * ppw = pw;
                const float_t * ppi = pi + (y * h_stride_) * in_padded_.width_ + x * w_stride_;
                float_t sum = (float_t)0.0;

                // should be optimized for small kernel(3x3,5x5)
                for (layer_size_t wy = 0; wy < weight_.height_; wy++) {
                    for (layer_size_t wx = 0; wx < weight_.width_; wx++) {
                        sum += *ppw++ * ppi[wy * in_padded_.width_ + wx];
                    }
                }
                pa[y * out_.width_ + x] += sum;
            }
        }
    }

    if (!this->b_.empty()) {
        float_t *pa = &a[out_.get_index(0, 0, o)];
        float_t b = this->b_[o];
        std::for_each(pa, pa + out_.width_ * out_.height_, [&](float_t& f) { f += b; });
    }
});

for_i(parallelize_, out_size_, [&](int i) {
    out[i] = h_.f(a, i);
});
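The index arithmetic above is easier to see in a stripped-down standalone version of the inner loops: a single 5*5 valid convolution of one 32*32 input map into one 28*28 output map (no connection table, no parallelization; purely illustrative):

#include <cstdio>
#include <vector>

int main() {
    const int in_w = 32, in_h = 32, k = 5;
    const int out_w = in_w - k + 1, out_h = in_h - k + 1;  // 28x28, as for C1
    std::vector<double> in(in_w * in_h, 0.1);              // one input map (constant, for illustration)
    std::vector<double> W(k * k, 0.01);                    // one 5x5 kernel
    std::vector<double> out(out_w * out_h, 0.0);
    const double b = 0.0;                                  // per-map bias

    for (int y = 0; y < out_h; y++) {
        for (int x = 0; x < out_w; x++) {
            double sum = 0.0;
            for (int wy = 0; wy < k; wy++)
                for (int wx = 0; wx < k; wx++)
                    sum += W[wy * k + wx] * in[(y + wy) * in_w + (x + wx)];
            out[y * out_w + x] = sum + b;  // the tanh activation would follow
        }
    }
    std::printf("output map: %dx%d, out[0] = %.4f\n", out_w, out_h, out[0]);
    return 0;
}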
S2 layer code segment:
vec_t& a = a_[index];

for_i(parallelize_, out_size_, [&](int i) {
    const wi_connections& connections = out2wi_[i];

    a[i] = 0.0;
    for (auto connection : connections)                        // 13.1%
        a[i] += W_[connection.first] * in[connection.second];  // 3.2%

    a[i] *= scale_factor_;
    a[i] += b_[out2bias_[i]];
});

for_i(parallelize_, out_size_, [&](int i) {
    output_[index][i] = h_.f(a, i);
});
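For this 2*2 average-pooling layer each map has a single trainable weight and a single bias (the 1*6+6=12 parameters listed above), and scale_factor_ is assumed to be 1/(2*2). A standalone sketch of what one output unit computes:

#include <cmath>
#include <cstdio>

int main() {
    const double in[4] = {0.1, -0.3, 0.5, 0.2};   // one 2x2 window of the previous layer's output
    const double W = 0.7;                         // the map's single shared weight
    const double b = 0.05;                        // the map's bias
    const double scale_factor = 1.0 / 4.0;        // assumed value of scale_factor_ for 2x2 pooling

    double a = 0.0;
    for (double v : in) a += W * v;               // same accumulation as the loop over connections
    a *= scale_factor;
    a += b;

    std::printf("output = %.6f\n", std::tanh(a)); // h_.f(a, i) with tan_h
    return 0;
}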
The C3 and C5 layer code segments are the same as C1.
The S4 layer code segment is the same as S2.
Output layer code segment:
vec_t &a = a_[index];
vec_t &out = output_[index];

for_i(parallelize_, out_size_, [&](int i) {
    a[i] = 0.0;
    for (layer_size_t c = 0; c < in_size_; c++) {
        a[i] += W_[c*out_size_ + i] * in[c];
    }

    if (has_bias_)
        a[i] += b_[i];
});

for_i(parallelize_, out_size_, [&](int i) {
    out[i] = h_.f(a, i);
});
Backpropagation:
Output layer code segment:
vec_t delta(out_dim());
const activation::function& h = layers_.tail()->activation_function();

if (is_canonical_link(h)) {
    for_i(out_dim(), [&](int i){ delta[i] = out[i] - t[i]; });
} else {
    vec_t dE_dy = gradient<E>(out, t);

    // delta = dE/da = (dE/dy) * (dy/da)
    for (size_t i = 0; i < out_dim(); i++) {
        vec_t dy_da = h.df(out, i);
        delta[i] = vectorize::dot(&dE_dy[0], &dy_da[0], out_dim());
    }
}
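For the mse + tan_h pair used here (which, as far as I can tell, is not treated as a canonical link, so the else branch runs), the computation reduces to delta_i = (y_i - t_i) * (1 - y_i^2). A standalone illustration:

#include <cstdio>

int main() {
    const double out[3] = {0.9, -0.2, 0.1};   // network outputs y (after tanh)
    const double t[3]   = {1.0, -1.0, -1.0};  // training targets
    double delta[3];

    for (int i = 0; i < 3; i++) {
        const double dE_dy = out[i] - t[i];          // mse::df
        const double dy_da = 1.0 - out[i] * out[i];  // tan_h::df
        delta[i] = dE_dy * dy_da;                    // dE/da
    }
    std::printf("delta = {%.4f, %.4f, %.4f}\n", delta[0], delta[1], delta[2]);
    return 0;
}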
C5 layer code segment:
const vec_t& prev_out = prev_->output(index);
const activation::function& prev_h = prev_->activation_function();
vec_t& prev_delta = prev_delta_[index];
vec_t& dW = dW_[index];
vec_t& db = db_[index];

for (layer_size_t c = 0; c < this->in_size_; c++) {
    // propagate delta to previous layer
    // prev_delta[c] += current_delta[r] * W_[c * out_size_ + r]
    prev_delta[c] = vectorize::dot(&curr_delta[0], &W_[c*out_size_], out_size_);
    prev_delta[c] *= prev_h.df(prev_out[c]);
}

for_(parallelize_, 0, (size_t)out_size_, [&](const blocked_range& r) {
    // accumulate weight-step using delta
    // dW[c * out_size + i] += current_delta[i] * prev_out[c]
    for (layer_size_t c = 0; c < in_size_; c++)
        vectorize::muladd(&curr_delta[r.begin()], prev_out[c], r.end() - r.begin(), &dW[c*out_size_ + r.begin()]);

    if (has_bias_) {
        for (int i = r.begin(); i < r.end(); i++)
            db[i] += curr_delta[i];
    }
});
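Written out as plain scalar loops with illustrative sizes (no vectorize:: helpers, tanh assumed as the previous layer's activation), the fully-connected backward pass above computes:

#include <cstdio>
#include <vector>

int main() {
    const int in_size = 4, out_size = 3;
    std::vector<double> W(in_size * out_size, 0.1), prev_out(in_size, 0.5);
    std::vector<double> curr_delta = {0.2, -0.1, 0.3};
    std::vector<double> prev_delta(in_size, 0.0), dW(in_size * out_size, 0.0), db(out_size, 0.0);

    for (int c = 0; c < in_size; c++) {
        double sum = 0.0;                                           // prev_delta[c] = sum_i delta[i] * W[c*out+i]
        for (int i = 0; i < out_size; i++) sum += curr_delta[i] * W[c * out_size + i];
        prev_delta[c] = sum * (1.0 - prev_out[c] * prev_out[c]);    // times tanh derivative of the previous layer
        for (int i = 0; i < out_size; i++) dW[c * out_size + i] += curr_delta[i] * prev_out[c];
    }
    for (int i = 0; i < out_size; i++) db[i] += curr_delta[i];      // bias gradient

    std::printf("prev_delta[0] = %.4f, dW[0] = %.4f, db[0] = %.4f\n", prev_delta[0], dW[0], db[0]);
    return 0;
}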
S4 layer code segment:
const vec_t& prev_out = *(prev_out_padded_[index]);
const activation::function& prev_h = prev_->activation_function();
vec_t* prev_delta = (pad_type_ == padding::same) ? &prev_delta_padded_[index] : &prev_delta_[index];
vec_t& dW = dW_[index];
vec_t& db = db_[index];

std::fill(prev_delta->begin(), prev_delta->end(), (float_t)0.0);

// propagate delta to previous layer
for_i(in_.depth_, [&](int inc) {
    for (layer_size_t outc = 0; outc < out_.depth_; outc++) {
        if (!tbl_.is_connected(outc, inc)) continue;

        const float_t *pw = &this->W_[weight_.get_index(0, 0, in_.depth_ * outc + inc)];
        const float_t *pdelta_src = &curr_delta[out_.get_index(0, 0, outc)];
        float_t *pdelta_dst = &(*prev_delta)[in_padded_.get_index(0, 0, inc)];

        for (layer_size_t y = 0; y < out_.height_; y++) {
            for (layer_size_t x = 0; x < out_.width_; x++) {
                const float_t * ppw = pw;
                const float_t ppdelta_src = pdelta_src[y * out_.width_ + x];
                float_t * ppdelta_dst = pdelta_dst + y * h_stride_ * in_padded_.width_ + x * w_stride_;

                for (layer_size_t wy = 0; wy < weight_.height_; wy++) {
                    for (layer_size_t wx = 0; wx < weight_.width_; wx++) {
                        ppdelta_dst[wy * in_padded_.width_ + wx] += *ppw++ * ppdelta_src;
                    }
                }
            }
        }
    }
});

for_i(parallelize_, in_padded_.size(), [&](int i) {
    (*prev_delta)[i] *= prev_h.df(prev_out[i]);
});

// accumulate dw
for_i(in_.depth_, [&](int inc) {
    for (layer_size_t outc = 0; outc < out_.depth_; outc++) {
        if (!tbl_.is_connected(outc, inc)) continue;

        for (layer_size_t wy = 0; wy < weight_.height_; wy++) {
            for (layer_size_t wx = 0; wx < weight_.width_; wx++) {
                float_t dst = 0.0;
                const float_t * prevo = &prev_out[in_padded_.get_index(wx, wy, inc)];
                const float_t * delta = &curr_delta[out_.get_index(0, 0, outc)];

                for (layer_size_t y = 0; y < out_.height_; y++) {
                    dst += vectorize::dot(prevo + y * in_padded_.width_, delta + y * out_.width_, out_.width_);
                }
                dW[weight_.get_index(wx, wy, in_.depth_ * outc + inc)] += dst;
            }
        }
    }
});

// accumulate db
if (!db.empty()) {
    for (layer_size_t outc = 0; outc < out_.depth_; outc++) {
        const float_t *delta = &curr_delta[out_.get_index(0, 0, outc)];
        db[outc] += std::accumulate(delta, delta + out_.width_ * out_.height_, (float_t)0.0);
    }
}
C3 layer code segment:
const vec_t& prev_out = prev_->output(index);
const activation::function& prev_h = prev_->activation_function();
vec_t& prev_delta = prev_delta_[index];

for_(parallelize_, 0, (size_t)in_size_, [&](const blocked_range& r) {
    for (int i = r.begin(); i != r.end(); i++) {
        const wo_connections& connections = in2wo_[i];
        float_t delta = 0.0;

        for (auto connection : connections)
            delta += W_[connection.first] * current_delta[connection.second];  // 40.6%

        prev_delta[i] = delta * scale_factor_ * prev_h.df(prev_out[i]);        // 2.1%
    }
});

for_(parallelize_, 0, weight2io_.size(), [&](const blocked_range& r) {
    for (int i = r.begin(); i < r.end(); i++) {
        const io_connections& connections = weight2io_[i];
        float_t diff = 0.0;

        for (auto connection : connections)  // 11.9%
            diff += prev_out[connection.first] * current_delta[connection.second];

        dW_[index][i] += diff * scale_factor_;
    }
});

for (size_t i = 0; i < bias2out_.size(); i++) {
    const std::vector<layer_size_t>& outs = bias2out_[i];
    float_t diff = 0.0;

    for (auto o : outs)
        diff += current_delta[o];

    db_[index][i] += diff;
}
The S2 layer and input layer code segments are the same as S4.
The C1 layer code segment is the same as C3.
Weight and bias update code segment (an AdaGrad-style rule: the squared gradients are accumulated in g, and each step is scaled by 1/(sqrt(g)+eps)):
void update(const vec_t& dW, const vec_t& /*Hessian*/, vec_t &W) {
    vec_t& g = get<0>(W);

    for_i(W.size(), [&](int i) {
        g[i] += dW[i] * dW[i];
        W[i] -= alpha * dW[i] / (std::sqrt(g[i]) + eps);
    });
}
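A standalone sketch of the same update rule applied to a single weight over a few steps, with a made-up constant gradient (illustrative only):

#include <cmath>
#include <cstdio>

int main() {
    double W = 0.5, g = 0.0;                      // one weight and its accumulated squared gradient
    const double alpha = 0.01, eps = 1e-8;

    for (int step = 0; step < 3; step++) {
        const double dW = 0.2;                    // made-up gradient for this step
        g += dW * dW;                             // accumulate squared gradient
        W -= alpha * dW / (std::sqrt(g) + eps);   // scaled update, as in update() above
        std::printf("step %d: W = %.6f\n", step, W);
    }
    return 0;
}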
For the 60000 training samples in MNIST, the operations above are performed on each sample in turn, and the weights and biases are updated. After each pass over the 60000 training samples, the 10000 test samples are evaluated to obtain the recognition rate. Training runs for 30 iterations (epochs) in total, after which the final weights, biases and other parameters are saved to the specified file.
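A hypothetical sketch of this training/testing/saving flow, following the tiny-cnn MNIST sample referenced at the top (function names and signatures are recalled from that library version and may not match other releases; nn, train_images, train_labels, test_images, test_labels and minibatch_size are assumed to be prepared as in the sample):

#include <fstream>
#include <iostream>

// called after each epoch: evaluate the 10000 test samples and print the recognition rate
auto on_enumerate_epoch = [&]() {
    tiny_cnn::result res = nn.test(test_images, test_labels);
    std::cout << res.num_success << "/" << res.num_total << std::endl;
};
auto on_enumerate_minibatch = [&]() { /* progress display */ };

// 30 epochs over the 60000 training samples
nn.train(train_images, train_labels, minibatch_size, 30,
         on_enumerate_minibatch, on_enumerate_epoch);

// save the trained weights and biases
std::ofstream ofs("LeNet-weights");
ofs << nn;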