TinyMind RMB Code Recognition Challenge: Warm-up Round Write-up

腾古 2019-05-31 17:16

Task description:

Warm-up round: RMB denomination recognition

Goal: train a model on the training set (train_data.zip) to recognize RMB denominations, then run it on the validation set (public_test_data) and submit the predictions in the required format for scoring.

Data: train_data.zip is the training set, 39,620 images in total; each image shows a single note from the fourth series of the renminbi. There are nine denominations: 0.1, 0.2, 0.5, 1, 2, 5, 10, 50, and 100 yuan.

public_test_data.zip contains 20,000 images; the trained model must produce a denomination prediction for each one.
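The training labels come separately in train_face_value_label.csv, which the training code below reads. Judging from how the code parses it, the file has a name column and a label column holding face values, and the label column name carries a leading space. A hypothetical excerpt (file names made up):

name, label
0A1B2C3D.jpg, 100
0E4F5G6H.jpg, 0.1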

Approach: This is clearly an image classification problem, so the classic classification networks come to mind: the relatively simple AlexNet, the somewhat deeper VGG-16 and VGG-19, and heavier architectures such as ResNet and GoogLeNet.

With the candidate models in mind, look at the task again. In the provided images the denominations are easy to tell apart by eye, which suggests the classification itself is not hard: once a CNN extracts features, the inter-class differences are large, so a shallow network like AlexNet or VGG should be sufficient.

I went with VGG-16 plus transfer learning: load VGG-16's ImageNet-pretrained parameters and retrain the fully connected layers from scratch. Training accuracy converged to 1 after about 50 steps, and the final test-set score was 100%. I haven't tried AlexNet, but I suspect it could also reach a perfect score. If anyone is interested in the concrete code I can paste it; there are also plenty of reimplementations of these classic networks to consult.
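As an aside, the pretrained vgg16.npy file used below (the format follows the tensorflow-vgg project) is just a pickled dict mapping layer names to [weights, biases] pairs. A quick way to inspect it; note that NumPy 1.16.3 and later also require allow_pickle=True:

import numpy as np

# vgg16.npy: a pickled dict of {layer_name: [weights, biases]}
data_dict = np.load('./vgg16.npy', encoding='latin1', allow_pickle=True).item()
print(sorted(data_dict.keys()))       # conv1_1 ... conv5_3, fc6, fc7, fc8
print(data_dict['conv1_1'][0].shape)  # (3, 3, 3, 64): 3x3 filters, 3 in, 64 out channels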

The code is up on GitHub; feel free to download it and train (a star would be appreciated~): https://github.com/lwten/VGG. The main code is shown below.

Network definition: vgg16_trainable.py

import tensorflow as tf
import numpy as np
from functools import reduce

VGG_MEAN = [103.939, 116.779, 123.68]  # ImageNet per-channel means, BGR order


class Vgg16:

    def __init__(self, vgg16_npy_path=None, trainable=True, dropout=0.5):
        if vgg16_npy_path is not None:
            # vgg16.npy is a pickled dict of {layer_name: [weights, biases]};
            # allow_pickle=True is required on NumPy >= 1.16.3
            self.data_dict = np.load(vgg16_npy_path, encoding='latin1', allow_pickle=True).item()
        else:
            self.data_dict = None

        self.var_dict = {}
        self.trainable = trainable
        self.dropout = dropout

    def build(self, rgb, train_mode=None):
        """
        load variable from npy to build the VGG

        :param rgb: rgb image [batch, height, width, 3] values scaled [0, 1]
        :param train_mode: a bool tensor, usually a placeholder: if True, dropout will be turned on
        """

        rgb_scaled = rgb * 255.0

        # Convert RGB to BGR and subtract per-channel means, matching the
        # Caffe-style preprocessing of the pretrained weights; the input
        # resolution here is 250x500 rather than VGG's usual 224x224
        red, green, blue = tf.split(axis=3, num_or_size_splits=3, value=rgb_scaled)
        assert red.get_shape().as_list()[1:] == [250, 500, 1]
        assert green.get_shape().as_list()[1:] == [250, 500, 1]
        assert blue.get_shape().as_list()[1:] == [250, 500, 1]
        bgr = tf.concat(axis=3, values=[
            blue - VGG_MEAN[0],
            green - VGG_MEAN[1],
            red - VGG_MEAN[2],
        ])
        assert bgr.get_shape().as_list()[1:] == [250, 500, 3]

        self.conv1_1 = self.conv_layer(bgr, 3, 64, "conv1_1")
        self.conv1_2 = self.conv_layer(self.conv1_1, 64, 64, "conv1_2")
        self.pool1 = self.max_pool(self.conv1_2, 'pool1')

        self.conv2_1 = self.conv_layer(self.pool1, 64, 128, "conv2_1")
        self.conv2_2 = self.conv_layer(self.conv2_1, 128, 128, "conv2_2")
        self.pool2 = self.max_pool(self.conv2_2, 'pool2')

        self.conv3_1 = self.conv_layer(self.pool2, 128, 256, "conv3_1")
        self.conv3_2 = self.conv_layer(self.conv3_1, 256, 256, "conv3_2")
        self.conv3_3 = self.conv_layer(self.conv3_2, 256, 256, "conv3_3")
        # self.conv3_4 = self.conv_layer(self.conv3_3, 256, 256, "conv3_4")
        self.pool3 = self.max_pool(self.conv3_3, 'pool3')

        self.conv4_1 = self.conv_layer(self.pool3, 256, 512, "conv4_1")
        self.conv4_2 = self.conv_layer(self.conv4_1, 512, 512, "conv4_2")
        self.conv4_3 = self.conv_layer(self.conv4_2, 512, 512, "conv4_3")
        # self.conv4_4 = self.conv_layer(self.conv4_3, 512, 512, "conv4_4")
        self.pool4 = self.max_pool(self.conv4_3, 'pool4')

        self.conv5_1 = self.conv_layer(self.pool4, 512, 512, "conv5_1")
        self.conv5_2 = self.conv_layer(self.conv5_1, 512, 512, "conv5_2")
        self.conv5_3 = self.conv_layer(self.conv5_2, 512, 512, "conv5_3")
        # self.conv5_4 = self.conv_layer(self.conv5_3, 512, 512, "conv5_4")
        self.pool5 = self.max_pool(self.conv5_3, 'pool5')
        print("__________________")
        shape = self.pool5.get_shape().as_list()
        print(shape[-1], shape[-2], shape[-3], (shape[-1] * shape[-2] * shape[-3]))

        self.fc6 = self.fc_layer(self.pool5, (shape[-1] * shape[-2] * shape[-3]), 4096, "fc6")  # 25088 = ((224 // (2 ** 5)) ** 2) * 512
        self.relu6 = tf.nn.relu(self.fc6)
        if train_mode is not None:
            self.relu6 = tf.cond(train_mode, lambda: tf.nn.dropout(self.relu6, self.dropout), lambda: self.relu6)
        elif self.trainable:
            self.relu6 = tf.nn.dropout(self.relu6, self.dropout)

        self.fc7 = self.fc_layer(self.relu6, 4096, 4096, "fc7")
        self.relu7 = tf.nn.relu(self.fc7)
        if train_mode is not None:
            self.relu7 = tf.cond(train_mode, lambda: tf.nn.dropout(self.relu7, self.dropout), lambda: self.relu7)
        elif self.trainable:
            self.relu7 = tf.nn.dropout(self.relu7, self.dropout)

        # new 9-way head, one class per denomination, trained from scratch
        self.fc8 = self.fc_layer(self.relu7, 4096, 9, "fc8")

        self.prob = tf.nn.softmax(self.fc8, name="prob")
        self.data_dict = None  # release the pretrained weights once the graph is built

    def avg_pool(self, bottom, name):
        return tf.nn.avg_pool(bottom, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name=name)

    def max_pool(self, bottom, name):
        return tf.nn.max_pool(bottom, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name=name)

    def conv_layer(self, bottom, in_channels, out_channels, name):
        with tf.variable_scope(name):
            filt, conv_biases = self.get_conv_var(3, in_channels, out_channels, name)

            conv = tf.nn.conv2d(bottom, filt, [1, 1, 1, 1], padding='SAME')
            bias = tf.nn.bias_add(conv, conv_biases)
            relu = tf.nn.relu(bias)

            return relu

    def fc_layer(self, bottom, in_size, out_size, name):
        with tf.variable_scope(name):
            weights, biases = self.get_fc_var(in_size, out_size, name)

            x = tf.reshape(bottom, [-1, in_size])
            fc = tf.nn.bias_add(tf.matmul(x, weights), biases)

            return fc

    def get_conv_var(self, filter_size, in_channels, out_channels, name):
        initial_value = tf.truncated_normal([filter_size, filter_size, in_channels, out_channels], 0.0, 0.001)
        filters = self.get_var(initial_value, name, 0, name + "_filters")

        initial_value = tf.truncated_normal([out_channels], .0, .001)
        biases = self.get_var(initial_value, name, 1, name + "_biases")

        return filters, biases

    def get_fc_var(self, in_size, out_size, name):
        initial_value = tf.truncated_normal([in_size, out_size], 0.0, 0.001)
        # fc=True bypasses the pretrained npy weights, so every FC layer is retrained from scratch
        weights = self.get_var(initial_value, name, 0, name + "_weights", True)

        initial_value = tf.truncated_normal([out_size], .0, .001)
        biases = self.get_var(initial_value, name, 1, name + "_biases", True)

        return weights, biases

    def get_var(self, initial_value, name, idx, var_name, fc=False):
        # load the pretrained value when available (conv layers only); otherwise random init
        if self.data_dict is not None and name in self.data_dict and not fc:
            value = self.data_dict[name][idx]
        else:
            value = initial_value

        if self.trainable:
            var = tf.Variable(value, name=var_name)
        else:
            var = tf.constant(value, dtype=tf.float32, name=var_name)

        self.var_dict[(name, idx)] = var

        # print var_name, var.get_shape().as_list()
        assert var.get_shape() == initial_value.get_shape()

        return var

    def get_var_count(self):
        count = 0
        for v in list(self.var_dict.values()):
            count += reduce(lambda x, y: x * y, v.get_shape().as_list())
        return count
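Note that this class isn't tied to 224x224 inputs: fc6 is sized from the actual pool5 shape. As a quick sanity check (a sketch, not part of the repo), get_var_count() reports the parameter count once the graph is built; with 250x500 inputs, fc6 alone holds 65536 x 4096, roughly 2.7e8 weights, so the total is on the order of 3e8:

import tensorflow as tf
import vgg16_trainable as vgg16

# sketch: build the graph once and count parameters (assumes vgg16.npy is present)
vgg = vgg16.Vgg16('./vgg16.npy')
vgg.build(tf.placeholder(tf.float32, [None, 250, 500, 3]), tf.placeholder(tf.bool))
print(vgg.get_var_count())  # ~3e8, dominated by fc6 at this input resolution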

Training code: train_VGG16.py

import tensorflow as tf
import vgg16_trainable as vgg16
import utils
import os

os.environ['CUDA_VISIBLE_DEVICES'] = '2'  # pin training to a single GPU
with tf.Session() as sess:

    # placeholder creation order matters: the test script looks these up as
    # 'Placeholder:0' (input), 'Placeholder_1:0' (label), 'Placeholder_2:0' (train_mode)
    input = tf.placeholder(tf.float32, [None, 250, 500, 3])
    label = tf.placeholder(tf.int32, [None, ])
    train_mode = tf.placeholder(tf.bool)

    vgg = vgg16.Vgg16('./vgg16.npy')
    vgg.build(input, train_mode)

    # build the loss on the raw logits: sparse_softmax_cross_entropy_with_logits
    # applies softmax internally, so pass vgg.fc8 rather than the softmaxed vgg.prob
    loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=vgg.fc8, labels=label))
    optimizer = tf.train.GradientDescentOptimizer(0.0002).minimize(loss)

    sess.run(tf.global_variables_initializer())

    correct_prediction = tf.equal(tf.cast(label, tf.int32), tf.cast(tf.argmax(vgg.prob, 1), tf.int32))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    # create a saver to checkpoint the trained weights
    saver = tf.train.Saver()
    for i in range(50):
        # one random minibatch per step; utils.get_batch_data is sketched after this block
        batch_images, batch_labels = utils.get_batch_data('./train_data', "./train_face_value_label.csv")
        train_dict = {input: batch_images, label: batch_labels, train_mode: True}
        sess.run(optimizer, feed_dict=train_dict)

        loss_, acc_ = sess.run([loss, accuracy], feed_dict=train_dict)

        train_text = 'step: {}, loss: {}, acc: {}'.format(i + 1, loss_, acc_)
        print(train_text)

    saver.save(sess, "./VGG16_model/model.ckpt")
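utils.get_batch_data isn't shown above. A minimal sketch of what it might look like, assuming the CSV columns 'name' and ' label' (leading space included) seen in the test script, one random sample per minibatch, and [0, 1] pixel scaling to match the rgb * 255.0 rescaling inside Vgg16.build(); the batch size is a hypothetical default:

import os
import cv2
import numpy as np
import pandas as pd

# face value -> class index, the inverse of the mapping used at inference time
TARGET_NUM_MAP = {0.1: 0, 0.2: 1, 0.5: 2, 1: 3, 2: 4, 5: 5, 10: 6, 50: 7, 100: 8}

def get_batch_data(image_dir, label_csv, batch_size=32):
    data = pd.read_csv(label_csv)
    data[' label'] = data[' label'].apply(lambda x: TARGET_NUM_MAP[x])
    rows = data.sample(n=batch_size)  # one random minibatch per call
    images, labels = [], []
    for _, row in rows.iterrows():
        img = cv2.imread(os.path.join(image_dir, row['name']))
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # Vgg16.build() expects RGB
        img = cv2.resize(img, (500, 250), interpolation=cv2.INTER_LINEAR)
        images.append(img.astype(np.float32) / 255.0)  # assumed [0, 1] scaling
        labels.append(int(row[' label']))
    return np.asarray(images), np.asarray(labels)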

Test code: test_dataset.py

import tensorflow as tf
import os
import cv2
import pandas as pd
import numpy as np
input_path = '../public_test_data'

# during training, face values were mapped to class indices:
# {0.1: 0, 0.2: 1, 0.5: 2, 1: 3, 2: 4, 5: 5, 10: 6, 50: 7, 100: 8}
batch = []   # loaded test images
name = []    # corresponding file names
label_ = []  # predicted face values (as strings)
cnt = 0      # progress counter
for image_file in os.listdir(input_path):
    cnt = cnt + 1
    if cnt % 10000 == 0:
        print(cnt)  # progress
    name.append(image_file)
    image = cv2.imread(input_path + '/' + image_file)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)  # the model's build() expects RGB
    image = cv2.resize(image, (500, 250), interpolation=cv2.INTER_LINEAR)
    # note: build() documents inputs scaled to [0, 1]; make sure this preprocessing
    # matches whatever utils.get_batch_data fed during training
    batch.append(image)
print(len(batch))
res = pd.DataFrame(name, columns=['name'])
with tf.Session() as sess:
    # restore the trained graph and weights from the checkpoint
    saver = tf.train.import_meta_graph('./VGG16_model/model.ckpt.meta')
    saver.restore(sess, './VGG16_model/model.ckpt')
    # print([n.name for n in tf.get_default_graph().as_graph_def().node])
    # placeholders are looked up by their auto-generated names, in creation order:
    # 'Placeholder:0' is the image input, 'Placeholder_2:0' is train_mode
    inputs = tf.get_default_graph().get_tensor_by_name('Placeholder:0')
    train_mode = tf.get_default_graph().get_tensor_by_name('Placeholder_2:0')
    label = tf.get_default_graph().get_tensor_by_name('prob:0')  # softmax output
    # run inference in minibatches of 50 to keep memory bounded
    minibatch = []
    for i in range(len(batch)):
        minibatch.append(batch[i])
        if (i + 1) % 50 == 0 or i == len(batch) - 1:
            print(i+1)
            prob = sess.run(label, feed_dict={inputs: minibatch, train_mode: False})
            # map predicted class indices back to face-value strings for the CSV
            target_num_map = {0: "0.1", 1: "0.2", 2: "0.5", 3: "1", 4: "2", 5: "5", 6: "10", 7: "50", 8: "100"}
            for j in range(prob.shape[0]):
                pred = np.argsort(prob[j])[::-1]  # class indices, most probable first
                label_.append(target_num_map[pred[0]])  # keep the top-1 prediction
            minibatch = []  # reset for the next minibatch

res['label'] = label_
print(res.head())
res.to_csv("result_1.csv", encoding='utf8', index=False)  # submission file
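The resulting submission file is a two-column CSV. With made-up file names, result_1.csv looks like:

name,label
0A1B2C3D.jpg,100
0E4F5G6H.jpg,0.5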