相关的类似场景有车牌识别,验证码识别等格式固定且短小的字符串识别
图片预处理
从目录读出文件, 并且随机抽取文件名 , 形成训练集和测试集列表
# Read all captcha file names from the image directory, shuffle them, and
# split into training / validation lists.
image_file_name_list, total = _get_image_file_name(CAPTCHA_IMAGE_PATH)
random.seed(time.time())
# Shuffle into a random order
random.shuffle(image_file_name_list)
# Number of training samples (TRAIN_IMAGE_PERCENT of the total)
trainImageNumber = int(total * TRAIN_IMAGE_PERCENT)
# Training split
TRAINING_IMAGE_NAME = image_file_name_list[: trainImageNumber]
# Validation split (the remainder)
VALIDATION_IMAGE_NAME = image_file_name_list[trainImageNumber:]
准备好图像数据和标签数据,并且向量化:
把文件读入内存中,并转换成灰度图片( 3层图像转为1层图像,减少存储信息,方便计算),同时把矩阵信息转化为一维数组, 并且做归一化处理
标签数据就是图片文件名, 然后把文件名 转换成向量;
def _get_data_and_label(file_name, file_path=CAPTCHA_IMAGE_PATH):
    '''Load one captcha image and return (flat normalized pixel data, label vector).

    The file name itself is the ground truth: its first CAPTCHA_LEN characters
    are the captcha text, which is converted to a multi-hot label vector.
    '''
    # Open and convert to single-channel grayscale (3 channels -> 1) to shrink the input.
    img = Image.open(os.path.join(file_path, file_name)).convert("L")
    # Flatten the 2-D pixel matrix to 1-D and normalize values into [0, 1].
    image_data = np.array(img).flatten() / 255
    # First CAPTCHA_LEN chars of the file name encode the label.
    image_label = _name2label(file_name[0:CAPTCHA_LEN])
    return image_data, image_label
def _name2label(name):
    '''Encode a captcha string as a multi-hot label vector of length
    CAPTCHA_LEN * CHAR_SET_LEN: one 1 per character position.'''
    label = np.zeros(CAPTCHA_LEN * CHAR_SET_LEN)
    for position, ch in enumerate(name):
        # Each character position occupies its own CHAR_SET_LEN-wide slot.
        label[position * CHAR_SET_LEN + _char2index(ch)] = 1
    return label
def _char2index(c):
'''Ascii字符转换为向量数据'''
k = ord(c)
index = -1
if k >= 48 and k <= 57: # 数字索引0-9
index = k - 48
if k >= 65 and k <= 90: # 大写字母索引A-Z
index = k - 55
if k >= 97 and k <= 122: # 小写字母索引a-z
index = k - 61
if index == -1:
raise ValueError('No Map')
return index
其中CAPTCHA_LEN表示标签的长度, 比如我们的文字长度是4, 因此CAPTCHA_LEN =4
CHAR_SET_LEN表示 文字 特征向量的长度, 比如特征码范围为"0-9"+"a-z"+"A-Z" ,所以特征向量 CHAR_SET_LEN长度为 10+26+26=62
所以一个特征码的标签(比如: "N3eP") 向量长度是 CAPTCHA_LEN * CHAR_SET_LEN = 4*62 = 248
3. 构建CNN网络模型
验证码在OCR识别场景中特征相对简单,字符少,使用CNN几层网络就足够了
- 构建CNN网络:输入层+ 3层卷积层+全链路层+输出层构成
- 每个卷积层使用Relu作为激活函数, 使用maxpool进一步简化提取特征, 然后为了防止过拟合使用Dropout 再次过滤部分特征
卷积后的矩阵大小计算 :N(原始图像矩阵大小)-F(卷积核矩阵大小)+1
如果使用了padding操作(防止图片被过度压缩,同时防止图像边缘特征丢失),每一次卷积生成的矩阵大小:N+2P(P为Pad的像素)-F+1
def weight_variable(shape, name='weight'):
    '''Create a weight Variable drawn from a truncated normal (stddev=0.1).'''
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1), name=name)
def bias_variable(shape, name='bias'):
    '''Create a bias Variable filled with the constant 0.1.'''
    return tf.Variable(tf.constant(0.1, shape=shape), name=name)
def conv2d(x, W, name='conv2d'):
    '''2-D convolution with stride 1 in every dimension and SAME (zero) padding.'''
    unit_strides = [1, 1, 1, 1]
    return tf.nn.conv2d(x, W, strides=unit_strides, padding='SAME', name=name)
def max_pool_2X2(x, name='maxpool'):
    '''2x2 max pooling with stride 2 — halves the spatial width and height.'''
    window = [1, 2, 2, 1]
    return tf.nn.max_pool(x, ksize=window, strides=window, padding='SAME', name=name)
# Input layer. NOTE the tensor name 'data-input': it is looked up by name when
# the trained model is restored for prediction.
X = tf.placeholder(tf.float32, [None, CAPTCHA_IMAGE_WIDHT * CAPTCHA_IMAGE_HEIGHT], name='data-input')
Y = tf.placeholder(tf.float32, [None, CAPTCHA_LEN * CHAR_SET_LEN], name='label-input')
# Reshape the flat pixel vector back to (batch, height, width, 1 channel).
x_input = tf.reshape(X, [-1, CAPTCHA_IMAGE_HEIGHT, CAPTCHA_IMAGE_WIDHT, 1], name='x-input')
# Dropout keep probability (anti-overfitting): fed as 0.75 during training,
# 1.0 during evaluation/prediction. NOTE the name 'keep-prob' — it is also
# fetched by name at prediction time.
keep_prob = tf.placeholder(tf.float32, name='keep-prob')
搭建 第一层卷积:
使用32个3*3的filter,输入图像channel 为1(灰度化后的图片),输出channel为32
# First convolutional layer: 32 filters of 3x3; input channel 1 (grayscale
# image), output channel 32.
W_conv1 = weight_variable([3, 3, 1, 32], 'W_conv1')
# Bias length matches the number of output channels (32).
B_conv1 = bias_variable([32], 'B_conv1')
# ReLU activation on the convolution output.
conv1 = tf.nn.relu(conv2d(x_input, W_conv1, 'conv1') + B_conv1)
# 2x2 max pooling compresses the feature map (halves width/height), speeding up
# computation and adding robustness to the extracted features.
conv1 = max_pool_2X2(conv1, 'conv1-pool')
# Dropout to reduce overfitting.
# FIX: the original line ended with a C-style '//...' trailing comment, which
# Python parses as floor division by an undefined name inside a tuple
# expression — replaced with a proper '#' comment.
conv1 = tf.nn.dropout(conv1, keep_prob)
搭建第二层卷积
使用64个3×3的filter,输入图像channel 为32,输出channel为64
# Second convolutional layer: 64 filters of 3x3; 32 input channels -> 64 output channels.
W_conv2 = weight_variable([3, 3, 32, 64], 'W_conv2')
B_conv2 = bias_variable([64], 'B_conv2')
conv2 = tf.nn.relu(conv2d(conv1, W_conv2, 'conv2') + B_conv2)
conv2 = max_pool_2X2(conv2, 'conv2-pool')
conv2 = tf.nn.dropout(conv2, keep_prob)
搭建 第三层卷积
使用64个3×3的filter,输入图像channel 为64,输出channel为64
# Third convolutional layer: 64 filters of 3x3; 64 input channels -> 64 output channels.
W_conv3 = weight_variable([3, 3, 64, 64], 'W_conv3')
B_conv3 = bias_variable([64], 'B_conv3')
conv3 = tf.nn.relu(conv2d(conv2, W_conv3, 'conv3') + B_conv3)
conv3 = max_pool_2X2(conv3, 'conv3-pool')
conv3 = tf.nn.dropout(conv3, keep_prob)
搭建全链接层
每次池化后,图片的宽度和高度均缩小为原来的一半,经过上面的三次池化,宽度和高度均缩小8倍
# Fully connected layer. After three 2x2 max-poolings, width and height are
# each reduced 8x; if the image size changes, the first two factors below must
# become ceil(width/8) * ceil(height/8).
# NOTE(review): 13 * 6 presumably derives from the captcha image dimensions
# defined elsewhere — TODO confirm against CAPTCHA_IMAGE_WIDHT / HEIGHT.
W_fc1 = weight_variable([13 * 6 * 64, 1024], 'W_fc1')
B_fc1 = bias_variable([1024], 'B_fc1')
# Flatten conv3 to (batch, 13*6*64) before the matmul.
fc1 = tf.reshape(conv3, [-1, W_fc1.get_shape().as_list()[0]])
fc1 = tf.nn.relu(tf.add(tf.matmul(fc1, W_fc1), B_fc1))
fc1 = tf.nn.dropout(fc1, keep_prob)
# Output layer: one logit per (character position, character class) pair.
W_fc2 = weight_variable([1024, CAPTCHA_LEN * CHAR_SET_LEN], 'W_fc2')
B_fc2 = bias_variable([CAPTCHA_LEN * CHAR_SET_LEN], 'B_fc2')
output = tf.add(tf.matmul(fc1, W_fc2), B_fc2, 'output')
定义损失函数(优化器)
# Loss and optimizer. Sigmoid (not softmax) cross-entropy is used because the
# label vector is multi-hot — one 1 per character position — so each logit is
# treated as an independent binary prediction.
loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=Y, logits=output))
optimizer = tf.train.AdamOptimizer(0.001).minimize(loss)
定义 预期结果和 实际标签结构—把预测结果 和 预期标签 转换为 shape=(?, 4, 62) 维度的矩阵
# Reshape predictions and labels to (batch, CAPTCHA_LEN, CHAR_SET_LEN) so that
# each character position can be scored independently.
predict = tf.reshape(output, [-1, CAPTCHA_LEN, CHAR_SET_LEN], name='predict')
labels = tf.reshape(Y, [-1, CAPTCHA_LEN, CHAR_SET_LEN], name='labels')
找到给定的张量tensor中在指定轴axis上的最大值/最小值的位置。并定义 成本函数reduce_mean
# Predicted character index per position (argmax over the CHAR_SET_LEN axis).
# NOTE the name 'predict_max_idx' — it is fetched by name at prediction time.
predict_max_idx = tf.argmax(predict, axis=2, name='predict_max_idx')
labels_max_idx = tf.argmax(labels, axis=2, name='labels_max_idx')
predict_correct_vec = tf.equal(predict_max_idx, labels_max_idx)
# Per-character accuracy (not per-captcha: a captcha with one wrong character
# still contributes its correct positions).
accuracy = tf.reduce_mean(tf.cast(predict_correct_vec, tf.float32))
模型保存定义
# Model checkpointing.
saver = tf.train.Saver()
# Allow TF to fall back to another device when an op has no kernel on the
# requested one, log op placement, and cap per-process GPU memory at 60%.
config = tf.ConfigProto(allow_soft_placement=True,log_device_placement=True)
config.gpu_options.per_process_gpu_memory_fraction = 0.6
4. 训练模型
每4000次迭代保存一次模型;
精准率>0.99时也保存模型;
# Training loop. Saves a checkpoint every 500 steps once past step 4000, and
# stops early (also saving) when validation accuracy exceeds 0.99.
with tf.Session(config=config) as sess:
    sess.run(tf.global_variables_initializer())
    steps = 0
    for epoch in range(6000):
        train_data, train_label = _get_next_batch(64, 'train', steps)
        # keep_prob 0.75 -> dropout keeps 75% of activations during training.
        # FIX: the original lines ended with C-style '//...' trailing comments,
        # which Python parses as floor-division expressions — replaced with '#'.
        op, pre = sess.run([optimizer, labels_max_idx],
                           feed_dict={X: train_data, Y: train_label, keep_prob: 0.75})
        # NOTE(review): `steps % 1` is always 0, so validation runs every step;
        # a larger interval (e.g. % 100) was probably intended — confirm before changing.
        if steps % 1 == 0:
            test_data, test_label = _get_next_batch(100, 'validate', steps)
            # keep_prob 1.0 disables dropout for evaluation.
            acc = sess.run(accuracy, feed_dict={X: test_data, Y: test_label, keep_prob: 1.0})
            print("steps=%d, accuracy=%f" % (steps, acc))
            if acc > 0.99:
                saver.save(sess, MODEL_SAVE_PATH + "crack_captcha.model", global_step=steps)
                break
        if steps > 4000:
            if steps % 500 == 0:
                saver.save(sess, MODEL_SAVE_PATH + "crack_captcha.model", global_step=steps)
        steps += 1
5. 预测
加载模型graph
# Load the trained graph structure from the saved .meta checkpoint file.
saver = tf.train.import_meta_graph(MODEL_SAVE_PATH + "crack_captcha.model-11500.meta")
graph = tf.get_default_graph()
从graph取得预测时需要的相关tensor,具体的的name需和构建graph时定义的一致(通过打印Grafh节点信息得到)
# print([n.name for n in tf.get_default_graph().as_graph_def().node])
# Fetch the tensors needed for inference by the names given when the graph was
# built ('data-input', 'keep-prob', 'predict_max_idx'); the ':0' suffix selects
# the op's first output tensor.
input_holder = graph.get_tensor_by_name("data-input:0")
keep_prob_holder = graph.get_tensor_by_name("keep-prob:0")
predict_max_idx = graph.get_tensor_by_name("predict_max_idx:0")
下面是Graph的节点信息
[‘data-input‘, ‘label-input’, ‘x-input/shape’, ‘x-input’, ‘keep-prob‘, ‘truncated_normal/shape’, ‘truncated_normal/mean’, ‘truncated_normal/stddev’, ‘truncated_normal/TruncatedNormal’, ‘truncated_normal/mul’, ‘truncated_normal’, ‘W_conv1’, ‘W_conv1/Assign’, ‘W_conv1/read’, ‘Const’, ‘B_conv1’, ‘B_conv1/Assign’, ‘B_conv1/read’, ‘conv1’, ‘add’, ‘Relu’, ‘conv1-pool’, ‘dropout/Shape’, ‘dropout/random_uniform/min’, ‘dropout/random_uniform/max’, ‘dropout/random_uniform/RandomUniform’, ‘dropout/random_uniform/sub’, ‘dropout/random_uniform/mul’, ‘dropout/random_uniform’, ‘dropout/add’, ‘dropout/Floor’, ‘dropout/div’, ‘dropout/mul’,…., ‘labels’, ‘predict_max_idx/dimension’, ‘predict_max_idx‘, ,…]
运行tf session执行图片预测:
# Run the prediction session.
# NOTE(review): the bare 'return' below implies this snippet is the body of a
# function (taking file_path) whose 'def' line is not shown in this excerpt.
with tf.Session() as sess:
saver.restore(sess, tf.train.latest_checkpoint(MODEL_SAVE_PATH))
# Load and preprocess the image
img_data, img_name = _get_image_data_and_name(file_path)
# Predict; keep_prob 1.0 disables dropout at inference time
predict = sess.run(predict_max_idx, feed_dict={input_holder: [img_data], keep_prob_holder: 1.0})
# Convert the predicted index array back to the captcha string
predict_value = _array2string(np.squeeze(predict))
print(predict_value)
return predict_value
没有评论