TensorFlow Learning Notes: Recognizing the ORL Dataset with a LeNet-5-Based Network

Reference:
《基于卷積神經(jīng)網(wǎng)絡(luò)的人臉識別研究》 (Research on Face Recognition Based on Convolutional Neural Networks), 李春利, 柳振東, 惠康華

Building on the classic LeNet-5 architecture, the paper proposes a CNN structure tailored to the ORL dataset and reports a high recognition rate on it.

This article implements the paper's approach in TensorFlow.
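For context: the orl_inference module used by the training and evaluation code below is not listed in this article. The following is a minimal sketch of what its inference function could look like, following the LeNet-5 pattern the paper builds on (two convolution + pooling stages, then fully connected layers ending in 40 output nodes). The filter counts and the 512-node hidden layer are illustrative choices of mine, not necessarily the paper's exact configuration.

import tensorflow as tf

IMAGE_SIZE = 28
NUM_CHANNELS = 1
OUTPUT_NODE = 40  # one class per subject in ORL


def inference(input_tensor, train, regularizer):
    # Conv 1: 5x5 kernels, 32 maps; 2x2 max pooling halves 28x28 to 14x14
    with tf.variable_scope('layer1-conv1'):
        conv1_w = tf.get_variable(
            'weight', [5, 5, NUM_CHANNELS, 32],
            initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv1_b = tf.get_variable('bias', [32], initializer=tf.constant_initializer(0.0))
        conv1 = tf.nn.conv2d(input_tensor, conv1_w, strides=[1, 1, 1, 1], padding='SAME')
        relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_b))
        pool1 = tf.nn.max_pool(relu1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

    # Conv 2: 5x5 kernels, 64 maps; pooling halves 14x14 to 7x7
    with tf.variable_scope('layer2-conv2'):
        conv2_w = tf.get_variable(
            'weight', [5, 5, 32, 64],
            initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv2_b = tf.get_variable('bias', [64], initializer=tf.constant_initializer(0.0))
        conv2 = tf.nn.conv2d(pool1, conv2_w, strides=[1, 1, 1, 1], padding='SAME')
        relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_b))
        pool2 = tf.nn.max_pool(relu2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

    # Flatten the 7x7x64 feature maps into a vector
    pool_shape = pool2.get_shape().as_list()
    nodes = pool_shape[1] * pool_shape[2] * pool_shape[3]
    reshaped = tf.reshape(pool2, [-1, nodes])

    # FC 1, with dropout only during training; weights are L2-regularized
    with tf.variable_scope('layer3-fc1'):
        fc1_w = tf.get_variable(
            'weight', [nodes, 512],
            initializer=tf.truncated_normal_initializer(stddev=0.1))
        if regularizer is not None:
            tf.add_to_collection('losses', regularizer(fc1_w))
        fc1_b = tf.get_variable('bias', [512], initializer=tf.constant_initializer(0.1))
        fc1 = tf.nn.relu(tf.matmul(reshaped, fc1_w) + fc1_b)
        if train:
            fc1 = tf.nn.dropout(fc1, 0.5)

    # FC 2 produces the 40-way logits
    with tf.variable_scope('layer4-fc2'):
        fc2_w = tf.get_variable(
            'weight', [512, OUTPUT_NODE],
            initializer=tf.truncated_normal_initializer(stddev=0.1))
        if regularizer is not None:
            tf.add_to_collection('losses', regularizer(fc2_w))
        fc2_b = tf.get_variable('bias', [OUTPUT_NODE], initializer=tf.constant_initializer(0.1))
        logit = tf.matmul(fc1, fc2_w) + fc2_b

    return logit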

ORL training set (download via CSDN)

After downloading and extracting it, you can see that the ORL dataset contains 40 classes, each with 10 images in BMP format.


[Figure: extracted directory layout; sample images from class folder s1]

The first step is to read this data in and build our own training and test sets.

import os
import random

import cv2

input_path = "./orl"
train_path = "./train"
test_path = "./test"

if not os.path.exists(train_path):
    os.mkdir(train_path)

if not os.path.exists(test_path):
    os.mkdir(test_path)

# Create one subdirectory per class (1-40) under train/ and test/
for i in range(1, 41):
    if not os.path.exists(train_path + '/' + str(i)):
        os.mkdir(train_path + '/' + str(i))
    if not os.path.exists(test_path + '/' + str(i)):
        os.mkdir(test_path + '/' + str(i))


# Split the raw images into training and test data
def generate_data(train_path, test_path):
    index = 1
    output_index = 1
    for (dirpath, dirnames, filenames) in os.walk(input_path):
        # Shuffle the file list so that, per class, 8 random images go to training and 2 to testing
        random.shuffle(filenames)
        for filename in filenames:
            if filename.endswith('.bmp'):
                img_path = dirpath + '/' + filename
                # Read the image with OpenCV
                img_data = cv2.imread(img_path)
                # Resize the image to 28 x 28, as in the paper
                img_data = cv2.resize(img_data, (28, 28), interpolation=cv2.INTER_AREA)
                if index < 3:
                    cv2.imwrite(test_path + '/' + str(output_index) + '/' + str(index) + '.jpg', img_data)
                    index += 1
                elif 3 <= index <= 10:
                    cv2.imwrite(train_path + '/' + str(output_index) + '/' + str(index) + '.jpg', img_data)
                    index += 1
                if index > 10:
                    output_index += 1
                    index = 1
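generate_data above only defines the split; it still has to be called once to actually populate the two directories:

generate_data(train_path, test_path)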

After running this we have 320 training images (40 classes × 8) and 80 test images (40 × 2), all chosen at random.

Training set:

[Figure: training-set folders; sample images from class 1]

Test set:

[Figure: test-set folders]

Next, write the train and test images into TFRecord files, attaching a label to each example as we go:

# Build an int64 Feature
def _int64_feature(value):
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))


# Build a byte-string Feature
def _bytes_feature(value):
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))


train_path = "./train/"
test_path = "./test/"
classes = list(range(1, 41))  # class labels 1-40; a list keeps index and folder name aligned
writer_train = tf.python_io.TFRecordWriter("orl_train.tfrecords")
writer_test = tf.python_io.TFRecordWriter("orl_test.tfrecords")


def generate():
    # Loop over all 40 classes
    for index, name in enumerate(classes):
        train = train_path + str(name) + '/'
        test = test_path + str(name) + '/'
        for img_name in os.listdir(train):
            img_path = train + img_name  # full path of one training image
            img = cv2.imread(img_path)
            # Collapse the BGR channels to single-channel grayscale
            img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            img_raw = img.tobytes()
            example = tf.train.Example(features=tf.train.Features(feature={
                'label': _int64_feature(index + 1),
                'img_raw': _bytes_feature(img_raw)
            }))
            writer_train.write(example.SerializeToString())
        for img_name in os.listdir(test):
            img_path = test + img_name  # full path of one test image
            img = cv2.imread(img_path)
            # Collapse the BGR channels to single-channel grayscale
            img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            img_raw = img.tobytes()
            example = tf.train.Example(features=tf.train.Features(feature={
                'label': _int64_feature(index + 1),
                'img_raw': _bytes_feature(img_raw)
            }))
            writer_test.write(example.SerializeToString())
    writer_test.close()
    writer_train.close()
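After calling generate() to produce orl_train.tfrecords and orl_test.tfrecords, the training code needs image and label tensors decoded back from the file. That reader is not shown in the article; here is a minimal sketch under the same assumptions as the writer above (a 28 x 28 single-channel image stored as raw bytes plus an int64 label). The name read_and_decode is mine:

def read_and_decode(filename):
    # Queue the input file and parse one serialized Example at a time
    filename_queue = tf.train.string_input_producer([filename])
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(
        serialized_example,
        features={
            'label': tf.FixedLenFeature([], tf.int64),
            'img_raw': tf.FixedLenFeature([], tf.string),
        })
    # Recover the 28 x 28 single-channel image from the raw bytes
    img = tf.decode_raw(features['img_raw'], tf.uint8)
    img = tf.reshape(img, [28, 28, 1])
    label = tf.cast(features['label'], tf.int32)
    return img, label


# data, label = read_and_decode('orl_train.tfrecords')
# train(data, label)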

Now we can start training:

def train(data, label):
    x = tf.placeholder(tf.float32,
                       [BATCH_SIZE, SIZE, SIZE, orl_inference.NUM_CHANNELS],
                       name='x-input')

    y_ = tf.placeholder(tf.float32, [None, orl_inference.OUTPUT_NODE], name='y-output')

    # L2 regularization; the per-layer penalties are collected into 'losses'
    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)

    # tf.train.shuffle_batch assembles the images and labels into randomized batches
    min_after_dequeue = 100
    capacity = min_after_dequeue + 3 * BATCH_SIZE
    image_batch, label_batch = tf.train.shuffle_batch(
        [data, label], batch_size=BATCH_SIZE,
        capacity=capacity, min_after_dequeue=min_after_dequeue
    )

    y = orl_inference.inference(x, False, regularizer)

    global_step = tf.Variable(0, trainable=False)

    variable_averages = tf.train.ExponentialMovingAverage(
        MOVING_AVERAGE_DECAY, global_step
    )

    variable_averages_op = variable_averages.apply(tf.trainable_variables())

    # Cross-entropy between the predictions and the ground-truth labels
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))

    # Average the cross-entropy over the batch
    cross_entropy_mean = tf.reduce_mean(cross_entropy)

    # Total loss = cross-entropy loss + regularization loss
    loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))

    # Exponentially decaying learning rate; 320 / BATCH_SIZE steps make one epoch
    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE,
        global_step,
        320 / BATCH_SIZE,
        LEARNING_RATE_DECAY,
        staircase=True
    )

    # Minimize the total loss with gradient descent
    train_step = tf.train.GradientDescentOptimizer(learning_rate) \
        .minimize(loss, global_step=global_step)

    with tf.control_dependencies([train_step, variable_averages_op]):
        train_op = tf.no_op(name='train')
    saver = tf.train.Saver()

    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        # Train the network iteratively
        for i in range(TRAINING_STEPS):
            xs, ys = sess.run([image_batch, label_batch])
            xs = xs / 255.0
            reshaped_xs = np.reshape(xs, (BATCH_SIZE,
                                          SIZE,
                                          SIZE,
                                          orl_inference.NUM_CHANNELS))
            # Convert the integer labels into one-hot vectors (see the get_label sketch below)
            ys = get_label(ys)
            _, loss_value, step = sess.run([train_op, loss, global_step],
                                           feed_dict={x: reshaped_xs, y_: ys})

            if i % 100 == 0:
                # Every 100 steps, report the loss on the current training batch and save a checkpoint
                print("After %d training step[s], loss on training"
                      " batch is %g. " % (step, loss_value))

                saver.save(
                    sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME),
                    global_step=global_step
                )
        coord.request_stop()
        coord.join(threads)
[Figure: training log output]
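Both train() above and evaluate() below call a helper get_label that the article does not list. Judging from its use together with tf.argmax(y_, 1), it turns the integer labels from the TFRecord (1 to 40) into one-hot vectors; a plausible sketch:

import numpy as np


def get_label(labels):
    # Map integer labels 1..40 to one-hot rows of length OUTPUT_NODE (assumed 40)
    one_hot = np.zeros((len(labels), orl_inference.OUTPUT_NODE), dtype=np.float32)
    one_hot[np.arange(len(labels)), np.asarray(labels) - 1] = 1.0
    return one_hot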

Now run the evaluation:

def evaluate():
    with tf.Graph().as_default() as g:
        filename_queue = tf.train.string_input_producer(["orl_test.tfrecords"])
        reader = tf.TFRecordReader()
        _, serialized_example = reader.read(filename_queue)
        features = tf.parse_single_example(serialized_example,
                                           features={
                                               'label': tf.FixedLenFeature([], tf.int64),
                                               'img_raw': tf.FixedLenFeature([], tf.string),
                                           })
        img = tf.decode_raw(features['img_raw'], tf.uint8)
        img = tf.reshape(img, [28, 28, 1])
        label = tf.cast(features['label'], tf.int32)
        min_after_dequeue = 100
        capacity = min_after_dequeue + 3 * 200
        image_batch, label_batch = tf.train.shuffle_batch(
            [img, label], batch_size=80,
            capacity=capacity, min_after_dequeue=min_after_dequeue
        )

        x = tf.placeholder(tf.float32,
                           [80,
                            orl_inference.IMAGE_SIZE,
                            orl_inference.IMAGE_SIZE,
                            orl_inference.NUM_CHANNELS],
                           name='x-input')
        y_ = tf.placeholder(
            tf.float32, [None, orl_inference.OUTPUT_NODE], name='y-input'
        )

        y = orl_inference.inference(x, None, None)

        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        variable_averages = tf.train.ExponentialMovingAverage(
            orl_train.MOVING_AVERAGE_DECAY
        )
        variable_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variable_to_restore)

        # Evaluate once every EVAL_INTERVAL_SECS seconds
        while True:
            with tf.Session() as sess:
                coord = tf.train.Coordinator()
                threads = tf.train.start_queue_runners(sess=sess, coord=coord)
                xs, ys = sess.run([image_batch, label_batch])
                ys = get_label(ys)
                xs = xs / 255.0
                validate_feed = {x: xs,
                                 y_: ys}

                ckpt = tf.train.get_checkpoint_state(
                    orl_train.MODEL_SAVE_PATH
                )
                if ckpt and ckpt.model_checkpoint_path:
                    # Restore the trained model
                    saver.restore(sess, ckpt.model_checkpoint_path)
                    # Recover the global step from the checkpoint file name
                    global_step = ckpt.model_checkpoint_path \
                        .split('/')[-1].split('-')[-1]
                    accuracy_score = sess.run(accuracy, feed_dict=validate_feed)
                    print("After %s training steps, validation "
                          "accuracy = %g" % (global_step, accuracy_score))
                else:
                    print("No checkpoint file found")
                    return
                # Stop the queue-runner threads before the session closes
                coord.request_stop()
                coord.join(threads)
            time.sleep(EVAL_INTERVAL_SECS)
[Figure: validation accuracy output]
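The article does not show an entry point for the evaluation script; a conventional TF 1.x one (my assumption) would be:

def main(argv=None):
    evaluate()


if __name__ == '__main__':
    tf.app.run()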

This experiment draws on the book 《Tensorflow 實(shí)戰(zhàn)Google深度學(xué)習(xí)框架》 (TensorFlow: Practical Google Deep Learning Framework); applying what I learned there, I worked through the paper's experiment end to end, which deepened my understanding.
Full code: word has it that people who star the repo become better-looking.
