A TensorFlow Implementation of ResNet50


I've been reading the residual network paper lately and looking at a lot of implementations posted online. Much of that code is written without real logic; some of it frankly isn't correct and only happens to produce the right result. Another thing I don't get is why something as simple as pooling has to be dressed up as "downsampling", which left me totally confused while reading the paper. Also, a model that suits most datasets out of the box almost doesn't exist. Working from the paper and posts online, I implemented ResNet50, but I haven't trained it: I don't have a good 224*224 dataset, my disk is too small, and I can't run anything big. I'm posting the code today; feel free to take it as a reference. One more thing: the most important part of reproducing a model is getting the network structure straight. It's best to build the model with the architecture diagram in front of you, so you see clearly how the tensor changes at every layer and don't get lost. I found a good structure diagram online and am sharing it here: https://blog.csdn.net/haoji007/article/details/90259359
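
As an aside on the pooling / "downsampling" point: a stride-2 pool and a stride-2 convolution both halve the spatial dimensions; the convolution just adds learnable weights. A minimal shape check of my own (TF 1.x with tf.contrib.slim; this snippet is not part of the original code):

import tensorflow as tf
import tensorflow.contrib.slim as slim

x = tf.random_normal([1, 56, 56, 64])
pooled = slim.max_pool2d(x, 2, stride=2)   # VALID pooling -> (1, 28, 28, 64)
strided = slim.conv2d(x, 64, 3, stride=2)  # 'SAME' conv   -> (1, 28, 28, 64)
print(pooled.get_shape(), strided.get_shape())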

Now for the code. If you've read the residual network paper carefully, I'm sure you'll be able to follow it. I'm also planning to build an 18-layer residual network tonight and train it on cifar10 to see how it does. The code is below.

import tensorflow as tf
import tensorflow.contrib.slim as slim

WEIGHT_DECAY = 0.01

# I added this after reading someone else's post, but while building the model
# I found it is not actually needed: a (strided) convolution can change the
# feature-map size just as well, so I commented it out and everything still
# works. Also, since TensorFlow has been iterating again recently, you may see
# some warnings, but nothing serious.

# def sampling(input_tensor,
#              ksize=1,
#              stride=2):
#     data = input_tensor
#     if stride > 1:
#         data = slim.max_pool2d(data, ksize, stride=stride)
#         print('sampling', 2)
#     return data


def conv2d_same(input_tensor,
                num_outputs,
                kernel_size,
                stride,
                is_train=True,
                activation_fn=tf.nn.relu,
                normalizer_fc=True
                ):
    data = input_tensor
    if stride == 1:  # compare by value; 'is' only happens to work for small ints
        data = slim.conv2d(inputs=data,
                           num_outputs=num_outputs,
                           kernel_size=kernel_size,
                           stride=stride,
                           weights_regularizer=slim.l2_regularizer(WEIGHT_DECAY),
                           activation_fn=None,
                           padding='SAME',
                           )
    else:
        pad_total = kernel_size - 1
        pad_begin = pad_total // 2
        pad_end = pad_total - pad_begin
        data = tf.pad(data, [[0, 0], [pad_begin, pad_end], [pad_begin, pad_end], [0, 0]])
        data = slim.conv2d(data,
                           num_outputs=num_outputs,
                           kernel_size=kernel_size,
                           stride=stride,
                           weights_regularizer=slim.l2_regularizer(WEIGHT_DECAY),
                           activation_fn=None,
                           padding='VALID',
                           )
    if normalizer_fc:
        # batch normalization; when training, its moving-average update ops must
        # be run alongside the train op (see the training sketch at the end)
        data = tf.layers.batch_normalization(data, training=is_train)
    if activation_fn is not None:
        data = activation_fn(data)
    return data
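
# A quick sanity check for conv2d_same (my own example, not from the original
# post): for the 7x7/stride-2 stem conv on a 224x224 input, pad_total = 6 is
# split 3/3, the padded input is 230x230, and the VALID conv gives
# (230 - 7) // 2 + 1 = 112:
#
#     x = tf.random_normal([1, 224, 224, 3])
#     y = conv2d_same(x, 64, 7, 2)   # -> shape (1, 112, 112, 64)
#
# This explicit-padding trick keeps the output size independent of whether the
# input size is even or odd, matching the conv2d_same helper in the official
# slim ResNet code.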


def bottle_net(input_tensor, output_depth, is_train, stride=1):
    data = input_tensor
    depth = input_tensor.get_shape().as_list()[-1]
    # identity shortcut when the channel count already matches (in ResNet50 this
    # only happens when stride == 1); otherwise project with a 1x1 convolution
    # so the shapes line up for the addition
    if depth == output_depth:
        shortcut_tensor = input_tensor
    else:
        shortcut_tensor = conv2d_same(input_tensor, output_depth, 1, stride, is_train=is_train, activation_fn=None,
                                      normalizer_fc=True)
    # bottleneck: 1x1 reduce -> 3x3 (carries the stride) -> 1x1 restore
    data = conv2d_same(data, output_depth // 4, 1, 1, is_train=is_train)
    data = conv2d_same(data, output_depth // 4, 3, stride, is_train=is_train)
    # the last conv is batch-normalized but not activated; the ReLU comes after
    # the addition, as in the paper
    data = conv2d_same(data, output_depth, 1, 1, is_train=is_train, activation_fn=None, normalizer_fc=True)

    # add the shortcut (the residual connection)
    data = data + shortcut_tensor
    data = tf.nn.relu(data)
    return data
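
# Shape example for one bottleneck unit (my own numbers, for illustration):
# with input [N, 56, 56, 256], output_depth=512 and stride=2, the main branch
# goes 1x1 -> [N, 56, 56, 128], 3x3/2 -> [N, 28, 28, 128], 1x1 -> [N, 28, 28, 512],
# while the shortcut is a 1x1/stride-2 projection to [N, 28, 28, 512], so the
# addition lines up.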


def create_block(input_tensor, output_depth, block_nums, init_stride=1, is_train=True, scope='block'):
    with tf.variable_scope(scope):
        # the first unit may downsample / project (init_stride, channel change);
        # the remaining block_nums - 1 units are plain identity bottlenecks
        data = bottle_net(input_tensor, output_depth, is_train=is_train, stride=init_stride)
        for _ in range(1, block_nums):
            data = bottle_net(data, output_depth, is_train=is_train)
        return data


def ResNet(input_tensor, num_output, is_train, scope='resnet50'):
    data = input_tensor
    with tf.variable_scope(scope):
        data = conv2d_same(data, 64, 7, 2, is_train=is_train, normalizer_fc=True)
        data = slim.max_pool2d(data, 3, 2, padding='SAME', scope='pool_1')
        # first group of residual blocks (output: 56x56x256)
        data = create_block(data, 256, 3, init_stride=1, is_train=is_train, scope='block1')

        # second group of residual blocks (output: 28x28x512)
        data = create_block(data, 512, 4, init_stride=2, is_train=is_train, scope='block2')

        # third group of residual blocks (output: 14x14x1024)
        data = create_block(data, 1024, 6, init_stride=2, is_train=is_train, scope='block3')

        # fourth group of residual blocks (output: 7x7x2048)
        data = create_block(data, 2048, 3, init_stride=2, is_train=is_train, scope='block4')

        # global average pooling, then a 1x1 conv as the fully connected layer
        # (the fixed 7x7 pool assumes a 224x224 input)
        data = slim.avg_pool2d(data, 7)
        data = slim.conv2d(data, num_output, 1, activation_fn=None, scope='final_conv')

        data_shape = data.get_shape().as_list()
        nodes = data_shape[1] * data_shape[2] * data_shape[3]
        data = tf.reshape(data, [-1, nodes])

        return data
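
# Shape walk-through for a 224x224x3 input (my own bookkeeping, not from the
# original post): 7x7/2 conv -> 112x112x64; 3x3/2 max pool -> 56x56x64;
# block1 -> 56x56x256; block2 -> 28x28x512; block3 -> 14x14x1024;
# block4 -> 7x7x2048; 7x7 avg pool -> 1x1x2048; final 1x1 conv -> 1x1xnum_output;
# reshape -> [batch, num_output].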


if __name__ == '__main__':
    # quick smoke test: build the graph and print the output tensor
    x = tf.random_normal([32, 224, 224, 3])
    data = ResNet(x, 1000, True)
    print(data)
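
One caveat if you do train this: tf.layers.batch_normalization(training=True) registers its moving-average updates in the UPDATE_OPS collection, and they only run if you tie them to the train op. A minimal training-step sketch (labels and learning_rate are placeholders of my own, not from the code above):

logits = ResNet(x, 1000, is_train=True)
loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits))
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_op = tf.train.MomentumOptimizer(learning_rate, 0.9).minimize(loss)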

My abilities are quite limited, so if you find mistakes, I hope you'll point them out.

