Machine Learning CV Code Practice (6): Image Captioning - Training a Network from the Model Structure Diagram


  • Look up the layers you need in the Keras API docs (e.g. Input, Embedding, Dropout, LSTM, Add)
  • Complete model training code

  • Network model structure diagram 1: (figure)

  • Network model structure diagram 2: (figure)

Look up the layers you need in the Keras API docs (e.g. Input, Embedding, Dropout, LSTM, Add)

def caption_model(vocab_size, max_len):
    """Create a network model that generates captions for images.

    Args:
        vocab_size: number of distinct caption words in the training set
        max_len: length of the longest caption in the training set

    Returns:
        A model for generating image captions.
    """
    # image branch: 4096-dim feature vector -> dropout -> 256-dim dense
    input1 = Input(shape=(4096,))
    dropout_1 = Dropout(0.5)(input1)
    dense_1 = Dense(256, activation='relu')(dropout_1)
    # text branch: integer word indices -> embedding -> LSTM
    input2 = Input(shape=(max_len,))
    # Embedding turns positive integers into fixed-size dense vectors
    # (it can only be used as the first layer of a model);
    # here each word becomes a 256-dimensional vector
    embed_1 = Embedding(vocab_size, 256)(input2)
    lstm_1 = LSTM(256, activation='relu')(embed_1)
    # merge the two branches and predict the next word
    add_1 = add([dense_1, lstm_1])
    dense_2 = Dense(256, activation='relu')(add_1)
    outputs = Dense(vocab_size, activation='softmax')(dense_2)
    model = Model(inputs=[input1, input2], outputs=outputs)
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    return model
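A minimal usage sketch to check that the wiring matches the structure diagrams. The vocabulary size of 7579 and maximum caption length of 34 are placeholder values; in practice both are computed from the training set, as in the training code below. The sketch also assumes the Keras imports from the full listing below:

from keras.utils import plot_model

vocab_size = 7579  # placeholder; use len(tokenizer.word_index) + 1
max_len = 34       # placeholder; use the longest training caption

model = caption_model(vocab_size, max_len)
model.summary()  # textual view of the two-branch layout
# renders a diagram like the ones above (requires pydot and graphviz)
plot_model(model, to_file='model.png', show_shapes=True)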

Complete model training code

from keras.utils import plot_model
from keras.models import Model
from keras.layers import Input
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Embedding
from keras.layers import Dropout
from keras.layers.merge import add
from pickle import load
from keras.utils import to_categorical
from keras.preprocessing.sequence import pad_sequences
import util  # local helper module (load_ids, load_clean_captions, load_photo_features, get_max_length)
from numpy import array


def create_batches(desc_list, photo_features, tokenizer, max_len, vocab_size=7378):
    """Build one group of LSTM training inputs from an image's captions and its feature vector.

    Args:
        desc_list: the list of captions belonging to one image
        photo_features: the feature vector of that image
        tokenizer: keras.preprocessing.text.Tokenizer mapping English words to integers
        max_len: length of the longest caption in the training set
        vocab_size: number of words in the training set, default 7378

    Returns:
        tuple: the first element is a list of image features,
        the second is a list of caption prefixes,
        the third is a list of next words (the word to be produced
        from the image feature plus the caption prefix).

    Examples:
        #>>> from pickle import load
        #>>> tokenizer = load(open('tokenizer.pkl', 'rb'))
        #>>> desc_list = ['startseq one dog on desk endseq', "startseq red bird on tree endseq"]
        #>>> photo_features = [0.434, 0.534, 0.212, 0.98]
        #>>> print(create_batches(desc_list, photo_features, tokenizer, 6, 7378))
        (array([[ 0.434, 0.534, 0.212, 0.98 ],
                ...,
                [ 0.434, 0.534, 0.212, 0.98 ]]),
         array([[  0,   0,   0,   0,   0,   2],
                [  0,   0,   0,   0,   2,  59],
                ...,
                [  0,   0,   2,  26, 254,   6],
                [  0,   2,  26, 254,   6, 134]]),
         array([[ 0., 0., 0., ..., 0., 0., 0.],
                [ 0., 0., 0., ..., 0., 0., 0.],
                ...,
                [ 0., 0., 0., ..., 0., 0., 0.]]))
    """
    X1, X2, y = list(), list(), list()
    # walk through each description for the image
    for desc in desc_list:
        # encode the caption as a sequence of integers
        seq = tokenizer.texts_to_sequences([desc])[0]
        # split one sequence into multiple X,y pairs
        for i in range(1, len(seq)):
            # split into input prefix and output word
            in_seq, out_seq = seq[:i], seq[i]
            # pad the input sequence to max_len
            in_seq = pad_sequences([in_seq], maxlen=max_len)[0]
            # one-hot encode the output word
            out_seq = to_categorical([out_seq], num_classes=vocab_size)[0]
            # store
            X1.append(photo_features)  # image input
            X2.append(in_seq)          # text input
            y.append(out_seq)          # expected output
    return array(X1), array(X2), array(y)


# data generator, consumed by model.fit_generator()
def data_generator(captions, photo_features, tokenizer, max_len, vocab_size=7378):
    """Create a training data generator, passed as the first argument of
    model.fit_generator(generator, ...).

    Args:
        captions: dict, key is the image name (without the .jpg suffix),
            value is the list of that image's training captions
        photo_features: dict, key is the image name (without the .jpg suffix),
            value is the image's feature vector
        tokenizer: keras.preprocessing.text.Tokenizer mapping English words to integers
        max_len: length of the longest caption in the training set
        vocab_size: number of words in the training set; must match the
            width of the model's softmax output

    Returns:
        generator yielding [[image features, caption prefixes], next words]
    """
    # loop forever over the images
    while 1:  # each iteration yields a new batch
        for key, desc_list in captions.items():
            # retrieve the photo feature
            photo_feature = photo_features[key]
            # pass vocab_size through so the one-hot targets match the model output
            in_img, in_seq, out_word = create_batches(desc_list, photo_feature, tokenizer, max_len, vocab_size)
            yield [[in_img, in_seq], out_word]


def caption_model(vocab_size, max_len):
    """Create a network model that generates captions for images.

    Args:
        vocab_size: number of distinct caption words in the training set
        max_len: length of the longest caption in the training set

    Returns:
        A model for generating image captions.
    """
    input1 = Input(shape=(4096,))
    dropout_1 = Dropout(0.5)(input1)
    dense_1 = Dense(256, activation='relu')(dropout_1)
    input2 = Input(shape=(max_len,))
    # Embedding turns positive integers into fixed-size dense vectors
    # (first layer of a model only); each word becomes a 256-dim vector
    embed_1 = Embedding(vocab_size, 256)(input2)
    lstm_1 = LSTM(256, activation='relu')(embed_1)
    add_1 = add([dense_1, lstm_1])
    dense_2 = Dense(256, activation='relu')(add_1)
    outputs = Dense(vocab_size, activation='softmax')(dense_2)  # vocab_size is 7579 here
    model = Model(inputs=[input1, input2], outputs=outputs)
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    model.summary()
    return model


def train():
    # load training dataset (6K)
    filename = 'Flickr_8k.trainImages.txt'
    train = util.load_ids(filename)
    print('Dataset: %d' % len(train))
    train_captions = util.load_clean_captions('descriptions.txt', train)
    print('Captions: train number=%d' % len(train_captions))
    # photo features
    train_features = util.load_photo_features('features.pkl', train)
    print('Photos: train=%d' % len(train_features))
    # prepare tokenizer
    tokenizer = load(open('tokenizer.pkl', 'rb'))
    vocab_size = len(tokenizer.word_index) + 1
    print('Vocabulary Size: %d' % vocab_size)
    # determine the maximum sequence length
    max_len = util.get_max_length(train_captions)
    print('Description Length: %d' % max_len)
    # define the model
    model = caption_model(vocab_size, max_len)
    print("load model...")
    # train the model: run epochs manually and save after each epoch
    epochs = 20
    steps = len(train_captions)
    for i in range(epochs):
        # create the data generator (each yield produces one batch)
        generator = data_generator(train_captions, train_features, tokenizer, max_len, vocab_size)
        print(i, generator)
        # fit for one epoch with fit_generator
        model.fit_generator(generator, epochs=1, steps_per_epoch=steps, verbose=1)
        # save the model after this epoch
        model.save('model_' + str(i) + '.h5')


if __name__ == "__main__":
    train()
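To make the prefix expansion in create_batches concrete, here is a small sketch with a toy tokenizer, run in the same module as the listing above. The caption and the 4-dim "feature vector" are the same illustrative values as in the docstring; the exact integer indices depend on how the tokenizer was fitted:

from keras.preprocessing.text import Tokenizer

# toy setup, for illustration only
tokenizer = Tokenizer()
tokenizer.fit_on_texts(['startseq one dog on desk endseq'])
vocab_size = len(tokenizer.word_index) + 1  # 7 here: 6 words + the reserved index 0
X1, X2, y = create_batches(['startseq one dog on desk endseq'],
                           [0.434, 0.534, 0.212, 0.98],
                           tokenizer, max_len=6, vocab_size=vocab_size)
# the 6-word caption yields 5 (prefix -> next word) pairs:
#   (image, 'startseq')                 -> 'one'
#   (image, 'startseq one')             -> 'dog'
#   ...
#   (image, 'startseq one dog on desk') -> 'endseq'
print(X1.shape, X2.shape, y.shape)  # (5, 4) features, (5, 6) prefixes, (5, 7) one-hot next words

One caveat: Model.fit_generator is deprecated in TensorFlow 2.x Keras; there, model.fit accepts the same generator and steps_per_epoch arguments directly.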
