从本小节开始,打算以一种实例的方式来讲解API,以实例应用为线索,遇到新的API再逐一讲解。
Sequential()构建简单模型
# Build a simple model by stacking layers with the Sequential API.
import tensorflow as tf  # was missing in the original snippet, but `tf` is used below
from tensorflow.keras import layers

model = tf.keras.Sequential()
model.add(layers.Dense(32, activation='tanh'))
model.add(layers.Dense(32, activation='relu'))
model.add(layers.Dense(10, activation='softmax'))
# NOTE(review): input_shape on the *last* added layer is unusual — normally it
# belongs on the first layer; kept as in the original tutorial.
model.add(layers.Dense(32, input_shape=(16,)))
model
# <tensorflow.python.keras.engine.sequential.Sequential at 0x1fcc0aada20>
layers.Dense()在这里相当于一个全连接层.其输出是:output = activation(dot(input, kernel) + bias),很正常的前向传播过程。layers.Dense()的参数如下:
- units 表示输出空间的维度,即某个样本的输出维度。
- activation 激活函数,如果没有指定,则使用恒等激活 a(x)=x.
- use_bias 是否使用偏置项,默认为True
- kernel_initializer kernel权重矩阵的初始化器,默认为"glorot_uniform",可以理解为均匀分布
- bias_initializer 偏置项初始值,默认为0
- kernel_regularizer 应用于kernel权重矩阵的正则函数,默认为None
- bias_regularizer bias的正则函数,默认为None
- activity_regularizer 激活函数的正则,默认None
- kernel_constraint kernel的限制函数,默认为None
- bias_constraint bias的限制函数,默认为None
值得一提的是:不在参数列表中的两个参数input_shape和input_dim.
- input_shape: 元组,指定单个样本的输入形状(不含 batch 维)。例如 input_shape=(16,) 表示每个样本是 16 维向量;最常见的输入是形状为 (batch_size, input_dim) 的 2D 张量。
- input_dim: 整数,当输入是 2D 张量 (batch_size, input_dim) 时可用它代替 input_shape,等价于 input_shape=(input_dim,);对应的输出形状为 (batch_size, units)。
配置模型网络参数
# Common ways to configure a Dense layer.
from tensorflow.keras.layers import Dense

# Activations: by name, or as a callable. Prefer the Keras namespace
# (tf.keras.activations.sigmoid) over the bare tf.sigmoid alias, for
# consistency with the other tf.keras.* objects used below.
layers.Dense(32, activation='relu')
layers.Dense(32, activation=tf.keras.activations.sigmoid)

# Kernel initializers: by name, or as an initializer object.
layers.Dense(32, kernel_initializer='orthogonal')
layers.Dense(32, kernel_initializer=tf.keras.initializers.glorot_normal)

# Kernel regularizers: L2 and L1 weight penalties.
layers.Dense(32, kernel_regularizer=tf.keras.regularizers.l2(0.02))
layers.Dense(32, kernel_regularizer=tf.keras.regularizers.l1(0.01))
模型训练的完整过程
模型构建完成之后,需要调用 compile 进行编译,配置优化器、损失函数和评估指标,之后才能训练。
# Build and compile the model used in the training walkthrough below.
model = tf.keras.Sequential()
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(64, activation='tanh'))
# 10 output units, matching the 10-dim labels generated below; the printed
# summary (dense_2: 650 params = 64*10 + 10) confirms the original model
# had 10 units — the "16" in the transcript was a typo.
model.add(layers.Dense(10, activation='softmax'))
model.compile(optimizer=tf.keras.optimizers.Adam(0.001),
              loss=tf.keras.losses.categorical_hinge,
              # metrics expects metric *instances*: Recall(), not the class Recall
              metrics=[tf.keras.metrics.Recall()])
随机生成数据,进行模型训练
import numpy as np

# Random data for demonstration only. The stray ">>>" REPL prompts in the
# original transcript were removed so the snippet runs as a script.
train_x = np.random.random((1000, 128))
train_y = np.random.random((1000, 10))
val_x = np.random.random((200, 128))
val_y = np.random.random((200, 10))
model.fit(train_x, train_y, epochs=5, batch_size=200,
          validation_data=(val_x, val_y))
model.summary()
Model: "sequential_1"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
dense (Dense) multiple 8256
_________________________________________________________________
dense_1 (Dense) multiple 4160
_________________________________________________________________
dense_2 (Dense) multiple 650
=================================================================
Total params: 13,066
Trainable params: 13,066
Non-trainable params: 0
现在换个方式,使用tf.data处理输入数据
# Feed the same data through tf.data pipelines instead of raw NumPy arrays.
dataset = tf.data.Dataset.from_tensor_slices((train_x, train_y)).batch(32).repeat()
val_dataset = tf.data.Dataset.from_tensor_slices((val_x, val_y)).batch(32).repeat()

# With infinite (repeated) datasets, steps_per_epoch / validation_steps
# tell fit() how many batches make up one epoch / one validation pass.
model.fit(dataset, epochs=10, steps_per_epoch=30,
          validation_data=val_dataset, validation_steps=3)
模型预测与评估
# Evaluate and predict — first with NumPy arrays, then with a tf.data pipeline.
x = np.random.random((1000, 128))
y = np.random.random((1000, 10))
model.evaluate(x, y, batch_size=32)

test_data = tf.data.Dataset.from_tensor_slices((x, y)).batch(32).repeat()
model.evaluate(test_data, steps=30)

model.predict(x, batch_size=10)
利用tf.keras.Model()构建模型
上文中我们是用tf.keras.Sequential()构建模型的,下面来用tf.keras.Model(函数式API)构建简单模型。
# Build the same kind of model with the functional API (tf.keras.Model).
# Renamed the local `input` -> `inputs`: `input` shadows the Python builtin.
inputs = tf.keras.Input(shape=(72,))
h1 = layers.Dense(units=64, activation="tanh")(inputs)
h2 = layers.Dense(units=32, activation="relu")(h1)
pre = layers.Dense(units=8, activation="softmax")(h2)
model = tf.keras.Model(inputs=inputs, outputs=pre)

model.compile(optimizer=tf.keras.optimizers.Adam(0.001),
              loss=tf.keras.losses.categorical_crossentropy,
              metrics=["accuracy"])

# Random data matching the (72,) input and 8-unit output.
x = np.random.random((1000, 72))
y = np.random.random((1000, 8))
model.fit(x, y, batch_size=32, epochs=5)
模型子类化
通过对 tf.keras.Model 进行子类化并定义您自己的前向传播来构建完全可自定义的模型。在 __init__ 方法中创建层并将它们设置为类实例的属性。在 call 方法中定义前向传播。
class MyModel(tf.keras.Model):
    """Subclassed model: two Dense layers with a custom forward pass."""

    def __init__(self, num_classes=10):
        super(MyModel, self).__init__(name='my_model')
        self.num_classes = num_classes
        # Layers are created once in __init__ and reused in call().
        self.layer1 = layers.Dense(32, activation='relu')
        self.layer2 = layers.Dense(num_classes, activation='softmax')

    def call(self, inputs):
        """Forward pass: Dense(32, relu) -> Dense(num_classes, softmax)."""
        h1 = self.layer1(inputs)
        out = self.layer2(h1)
        return out

    def compute_output_shape(self, input_shape):
        """Output keeps the input shape except the last axis = num_classes."""
        # Fixed typo from the original: tf.TensorShapej -> tf.TensorShape
        # (the misspelled name would raise AttributeError at runtime).
        shape = tf.TensorShape(input_shape).as_list()
        shape[-1] = self.num_classes
        return tf.TensorShape(shape)
# Instantiate, compile, and train the subclassed model.
model = MyModel(num_classes=10)
model.compile(optimizer=tf.keras.optimizers.RMSprop(0.001),
              loss=tf.keras.losses.categorical_crossentropy,
              metrics=['accuracy'])
model.fit(train_x, train_y, batch_size=16, epochs=5)
自定义层
通过对 tf.keras.layers.Layer 进行子类化并实现以下方法来创建自定义层:
build:创建层的权重。使用 add_weight 方法添加权重。
call:定义前向传播。
compute_output_shape:指定在给定输入形状的情况下如何计算层的输出形状。
或者,可以通过实现 get_config 方法和 from_config 类方法序列化层。
class MyLayer(layers.Layer):
    """Custom layer: a bias-free linear projection to `output_dim` units."""

    def __init__(self, output_dim, **kwargs):
        self.output_dim = output_dim
        super(MyLayer, self).__init__(**kwargs)

    def build(self, input_shape):
        # Weights are created lazily, once the input feature size is known.
        kernel_shape = tf.TensorShape((input_shape[1], self.output_dim))
        self.kernel = self.add_weight(name='kernel1', shape=kernel_shape,
                                      initializer='uniform', trainable=True)
        super(MyLayer, self).build(input_shape)

    def call(self, inputs):
        # Forward pass: plain matrix multiplication, no bias term.
        return tf.matmul(inputs, self.kernel)

    def compute_output_shape(self, input_shape):
        # Same shape as the input except for the last (feature) axis.
        out_shape = tf.TensorShape(input_shape).as_list()
        out_shape[-1] = self.output_dim
        return tf.TensorShape(out_shape)

    def get_config(self):
        # Record output_dim so the layer can be re-created from its config.
        config = super(MyLayer, self).get_config()
        config['output_dim'] = self.output_dim
        return config

    @classmethod
    def from_config(cls, config):
        return cls(**config)
# Use the custom layer inside a Sequential model.
model = tf.keras.Sequential([
    MyLayer(10),
    layers.Activation('softmax'),
])
model.compile(optimizer=tf.keras.optimizers.RMSprop(0.001),
              loss=tf.keras.losses.categorical_crossentropy,
              metrics=['accuracy'])
model.fit(train_x, train_y, batch_size=16, epochs=5)
回调
# Callbacks: stop early when val_loss stalls, and log for TensorBoard.
callbacks = [
    tf.keras.callbacks.EarlyStopping(patience=2, monitor='val_loss'),
    tf.keras.callbacks.TensorBoard(log_dir='./logs'),
]
model.fit(train_x, train_y, batch_size=16, epochs=5,
          callbacks=callbacks, validation_data=(val_x, val_y))
权重保存
# Weight-only persistence.
model = tf.keras.Sequential([
    layers.Dense(64, activation='relu'),
    layers.Dense(10, activation='softmax'),
])
model.compile(optimizer=tf.keras.optimizers.Adam(0.001),
              loss='categorical_crossentropy',
              metrics=['accuracy'])

# TensorFlow checkpoint format (path without an extension).
# NOTE(review): the model is neither built nor trained at this point —
# confirm the tutorial intended save_weights before any fit() call.
model.save_weights('./weights/model')
model.load_weights('./weights/model')

# Keras HDF5 format (".h5" extension selects it).
model.save_weights('./model.h5')
model.load_weights('./model.h5')
保存整个模型
# Save and restore the entire model (architecture + weights + optimizer state).
model = tf.keras.Sequential([
    layers.Dense(10, activation='softmax', input_shape=(72,)),
    layers.Dense(10, activation='softmax'),
])
model.compile(optimizer='rmsprop',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
# NOTE(review): train_x earlier in the tutorial is 128-dim while this model
# declares input_shape=(72,) — verify which shape the author intended.
model.fit(train_x, train_y, batch_size=32, epochs=5)

model.save('all_model.h5')                          # one HDF5 file holds everything
model = tf.keras.models.load_model('all_model.h5')  # rebuilds the compiled model
将keras用于Estimator
Estimator API 用于针对分布式环境训练模型。它适用于一些行业使用场景,例如用大型数据集进行分布式训练并导出模型以用于生产.
# Convert a compiled Keras model into an Estimator for distributed training.
model = tf.keras.Sequential([
    layers.Dense(10, activation='softmax'),
    layers.Dense(10, activation='softmax'),
])
model.compile(optimizer=tf.keras.optimizers.RMSprop(0.001),
              loss='categorical_crossentropy',
              metrics=['accuracy'])
estimator = tf.keras.estimator.model_to_estimator(model)
本文内容参考自:
https://blog.csdn.net/qq_31456593/article/details/88377117
官方文档