A simple PyTorch model

1. Create a fully connected layer

model = torch.nn.Linear(1, 1)
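Immediately after construction, the layer already holds randomly initialized parameters; weight and bias are the actual attribute names on nn.Linear, so you can inspect them directly:

import torch

model = torch.nn.Linear(1, 1)
print(model.weight.shape)  # torch.Size([1, 1]) - (out_features, in_features)
print(model.bias.shape)    # torch.Size([1])   - (out_features,)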

2. Pass in the data. model is a callable object, so calling it like a function runs the forward pass and returns y_predict

y_predict = model(x_data)
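Putting the two steps together, a minimal end-to-end sketch (x_data is not defined in the original, so a made-up batch of 3 one-feature samples is used here):

import torch

model = torch.nn.Linear(1, 1)

# hypothetical input: a batch of 3 samples, 1 feature each
x_data = torch.tensor([[1.0], [2.0], [3.0]])

y_predict = model(x_data)
print(y_predict.shape)  # torch.Size([3, 1])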

3. Source code (a simplified excerpt of torch.nn)

class Module(object):
	# When a subclass instance is called like a function, __call__
	# dispatches to the forward method the subclass overrides
	def __call__(self, *input, **kwargs):
		result = self.forward(*input, **kwargs)
		return result

class Linear(Module):
	# The constructor initializes the weight matrix W and the bias b
	def __init__(self, in_features, out_features, bias=True):
		super(Linear, self).__init__()
		self.in_features = in_features
		self.out_features = out_features
		self.weight = Parameter(torch.Tensor(out_features, in_features))
		if bias:
			self.bias = Parameter(torch.Tensor(out_features))
		else:
			self.bias = None
	# The overridden forward method; it computes x @ W^T (+ b)
	def forward(self, x):
		output = x.matmul(self.weight.t())
		if self.bias is not None:
			output += self.bias
		return output

To define a neural-network layer, subclass nn.Module and override its forward method; forward takes the input and returns the layer's output.
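As a concrete illustration, here is a toy layer (the class name ScaleLayer and its single parameter are made up for this example) showing the __call__-to-forward dispatch:

import torch
import torch.nn as nn

class ScaleLayer(nn.Module):
	# a toy layer that multiplies its input by one learnable scalar
	def __init__(self):
		super(ScaleLayer, self).__init__()
		self.scale = nn.Parameter(torch.tensor(2.0))

	def forward(self, x):
		return x * self.scale

layer = ScaleLayer()
y = layer(torch.ones(4))  # nn.Module.__call__ runs forward
print(y)  # tensor([2., 2., 2., 2.], grad_fn=<MulBackward0>)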

4. A simple way to define a neural network

class ResNet(nn.Module):
	# The constructor builds the network's layers (simplified excerpt of
	# torchvision's ResNet; layer3 and layer4 are omitted here)
	def __init__(self, block, layers, num_classes=1000, zero_init_residual=False,
				groups=1, width_per_group=64, replace_stride_with_dilation=None,
				norm_layer=None):
		super(ResNet, self).__init__()
		if norm_layer is None:
			norm_layer = nn.BatchNorm2d
		if replace_stride_with_dilation is None:
			replace_stride_with_dilation = [False, False, False]
		self.inplanes = 64
		self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2,
							padding=3, bias=False)
		self.bn1 = norm_layer(self.inplanes)
		self.relu = nn.ReLU(inplace=True)
		self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
		self.layer1 = self._make_layer(block, 64, layers[0])
		self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
										dilate=replace_stride_with_dilation[0])
		# with layer3/layer4 omitted, the final stage has 128 * expansion channels
		self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
		self.fc = nn.Linear(128 * block.expansion, num_classes)
	
	def forward(self, x):
		x = self.conv1(x)
		x = self.bn1(x)
		x = self.relu(x)
		x = self.maxpool(x)
		
		x = self.layer1(x)
		x = self.layer2(x)

		x = self.avgpool(x)
		x = torch.flatten(x, 1)
		x = self.fc(x)
		
		return x

Usage:

1. Instantiate a ResNet

model = torchvision.models.resnet18()

(Note that ResNet lives in torchvision.models, not torch.nn; resnet18() builds the full ResNet class with BasicBlock and layers=[2, 2, 2, 2].)

2. Pass in the data; model is invoked as a callable, running the forward pass and returning y_predict

y_predict = model(x_data)
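A quick end-to-end check with a random dummy batch (shapes follow the standard ImageNet convention of 3x224x224 RGB inputs):

import torch
import torchvision

model = torchvision.models.resnet18()

# hypothetical input: a batch of 2 RGB images, 224x224 each
x_data = torch.randn(2, 3, 224, 224)

y_predict = model(x_data)
print(y_predict.shape)  # torch.Size([2, 1000]) - one logit per ImageNet class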

Reposted from blog.csdn.net/qq_41754907/article/details/115177401