Graph Neural Networks (4): Graph Classification (4) — Complete Code for the Graph Classification Example

Complete Code

Code Overview

The implementation is written for a Jupyter Notebook: install and import the packages below, then run the cells in order.

Setup

!pip install --verbose --no-cache-dir torch-scatter
import os
import urllib.request
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
import torch.utils.data as data
import torch.optim as optim
import numpy as np
import scipy.sparse as sp
from zipfile import ZipFile
from sklearn.model_selection import train_test_split
import pickle
import pandas as pd
import torch_scatter
from collections import Counter

Utility Functions

def tensor_from_numpy(x, device):
    return torch.from_numpy(x).to(device)


def normalization(adjacency):
    """计算 L=D^-0.5 * (A+I) * D^-0.5,

    Args:
        adjacency: sp.csr_matrix.

    Returns:
        归一化后的邻接矩阵,类型为 torch.sparse.FloatTensor
    """
    adjacency += sp.eye(adjacency.shape[0])    # 增加自连接
    degree = np.array(adjacency.sum(1))
    d_hat = sp.diags(np.power(degree, -0.5).flatten())
    L = d_hat.dot(adjacency).dot(d_hat).tocoo()
    # 转换为 torch.sparse.FloatTensor
    indices = torch.from_numpy(np.asarray([L.row, L.col])).long()
    values = torch.from_numpy(L.data.astype(np.float32))
    tensor_adjacency = torch.sparse.FloatTensor(indices, values, L.shape)
    return tensor_adjacency
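
A minimal sanity check of normalization, not from the original post (the toy_* names are illustrative and the imports above are assumed): normalize a 3-node path graph 0-1-2 and print the dense result.

toy_edges = np.array([[0, 1], [1, 0], [1, 2], [2, 1]])
toy_adjacency = sp.csr_matrix((np.ones(len(toy_edges), dtype=np.float32),
                               (toy_edges[:, 0], toy_edges[:, 1])),
                              shape=(3, 3))
toy_norm = normalization(toy_adjacency)     # torch.sparse.FloatTensor of shape (3, 3)
print(toy_norm.to_dense())                  # symmetric matrix D^-0.5 (A+I) D^-0.5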

D&D数据

"""D&D is a dataset of 1178 protein structures (Dobson and Doig, 2003). Each protein is represented by a graph, in which the nodes are amino acids and two nodes are connected by an edge if they are less than 6 Angstroms apart. The prediction task is to classify the protein structures into enzymes and non-enzymes.
"""
class DDDataset(object):
    url = "https://ls11-www.cs.tu-dortmund.de/people/morris/graphkerneldatasets/DD.zip"
    
    def __init__(self, data_root="data", train_size=0.8):
        self.data_root = data_root
        self.maybe_download()
        sparse_adjacency, node_labels, graph_indicator, graph_labels = self.read_data()
        self.sparse_adjacency = sparse_adjacency.tocsr()
        self.node_labels = node_labels
        self.graph_indicator = graph_indicator
        self.graph_labels = graph_labels
        self.train_index, self.test_index = self.split_data(train_size)
        self.train_label = graph_labels[self.train_index]
        self.test_label = graph_labels[self.test_index]

    def split_data(self, train_size):
        unique_indicator = np.asarray(list(set(self.graph_indicator)))
        train_index, test_index = train_test_split(unique_indicator,
                                                   train_size=train_size,
                                                   random_state=1234)
        return train_index, test_index
    
    def __getitem__(self, index):
        mask = self.graph_indicator == index
        node_labels = self.node_labels[mask]
        graph_indicator = self.graph_indicator[mask]
        graph_labels = self.graph_labels[index]
        adjacency = self.sparse_adjacency[mask, :][:, mask]
        return adjacency, node_labels, graph_indicator, graph_labels
    
    def __len__(self):
        return len(self.graph_labels)
    
    def read_data(self):
        data_dir = os.path.join(self.data_root, "DD")
        print("Loading DD_A.txt")
        adjacency_list = np.genfromtxt(os.path.join(data_dir, "DD_A.txt"),
                                       dtype=np.int64, delimiter=',') - 1
        print("Loading DD_node_labels.txt")
        node_labels = np.genfromtxt(os.path.join(data_dir, "DD_node_labels.txt"), 
                                    dtype=np.int64) - 1
        print("Loading DD_graph_indicator.txt")
        graph_indicator = np.genfromtxt(os.path.join(data_dir, "DD_graph_indicator.txt"), 
                                        dtype=np.int64) - 1
        print("Loading DD_graph_labels.txt")
        graph_labels = np.genfromtxt(os.path.join(data_dir, "DD_graph_labels.txt"), 
                                     dtype=np.int64) - 1
        num_nodes = len(node_labels)
        sparse_adjacency = sp.coo_matrix((np.ones(len(adjacency_list)), 
                                          (adjacency_list[:, 0], adjacency_list[:, 1])),
                                         shape=(num_nodes, num_nodes), dtype=np.float32)
        print("Number of nodes: ", num_nodes)
        return sparse_adjacency, node_labels, graph_indicator, graph_labels
    
    def maybe_download(self):
        save_path = os.path.join(self.data_root)
        if not os.path.exists(save_path):
            self.download_data(self.url, save_path)
        if not os.path.exists(os.path.join(self.data_root, "DD")):
            zipfilename = os.path.join(self.data_root, "DD.zip")
            with ZipFile(zipfilename, "r") as zipobj:
                zipobj.extractall(os.path.join(self.data_root))
                print("Extracting data from {}".format(zipfilename))
    
    @staticmethod
    def download_data(url, save_path):
        """数据下载工具,当原始数据不存在时将会进行下载"""
        print("Downloading data from {}".format(url))
        if not os.path.exists(save_path):
            os.makedirs(save_path)
        data = urllib.request.urlopen(url)
        filename = "DD.zip"
        with open(os.path.join(save_path, filename), 'wb') as f:
            f.write(data.read())
        return True
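
A small usage sketch, not part of the original post: instantiate the dataset and inspect one graph via __getitem__. Running it triggers the download of DD.zip on first use.

dataset = DDDataset()
print("number of graphs:", len(dataset))
adj, node_labels, graph_indicator, graph_label = dataset[0]
print("graph 0 adjacency:", adj.shape, "label:", graph_label)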

Model Definition

GraphConvolution

class GraphConvolution(nn.Module):
    def __init__(self, input_dim, output_dim, use_bias=True):
        """图卷积:L*X*\theta

        Args:
        ----------
            input_dim: int
                节点输入特征的维度
            output_dim: int
                输出特征维度
            use_bias : bool, optional
                是否使用偏置
        """
        super(GraphConvolution, self).__init__()
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.use_bias = use_bias
        self.weight = nn.Parameter(torch.Tensor(input_dim, output_dim))
        if self.use_bias:
            self.bias = nn.Parameter(torch.Tensor(output_dim))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        init.kaiming_uniform_(self.weight)
        if self.use_bias:
            init.zeros_(self.bias)

    def forward(self, adjacency, input_feature):
        """邻接矩阵是稀疏矩阵,因此在计算时使用稀疏矩阵乘法"""
        support = torch.mm(input_feature, self.weight)
        output = torch.sparse.mm(adjacency, support)
        if self.use_bias:
            output += self.bias
        return output

    def __repr__(self):
        return self.__class__.__name__ + ' (' \
            + str(self.input_dim) + ' -> ' \
            + str(self.output_dim) + ')'
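
A minimal shape check (illustrative, reusing the hypothetical toy_norm from the normalization example above): one GraphConvolution layer mapping 4-dimensional node features to 2 dimensions on the 3-node toy graph.

toy_gcn = GraphConvolution(input_dim=4, output_dim=2)
toy_features = torch.randn(3, 4)              # 3 nodes, 4 features each
print(toy_gcn(toy_norm, toy_features).shape)  # torch.Size([3, 2])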

Readout Implementation

def global_max_pool(x, graph_indicator):
    num = graph_indicator.max().item() + 1
    return torch_scatter.scatter_max(x, graph_indicator, dim=0, dim_size=num)[0]


def global_avg_pool(x, graph_indicator):
    num = graph_indicator.max().item() + 1
    return torch_scatter.scatter_mean(x, graph_indicator, dim=0, dim_size=num)
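
An illustrative example of the readout functions, not in the original post: 5 nodes split across 2 graphs, where nodes 0-2 belong to graph 0 and nodes 3-4 to graph 1.

x = torch.arange(10, dtype=torch.float32).view(5, 2)
indicator = torch.tensor([0, 0, 0, 1, 1])
print(global_avg_pool(x, indicator))   # per-graph means:  [[2., 3.], [7., 8.]]
print(global_max_pool(x, indicator))   # per-graph maxima: [[4., 5.], [8., 9.]]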

Self-Attention Pooling Layer

def top_rank(attention_score, graph_indicator, keep_ratio):
    """基于给定的attention_score, 对每个图进行pooling操作.
    为了直观体现pooling过程,我们将每个图单独进行池化,最后再将它们级联起来进行下一步计算
    
    Arguments:
    ----------
        attention_score:torch.Tensor
            使用GCN计算出的注意力分数,Z = GCN(A, X)
        graph_indicator:torch.Tensor
            指示每个节点属于哪个图
        keep_ratio: float
            要保留的节点比例,保留的节点数量为int(N * keep_ratio)
    """
    # TODO: 确认是否是有序的, 必须是有序的
    graph_id_list = list(set(graph_indicator.cpu().numpy()))
    mask = attention_score.new_empty((0,), dtype=torch.bool)
    for graph_id in graph_id_list:
        graph_attn_score = attention_score[graph_indicator == graph_id]
        graph_node_num = len(graph_attn_score)
        graph_mask = attention_score.new_zeros((graph_node_num,),
                                                dtype=torch.bool)
        keep_graph_node_num = int(keep_ratio * graph_node_num)
        _, sorted_index = graph_attn_score.sort(descending=True)
        graph_mask[sorted_index[:keep_graph_node_num]] = True
        mask = torch.cat((mask, graph_mask))
    
    return mask
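
A toy run of top_rank (illustrative, not from the original post): two graphs with 4 and 2 nodes; with keep_ratio=0.5, the top-scoring half of each graph is kept.

scores = torch.tensor([0.1, 0.9, 0.5, 0.3, 0.2, 0.8])
indicator = torch.tensor([0, 0, 0, 0, 1, 1])
print(top_rank(scores, indicator, keep_ratio=0.5))
# expected mask: [False, True, True, False, False, True]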

def filter_adjacency(adjacency, mask):
    """Update the graph structure according to the node mask.

    Args:
        adjacency: torch.sparse.FloatTensor, adjacency matrix before pooling
        mask: torch.Tensor(dtype=torch.bool), node mask vector

    Returns:
        torch.sparse.FloatTensor, the normalized adjacency matrix after pooling
    """
    device = adjacency.device
    mask = mask.cpu().numpy()
    indices = adjacency.coalesce().indices().cpu().numpy()
    num_nodes = adjacency.size(0)
    row, col = indices
    maskout_self_loop = row != col
    row = row[maskout_self_loop]
    col = col[maskout_self_loop]
    sparse_adjacency = sp.csr_matrix((np.ones(len(row)), (row, col)),
                                     shape=(num_nodes, num_nodes), dtype=np.float32)
    filtered_adjacency = sparse_adjacency[mask, :][:, mask]
    return normalization(filtered_adjacency).to(device)

class SelfAttentionPooling(nn.Module):
    def __init__(self, input_dim, keep_ratio, activation=torch.tanh):
        super(SelfAttentionPooling, self).__init__()
        self.input_dim = input_dim
        self.keep_ratio = keep_ratio
        self.activation = activation
        self.attn_gcn = GraphConvolution(input_dim, 1)
    
    def forward(self, adjacency, input_feature, graph_indicator):
        attn_score = self.attn_gcn(adjacency, input_feature).squeeze()
        attn_score = self.activation(attn_score)
        
        mask = top_rank(attn_score, graph_indicator, self.keep_ratio)
        hidden = input_feature[mask] * attn_score[mask].view(-1, 1)
        mask_graph_indicator = graph_indicator[mask]
        mask_adjacency = filter_adjacency(adjacency, mask)
        return hidden, mask_graph_indicator, mask_adjacency
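
An illustrative pooling step (again reusing the hypothetical toy_norm from the normalization example): a single 3-node graph with keep_ratio=0.5, so int(0.5 * 3) = 1 node survives.

toy_pool = SelfAttentionPooling(input_dim=4, keep_ratio=0.5)
toy_features = torch.randn(3, 4)
toy_indicator = torch.zeros(3, dtype=torch.long)
h, ind, adj = toy_pool(toy_norm, toy_features, toy_indicator)
print(h.shape, ind, adj.shape)   # torch.Size([1, 4]) tensor([0]) torch.Size([1, 1])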

Model 1: SAGPool Global Model

class ModelA(nn.Module):
    def __init__(self, input_dim, hidden_dim, num_classes=2):
        """图分类模型结构A
        
        Args:
        ----
            input_dim: int, 输入特征的维度
            hidden_dim: int, 隐藏层单元数
            num_classes: 分类类别数 (default: 2)
        """
        super(ModelA, self).__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.num_classes = num_classes
        
        self.gcn1 = GraphConvolution(input_dim, hidden_dim)
        self.gcn2 = GraphConvolution(hidden_dim, hidden_dim)
        self.gcn3 = GraphConvolution(hidden_dim, hidden_dim)
        self.pool = SelfAttentionPooling(hidden_dim * 3, 0.5)
        self.fc1 = nn.Linear(hidden_dim * 3 * 2, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, hidden_dim // 2)
        self.fc3 = nn.Linear(hidden_dim // 2, num_classes)

    def forward(self, adjacency, input_feature, graph_indicator):
        gcn1 = F.relu(self.gcn1(adjacency, input_feature))
        gcn2 = F.relu(self.gcn2(adjacency, gcn1))
        gcn3 = F.relu(self.gcn3(adjacency, gcn2))
        
        gcn_feature = torch.cat((gcn1, gcn2, gcn3), dim=1)
        pool, pool_graph_indicator, pool_adjacency = self.pool(adjacency, gcn_feature,
                                                               graph_indicator)
        
        readout = torch.cat((global_avg_pool(pool, pool_graph_indicator),
                             global_max_pool(pool, pool_graph_indicator)), dim=1)
        
        fc1 = F.relu(self.fc1(readout))
        fc2 = F.relu(self.fc2(fc1))
        logits = self.fc3(fc2)
        
        return logits
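
A quick shape check for ModelA (illustrative, not part of the original post; the toy_* names are made up): two toy graphs with 3 nodes each and random 4-dimensional features; the output is one logit pair per graph.

toy_adj = normalization(sp.csr_matrix(
    (np.ones(6), ([0, 1, 1, 2, 3, 4], [1, 0, 2, 1, 4, 3])), shape=(6, 6)))
toy_features = torch.randn(6, 4)
toy_indicator = torch.tensor([0, 0, 0, 1, 1, 1])
toy_model_a = ModelA(input_dim=4, hidden_dim=8)
print(toy_model_a(toy_adj, toy_features, toy_indicator).shape)  # torch.Size([2, 2])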

Model 2: SAGPool Hierarchical Model

class ModelB(nn.Module):
    def __init__(self, input_dim, hidden_dim, num_classes=2):
        """图分类模型结构
        
        Args:
        -----
            input_dim: int, 输入特征的维度
            hidden_dim: int, 隐藏层单元数
            num_classes: int, 分类类别数 (default: 2)
        """
        super(ModelB, self).__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.num_classes = num_classes
        
        self.gcn1 = GraphConvolution(input_dim, hidden_dim)
        self.pool1 = SelfAttentionPooling(hidden_dim, 0.5)
        self.gcn2 = GraphConvolution(hidden_dim, hidden_dim)
        self.pool2 = SelfAttentionPooling(hidden_dim, 0.5)
        self.gcn3 = GraphConvolution(hidden_dim, hidden_dim)
        self.pool3 = SelfAttentionPooling(hidden_dim, 0.5)
        
        self.mlp = nn.Sequential(
            nn.Linear(hidden_dim * 2, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, hidden_dim // 2),
            nn.ReLU(), 
            nn.Linear(hidden_dim // 2, num_classes))
    
    def forward(self, adjacency, input_feature, graph_indicator):
        gcn1 = F.relu(self.gcn1(adjacency, input_feature))
        pool1, pool1_graph_indicator, pool1_adjacency = \
            self.pool1(adjacency, gcn1, graph_indicator)
        global_pool1 = torch.cat(
            [global_avg_pool(pool1, pool1_graph_indicator),
             global_max_pool(pool1, pool1_graph_indicator)],
            dim=1)
        
        gcn2 = F.relu(self.gcn2(pool1_adjacency, pool1))
        pool2, pool2_graph_indicator, pool2_adjacency = \
            self.pool2(pool1_adjacency, gcn2, pool1_graph_indicator)
        global_pool2 = torch.cat(
            [global_avg_pool(pool2, pool2_graph_indicator),
             global_max_pool(pool2, pool2_graph_indicator)],
            dim=1)

        gcn3 = F.relu(self.gcn3(pool2_adjacency, pool2))
        pool3, pool3_graph_indicator, pool3_adjacency = \
            self.pool3(pool2_adjacency, gcn3, pool2_graph_indicator)
        global_pool3 = torch.cat(
            [global_avg_pool(pool3, pool3_graph_indicator),
             global_max_pool(pool3, pool3_graph_indicator)],
            dim=1)
        
        readout = global_pool1 + global_pool2 + global_pool3
        
        logits = self.mlp(readout)
        return logits
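
A quick shape check for ModelB (illustrative, not from the original post): two ring graphs with 8 nodes each, so the three pooling layers with keep_ratio=0.5 leave 4, 2 and finally 1 node per graph; much smaller graphs would collapse to zero nodes after three rounds of pooling.

ring = np.arange(8)
row = np.concatenate([ring, ring + 8])
col = np.concatenate([(ring + 1) % 8, (ring + 1) % 8 + 8])
row_sym = np.concatenate([row, col])       # make the ring edges undirected
col_sym = np.concatenate([col, row])
toy_adj = normalization(sp.csr_matrix((np.ones(len(row_sym)), (row_sym, col_sym)),
                                      shape=(16, 16)))
toy_features = torch.randn(16, 4)
toy_indicator = torch.tensor([0] * 8 + [1] * 8)
toy_model_b = ModelB(input_dim=4, hidden_dim=8)
print(toy_model_b(toy_adj, toy_features, toy_indicator).shape)  # torch.Size([2, 2])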

训练&测试

dataset = DDDataset()
# Prepare the model inputs
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
adjacency = dataset.sparse_adjacency
normalize_adjacency = normalization(adjacency).to(DEVICE)
node_labels = tensor_from_numpy(dataset.node_labels, DEVICE)
node_features = F.one_hot(node_labels, node_labels.max().item() + 1).float()
graph_indicator = tensor_from_numpy(dataset.graph_indicator, DEVICE)
graph_labels = tensor_from_numpy(dataset.graph_labels, DEVICE)
train_index = tensor_from_numpy(dataset.train_index, DEVICE)
test_index = tensor_from_numpy(dataset.test_index, DEVICE)
train_label = tensor_from_numpy(dataset.train_label, DEVICE)
test_label = tensor_from_numpy(dataset.test_label, DEVICE)
# Hyperparameters
INPUT_DIM = node_features.size(1)
NUM_CLASSES = 2
EPOCHS = 200
HIDDEN_DIM = 32
LEARNING_RATE = 0.01
WEIGHT_DECAY = 0.0001
# Model initialization
model_g = ModelA(INPUT_DIM, HIDDEN_DIM, NUM_CLASSES).to(DEVICE)
model_h = ModelB(INPUT_DIM, HIDDEN_DIM, NUM_CLASSES).to(DEVICE)
model = model_g    # switch to model_h to train the hierarchical variant
print("Device:", DEVICE)
print(model)
criterion = nn.CrossEntropyLoss().to(DEVICE)
optimizer = optim.Adam(model.parameters(), LEARNING_RATE, weight_decay=WEIGHT_DECAY)

model.train()
for epoch in range(EPOCHS):
    logits = model(normalize_adjacency, node_features, graph_indicator)
    loss = criterion(logits[train_index], train_label)  # compute the loss only on the training graphs
    optimizer.zero_grad()
    loss.backward()   # backpropagate to compute gradients
    optimizer.step()  # update the parameters
    train_acc = torch.eq(
        logits[train_index].max(1)[1], train_label).float().mean()
    print("Epoch {:03d}: Loss {:.4f}, TrainAcc {:.4}".format(
        epoch, loss.item(), train_acc.item()))
model.eval()
with torch.no_grad():
    logits = model(normalize_adjacency, node_features, graph_indicator)
    test_logits = logits[test_index]
    test_acc = torch.eq(
        test_logits.max(1)[1], test_label
    ).float().mean()

print(test_acc.item())

Results

Epoch 000: Loss 0.6814, TrainAcc 0.5881
Epoch 001: Loss 0.6738, TrainAcc 0.5881
Epoch 002: Loss 0.6696, TrainAcc 0.5881
Epoch 003: Loss 0.6697, TrainAcc 0.5881
Epoch 004: Loss 0.6579, TrainAcc 0.5881
Epoch 005: Loss 0.6558, TrainAcc 0.5881
Epoch 006: Loss 0.6556, TrainAcc 0.5881
Epoch 007: Loss 0.6409, TrainAcc 0.621
Epoch 008: Loss 0.6472, TrainAcc 0.6253
Epoch 009: Loss 0.6333, TrainAcc 0.6486
Epoch 010: Loss 0.6418, TrainAcc 0.6656
Epoch 011: Loss 0.6265, TrainAcc 0.6794
Epoch 012: Loss 0.6302, TrainAcc 0.6486
Epoch 013: Loss 0.6256, TrainAcc 0.6561
Epoch 014: Loss 0.6144, TrainAcc 0.6837
Epoch 015: Loss 0.6190, TrainAcc 0.6773
Epoch 016: Loss 0.6074, TrainAcc 0.673
Epoch 017: Loss 0.6082, TrainAcc 0.6741
Epoch 018: Loss 0.5945, TrainAcc 0.7028
Epoch 019: Loss 0.5962, TrainAcc 0.6932
Epoch 020: Loss 0.5945, TrainAcc 0.6815
Epoch 021: Loss 0.5802, TrainAcc 0.6975
Epoch 022: Loss 0.5957, TrainAcc 0.6911
Epoch 023: Loss 0.5765, TrainAcc 0.7017
Epoch 024: Loss 0.5723, TrainAcc 0.7059
Epoch 025: Loss 0.5775, TrainAcc 0.7059
Epoch 026: Loss 0.5564, TrainAcc 0.7208
Epoch 027: Loss 0.5672, TrainAcc 0.7038
Epoch 028: Loss 0.5503, TrainAcc 0.7314
Epoch 029: Loss 0.5504, TrainAcc 0.7261
Epoch 030: Loss 0.5501, TrainAcc 0.7325
Epoch 031: Loss 0.5334, TrainAcc 0.7473
Epoch 032: Loss 0.5397, TrainAcc 0.7378
Epoch 033: Loss 0.5310, TrainAcc 0.7537
Epoch 034: Loss 0.5167, TrainAcc 0.7633
Epoch 035: Loss 0.5205, TrainAcc 0.7484
Epoch 036: Loss 0.5152, TrainAcc 0.7601
Epoch 037: Loss 0.5032, TrainAcc 0.7696
Epoch 038: Loss 0.4942, TrainAcc 0.7749
Epoch 039: Loss 0.4987, TrainAcc 0.7675
Epoch 040: Loss 0.5036, TrainAcc 0.7654
Epoch 041: Loss 0.4926, TrainAcc 0.7707
Epoch 042: Loss 0.4745, TrainAcc 0.7792
Epoch 043: Loss 0.4665, TrainAcc 0.7887
Epoch 044: Loss 0.4720, TrainAcc 0.7909
Epoch 045: Loss 0.4740, TrainAcc 0.7856
Epoch 046: Loss 0.4643, TrainAcc 0.7866
Epoch 047: Loss 0.4439, TrainAcc 0.8057
Epoch 048: Loss 0.4410, TrainAcc 0.8174
Epoch 049: Loss 0.4406, TrainAcc 0.8089
Epoch 050: Loss 0.4481, TrainAcc 0.7962
Epoch 051: Loss 0.4479, TrainAcc 0.7941
Epoch 052: Loss 0.4427, TrainAcc 0.81
Epoch 053: Loss 0.4199, TrainAcc 0.8185
Epoch 054: Loss 0.4096, TrainAcc 0.828
Epoch 055: Loss 0.4233, TrainAcc 0.8217
Epoch 056: Loss 0.4124, TrainAcc 0.8174
Epoch 057: Loss 0.4039, TrainAcc 0.8174
Epoch 058: Loss 0.3880, TrainAcc 0.8418
Epoch 059: Loss 0.3856, TrainAcc 0.8397
Epoch 060: Loss 0.3801, TrainAcc 0.845
Epoch 061: Loss 0.3951, TrainAcc 0.8259
Epoch 062: Loss 0.4363, TrainAcc 0.8079
Epoch 063: Loss 0.4916, TrainAcc 0.7781
Epoch 064: Loss 0.3780, TrainAcc 0.8376
Epoch 065: Loss 0.4109, TrainAcc 0.8132
Epoch 066: Loss 0.4094, TrainAcc 0.8185
Epoch 067: Loss 0.3742, TrainAcc 0.845
Epoch 068: Loss 0.4071, TrainAcc 0.8227
Epoch 069: Loss 0.3514, TrainAcc 0.8599
Epoch 070: Loss 0.3744, TrainAcc 0.8408
Epoch 071: Loss 0.3902, TrainAcc 0.8301
Epoch 072: Loss 0.3523, TrainAcc 0.8631
Epoch 073: Loss 0.3513, TrainAcc 0.8631
Epoch 074: Loss 0.3575, TrainAcc 0.8482
Epoch 075: Loss 0.3306, TrainAcc 0.8684
Epoch 076: Loss 0.3474, TrainAcc 0.8652
Epoch 077: Loss 0.3293, TrainAcc 0.8694
Epoch 078: Loss 0.3246, TrainAcc 0.8684
Epoch 079: Loss 0.3383, TrainAcc 0.8588
Epoch 080: Loss 0.3142, TrainAcc 0.8715
Epoch 081: Loss 0.3091, TrainAcc 0.8715
Epoch 082: Loss 0.3208, TrainAcc 0.8652
Epoch 083: Loss 0.3008, TrainAcc 0.8843
Epoch 084: Loss 0.2949, TrainAcc 0.8843
Epoch 085: Loss 0.2961, TrainAcc 0.8811
Epoch 086: Loss 0.3088, TrainAcc 0.8694
Epoch 087: Loss 0.3171, TrainAcc 0.8694
Epoch 088: Loss 0.3067, TrainAcc 0.8779
Epoch 089: Loss 0.2829, TrainAcc 0.8896
Epoch 090: Loss 0.2742, TrainAcc 0.8938
Epoch 091: Loss 0.2979, TrainAcc 0.879
Epoch 092: Loss 0.2934, TrainAcc 0.8811
Epoch 093: Loss 0.2984, TrainAcc 0.8705
Epoch 094: Loss 0.2862, TrainAcc 0.8875
Epoch 095: Loss 0.2663, TrainAcc 0.9002
Epoch 096: Loss 0.2744, TrainAcc 0.8907
Epoch 097: Loss 0.2755, TrainAcc 0.8864
Epoch 098: Loss 0.2942, TrainAcc 0.8737
Epoch 099: Loss 0.2602, TrainAcc 0.8949
Epoch 100: Loss 0.2623, TrainAcc 0.8949
Epoch 101: Loss 0.2310, TrainAcc 0.9087
Epoch 102: Loss 0.2521, TrainAcc 0.9023
Epoch 103: Loss 0.2297, TrainAcc 0.9193
Epoch 104: Loss 0.2344, TrainAcc 0.9066
Epoch 105: Loss 0.2192, TrainAcc 0.9214
Epoch 106: Loss 0.2253, TrainAcc 0.9151
Epoch 107: Loss 0.2289, TrainAcc 0.913
Epoch 108: Loss 0.2159, TrainAcc 0.9246
Epoch 109: Loss 0.2179, TrainAcc 0.9161
Epoch 110: Loss 0.1981, TrainAcc 0.9352
Epoch 111: Loss 0.2073, TrainAcc 0.9225
Epoch 112: Loss 0.1861, TrainAcc 0.9374
Epoch 113: Loss 0.1945, TrainAcc 0.9289
Epoch 114: Loss 0.1860, TrainAcc 0.9384
Epoch 115: Loss 0.1747, TrainAcc 0.9448
Epoch 116: Loss 0.1841, TrainAcc 0.9374
Epoch 117: Loss 0.1655, TrainAcc 0.9427
Epoch 118: Loss 0.1717, TrainAcc 0.9374
Epoch 119: Loss 0.1635, TrainAcc 0.948
Epoch 120: Loss 0.1607, TrainAcc 0.9522
Epoch 121: Loss 0.2336, TrainAcc 0.8854
Epoch 122: Loss 0.7919, TrainAcc 0.6805
Epoch 123: Loss 1.0422, TrainAcc 0.6996
Epoch 124: Loss 0.5043, TrainAcc 0.7537
Epoch 125: Loss 0.9418, TrainAcc 0.5732
Epoch 126: Loss 0.6531, TrainAcc 0.6805
Epoch 127: Loss 0.5462, TrainAcc 0.7622
Epoch 128: Loss 0.6121, TrainAcc 0.7537
Epoch 129: Loss 0.5723, TrainAcc 0.7665
Epoch 130: Loss 0.3502, TrainAcc 0.8641
Epoch 131: Loss 0.4173, TrainAcc 0.8068
Epoch 132: Loss 0.4610, TrainAcc 0.7845
Epoch 133: Loss 0.3979, TrainAcc 0.8376
Epoch 134: Loss 0.4896, TrainAcc 0.8068
Epoch 135: Loss 0.3175, TrainAcc 0.8662
Epoch 136: Loss 0.3619, TrainAcc 0.8333
Epoch 137: Loss 0.3281, TrainAcc 0.8652
Epoch 138: Loss 0.3243, TrainAcc 0.8684
Epoch 139: Loss 0.3672, TrainAcc 0.8355
Epoch 140: Loss 0.3262, TrainAcc 0.8641
Epoch 141: Loss 0.3300, TrainAcc 0.8747
Epoch 142: Loss 0.3460, TrainAcc 0.862
Epoch 143: Loss 0.3255, TrainAcc 0.8662
Epoch 144: Loss 0.3167, TrainAcc 0.8673
Epoch 145: Loss 0.3247, TrainAcc 0.8567
Epoch 146: Loss 0.3124, TrainAcc 0.8641
Epoch 147: Loss 0.2965, TrainAcc 0.897
Epoch 148: Loss 0.2926, TrainAcc 0.9045
Epoch 149: Loss 0.2841, TrainAcc 0.9013
Epoch 150: Loss 0.2726, TrainAcc 0.8949
Epoch 151: Loss 0.2704, TrainAcc 0.8981
Epoch 152: Loss 0.2707, TrainAcc 0.8949
Epoch 153: Loss 0.2539, TrainAcc 0.914
Epoch 154: Loss 0.2610, TrainAcc 0.9087
Epoch 155: Loss 0.2481, TrainAcc 0.914
Epoch 156: Loss 0.2373, TrainAcc 0.9119
Epoch 157: Loss 0.2391, TrainAcc 0.9066
Epoch 158: Loss 0.2221, TrainAcc 0.9268
Epoch 159: Loss 0.2219, TrainAcc 0.9204
Epoch 160: Loss 0.2163, TrainAcc 0.9299
Epoch 161: Loss 0.2099, TrainAcc 0.9321
Epoch 162: Loss 0.2086, TrainAcc 0.9299
Epoch 163: Loss 0.2014, TrainAcc 0.9363
Epoch 164: Loss 0.1983, TrainAcc 0.9395
Epoch 165: Loss 0.1936, TrainAcc 0.9416
Epoch 166: Loss 0.1899, TrainAcc 0.9384
Epoch 167: Loss 0.1855, TrainAcc 0.9395
Epoch 168: Loss 0.1820, TrainAcc 0.9416
Epoch 169: Loss 0.1776, TrainAcc 0.9459
Epoch 170: Loss 0.1739, TrainAcc 0.9469
Epoch 171: Loss 0.1704, TrainAcc 0.948
Epoch 172: Loss 0.1662, TrainAcc 0.9501
Epoch 173: Loss 0.1641, TrainAcc 0.9512
Epoch 174: Loss 0.1599, TrainAcc 0.9522
Epoch 175: Loss 0.1572, TrainAcc 0.9522
Epoch 176: Loss 0.1532, TrainAcc 0.9554
Epoch 177: Loss 0.1505, TrainAcc 0.9575
Epoch 178: Loss 0.1472, TrainAcc 0.9586
Epoch 179: Loss 0.1442, TrainAcc 0.9575
Epoch 180: Loss 0.1408, TrainAcc 0.9628
Epoch 181: Loss 0.1383, TrainAcc 0.966
Epoch 182: Loss 0.1349, TrainAcc 0.965
Epoch 183: Loss 0.1325, TrainAcc 0.9671
Epoch 184: Loss 0.1292, TrainAcc 0.9682
Epoch 185: Loss 0.1263, TrainAcc 0.9713
Epoch 186: Loss 0.1237, TrainAcc 0.9724
Epoch 187: Loss 0.1206, TrainAcc 0.9713
Epoch 188: Loss 0.1176, TrainAcc 0.9724
Epoch 189: Loss 0.1151, TrainAcc 0.9724
Epoch 190: Loss 0.1127, TrainAcc 0.9724
Epoch 191: Loss 0.1096, TrainAcc 0.9735
Epoch 192: Loss 0.1074, TrainAcc 0.9735
Epoch 193: Loss 0.1048, TrainAcc 0.9735
Epoch 194: Loss 0.1021, TrainAcc 0.9745
Epoch 195: Loss 0.0996, TrainAcc 0.9745
Epoch 196: Loss 0.0973, TrainAcc 0.9756
Epoch 197: Loss 0.0946, TrainAcc 0.9756
Epoch 198: Loss 0.0924, TrainAcc 0.9777
Epoch 199: Loss 0.0906, TrainAcc 0.9798

Reposted from blog.csdn.net/weixin_43360025/article/details/124633104