Comparison of Deep Learning Optimization Algorithms

1. PyTorch + Python

    # Pick one optimizer at a time; uncomment the variant you want to try.
    optimizer = torch.optim.SGD(net.parameters(), lr=LR)                        # plain SGD
    # optimizer = torch.optim.SGD(net.parameters(), lr=LR, momentum=0.8)        # SGD with momentum
    # optimizer = torch.optim.RMSprop(net.parameters(), lr=LR, alpha=0.9)       # RMSprop
    # optimizer = torch.optim.Adam(net.parameters(), lr=LR, betas=(0.9, 0.99))  # Adam

    # More optimizers from torch.optim, with their full argument lists:
    # optimizer = torch.optim.Adam(net.parameters(), lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, amsgrad=False)
    # optimizer = torch.optim.Adadelta(net.parameters(), lr=1.0, rho=0.9, eps=1e-06, weight_decay=0)
    # optimizer = torch.optim.Adagrad(net.parameters(), lr=0.01, lr_decay=0, weight_decay=0, initial_accumulator_value=0, eps=1e-10)
    # optimizer = torch.optim.AdamW(net.parameters(), lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0.01, amsgrad=False)
    # optimizer = torch.optim.SparseAdam(net.parameters(), lr=0.001, betas=(0.9, 0.999), eps=1e-08)
    # optimizer = torch.optim.Adamax(net.parameters(), lr=0.002, betas=(0.9, 0.999), eps=1e-08, weight_decay=0)

    # optimizer = torch.optim.SGD(net.parameters(), lr=0.01, momentum=0.9)
    # optimizer = torch.optim.ASGD(net.parameters(), lr=0.01, lambd=0.0001, alpha=0.75, t0=1000000.0, weight_decay=0)

    # optimizer = torch.optim.RMSprop(net.parameters(), lr=0.01, alpha=0.99, eps=1e-08, weight_decay=0, momentum=0, centered=False)
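The constructors above only set up each optimizer; a minimal sketch of how such a comparison could actually be run (assuming a toy quadratic-regression dataset, a small two-layer net, and the helper make_net, all invented here for illustration) follows:

    import torch
    import torch.nn as nn

    LR = 0.01
    # Toy data: y = x^2 plus noise (illustrative only).
    x = torch.unsqueeze(torch.linspace(-1, 1, 200), dim=1)
    y = x.pow(2) + 0.1 * torch.randn(x.size())

    def make_net():
        # The same small two-layer net for every optimizer, so the comparison is fair.
        return nn.Sequential(nn.Linear(1, 20), nn.ReLU(), nn.Linear(20, 1))

    nets = [make_net() for _ in range(4)]
    optimizers = [
        torch.optim.SGD(nets[0].parameters(), lr=LR),
        torch.optim.SGD(nets[1].parameters(), lr=LR, momentum=0.8),
        torch.optim.RMSprop(nets[2].parameters(), lr=LR, alpha=0.9),
        torch.optim.Adam(nets[3].parameters(), lr=LR, betas=(0.9, 0.99)),
    ]
    loss_fn = nn.MSELoss()

    for epoch in range(100):
        for net, opt in zip(nets, optimizers):
            loss = loss_fn(net(x), y)
            opt.zero_grad()   # clear gradients from the previous step
            loss.backward()   # backpropagate
            opt.step()        # apply each optimizer's update rule
        if epoch % 20 == 0:
            # Current loss for SGD / SGD+momentum / RMSprop / Adam.
            print(epoch, [round(loss_fn(net(x), y).item(), 4) for net in nets])

Training separate copies of the same network with identical data lets the printed losses show how quickly each update rule converges on this task.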

Reprinted from www.cnblogs.com/wjjcjj/p/12743285.html