PyTorch basic structure (model / dataset / training-loop template)

import torch
from torch import nn
class model_name(nn.Module):
    """Skeleton for a PyTorch model: declare layers in __init__, wire them in forward."""

    def __init__(self):
        # BUG FIX: the original inherited from nn.Moudule (typo), which
        # raises AttributeError at class-creation time; nn.Module is correct.
        super(model_name, self).__init__()
        # network: declare layers/submodules here, e.g. self.fc = nn.Linear(...)

    def forward(self, x):
        # Placeholder: pass `x` through the declared layers and return the output.
        return
    
 
        
from torch.utils.data import DataLoader,Dataset
class dataset_name(Dataset):
    """Skeleton for a map-style Dataset: implement __len__ and __getitem__."""

    def __len__(self):
        # BUG FIX: the original returned None, so len(ds) — which DataLoader
        # calls — raised TypeError. Return an int (0 until real data exists).
        return 0

    def __getitem__(self, index):
        # Placeholder: return the sample (and label) stored at `index`.
        return
# BUG FIX: the original passed the Dataset *class* object; DataLoader
# needs a Dataset *instance* to index and measure.
loader1_name = DataLoader(dataset=dataset_name())

        
    
# Pick the GPU when one is available, otherwise fall back to CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"

# BUG FIX: `model` was never defined anywhere in the file, so the
# optimizer line below raised NameError. Instantiate the model class
# defined above and move it to the chosen device.
model = model_name().to(device)

loss_fn = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters())

def train_loop(dataloader, model, loss_fn, optimizer, val_dataloader=None):
    """Run one training epoch; optionally evaluate on `val_dataloader` afterwards.

    Args:
        dataloader: yields (input, target) training batches.
        model: the nn.Module being optimized.
        loss_fn: criterion comparing predictions to targets.
        optimizer: optimizer updating `model`'s parameters.
        val_dataloader: optional validation batches, evaluated once per epoch
            with gradients disabled. New keyword with a None default, so
            existing 4-argument callers are unaffected.
    """
    model.train()
    for batch, (input_x, output_y) in enumerate(dataloader):
        pre = model(input_x)
        loss = loss_fn(pre, output_y)

        # Standard step: clear stale gradients, backprop, apply the update.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    # BUG FIX: in the original, the validation `for` was not indented under
    # `with torch.no_grad():` (an IndentationError), referenced an undefined
    # global `val_dataloader`, and sat inside the per-batch training loop so
    # full validation would have run after every batch. It now runs once per
    # epoch, only when a validation loader is supplied.
    if val_dataloader is not None:
        model.eval()
        with torch.no_grad():
            for batch, (val_x, val_y) in enumerate(val_dataloader):
                outputs = model(val_x)
                loss = loss_fn(outputs, val_y)

# BUG FIX: `epochs` was never defined (NameError); give it a concrete value.
epochs = 5
for t in range(epochs):
    print(f"Epoch {t+1}\n-------------------------------")
    # BUG FIX: the original passed undefined `train_dataloader`; use the
    # loader actually constructed above (`loader1_name`).
    train_loop(loader1_name, model, loss_fn, optimizer)
# Print the completion message once, after all epochs (the original printed
# it inside the loop, once per epoch).
print("Done!")


        

 

posted @ 2022-05-01 18:59  Nonmy  阅读(66)  评论(0)    收藏  举报