pytorch

tensorboardX使用

import torch
import torchvision.utils as vutils
import numpy as np
import torchvision.models as models
from torchvision import datasets
from tensorboardX import SummaryWriter

# Untrained ResNet-18; used below only as a source of named parameters
# for histogram logging, so pretrained weights are unnecessary.
resnet18 = models.resnet18(False)
# Default SummaryWriter: event files go under ./runs/<timestamp>.
writer = SummaryWriter()
# Sample rate (Hz) for the dummy audio clips logged below.
sample_rate = 44100
# One tone frequency per image-logging step, indexed as freqs[n_iter // 10].
freqs = [262, 294, 330, 349, 392, 440, 440, 440, 440, 440, 440]

# Demo loop: logs scalars every step, and images/audio/text/histograms/PR
# curves every 10th step.
for n_iter in range(100):

    # Two random scalars, grouped in TensorBoard by the slash in the tag.
    dummy_s1 = torch.rand(1)
    dummy_s2 = torch.rand(1)
    # data grouping by `slash`
    writer.add_scalar('data/scalar1', dummy_s1[0], n_iter)
    writer.add_scalar('data/scalar2', dummy_s2[0], n_iter)

    # Several related curves rendered together in one chart.
    writer.add_scalars('data/scalar_group', {'xsinx': n_iter * np.sin(n_iter),
                                             'xcosx': n_iter * np.cos(n_iter),
                                             'arctanx': np.arctan(n_iter)}, n_iter)

    dummy_img = torch.rand(32, 3, 64, 64)  # output from network
    if n_iter % 10 == 0:
        x = vutils.make_grid(dummy_img, normalize=True, scale_each=True)
        writer.add_image('Image', x, n_iter)

        # Two seconds of a pure cosine tone; the pitch changes every 10 steps.
        dummy_audio = torch.zeros(sample_rate * 2)
        # FIX: iterate over the audio buffer, not the image grid `x`.
        # The original `range(x.size(0))` is the grid's channel count (3),
        # which filled only the first 3 samples and left the clip silent.
        for i in range(dummy_audio.size(0)):
            # amplitude of sound should in [-1, 1]
            dummy_audio[i] = np.cos(freqs[n_iter // 10] * np.pi * float(i) / float(sample_rate))
        writer.add_audio('myAudio', dummy_audio, n_iter, sample_rate=sample_rate)

        writer.add_text('Text', 'text logged at step:' + str(n_iter), n_iter)

        # One histogram per model parameter tensor.
        for name, param in resnet18.named_parameters():
            writer.add_histogram(name, param.clone().cpu().data.numpy(), n_iter)

        # needs tensorboard 0.4RC or later
        # Signature: add_pr_curve(tag, labels, predictions, global_step).
        writer.add_pr_curve('xoxo', np.random.randint(2, size=100), np.random.rand(100), n_iter)

# Log 100 MNIST test digits as an embedding for TensorBoard's projector tab.
dataset = datasets.MNIST('mnist', train=False, download=True)
# FIX: `test_data` / `test_labels` are deprecated aliases in torchvision;
# `data` / `targets` are the supported attribute names.
images = dataset.data[:100].float()
label = dataset.targets[:100]

features = images.view(100, 784)  # flatten each 28x28 image to a 784-vector
writer.add_embedding(features, metadata=label, label_img=images.unsqueeze(1))

# export scalar data to JSON for external processing
writer.export_scalars_to_json("./all_scalars.json")
writer.close()

命令行下输入:tensorboard --logdir runs

'''

线性回归

# Scalar linear regression y ≈ w*x + b, fitted with Adam.
# NOTE(review): assumes tensors `x` (inputs) and `y` (targets), plus
# `torch.optim as optim` and `torch.nn.functional as F`, are defined
# elsewhere — this is a fragment from a larger script.
w = torch.tensor(0.0, requires_grad=True)
b = torch.tensor(0.0, requires_grad=True)  # fixed: closing parenthesis was missing
optimizer = optim.Adam([w, b], lr=1e-2)
for i in range(1000):
    # fixed: the loop body below was not indented in the original
    output = w * x + b
    loss = F.mse_loss(output, y)
    optimizer.zero_grad()  # clear gradients accumulated by the previous step
    loss.backward()
    optimizer.step()

'''
# Split the pool into 50k training / 10k validation samples.
train_db, val_db = torch.utils.data.random_split(train_db, [50000, 10000])
# Loader for the training split; reshuffles every epoch.
train_loader = torch.utils.data.DataLoader(train_db, batch_size=batch_size, shuffle=True)

Dataset是一个包装类,用来将数据包装为Dataset类,然后传入DataLoader中,我们再使用DataLoader这个类来更加快捷的对数据进行操作。
当我们继承了一个Dataset类之后,我们需要重写 `__len__` 和 `__getitem__` 方法:`__len__` 方法返回 dataset 的大小;`__getitem__` 方法支持从 0 到 len(self)-1 的整数索引

'''
def __len__(self):
    """Return the number of samples in the dataset.

    Fixes vs. the original: the method must be the dunder ``__len__`` for
    ``len(dataset)`` and DataLoader to work (the surrounding text confirms
    this; markdown stripped the underscores), and the ``return`` statement
    was missing its indentation.
    """
    # assumes self.length is set by the dataset's __init__ — TODO confirm
    return self.length

def __getitem__(self, i):
    """Return the (state, action) pair for flat sample index ``i``.

    ``self.get_idx`` translates the flat index into a trajectory index
    plus a within-trajectory offset.
    """
    traj_idx, step = self.get_idx[i]
    state = self.trajectories['states'][traj_idx][step]
    action = self.trajectories['actions'][traj_idx][step]
    return state, action

'''
DataLoader是一个比较重要的类,它为我们提供的常用操作有:batch_size(每个batch的大小), shuffle(是否进行shuffle操作), num_workers(加载数据的时候使用几个子进程)

需要注意的重要一点是,每次迭代pytorch都会从头开始重新创建计算图,这正是允许使用任意Python控制流语句的原因,该语句可以在每次迭代时更改图的整体形状和大小。在开始训练之前,您不必预先编码所有可能的路径——您运行什么,就对什么求导(what you run is what you differentiate)。

in-place operation在pytorch中是指改变一个tensor的值的时候,不经过复制操作,而是直接在原来的内存上改变它的值。可以把它称为原地操作。如果你使用了in-place operation而没有报错的话,那么你可以确定你的梯度计算是正确的。
通常不鼓励该行为

broadcast规则:对齐后的同一维度要么相等,要么其中一方不存在(为空),要么为1

torch是numpy的GPU替代品,提供最大灵活性和速度的深度学习研究平台

view改变tensor的shape
# view() reinterprets the tensor's existing storage with a new shape
# (no data copy); the total element count must stay the same.
x = torch.randn(4, 4)
y = x.view(16)       # flattened to one dimension
z = x.view(-1, 8)    # -1 asks view() to infer this dimension (here: 2)
print(*(t.size() for t in (x, y, z)))

posted @ 2019-04-09 14:00  blog_hfg  阅读(260)  评论(0)    收藏  举报