PyTorch
https://pytorch.org/tutorials/
https://pytorch.org/tutorials/beginner/deep_learning_60min_blitz.html
Installation: https://github.com/pytorch/pytorch or, for the Jetson Nano, https://devtalk.nvidia.com/default/topic/1049071/pytorch-for-jetson-nano/
me@me:~/pytorch$ sudo apt-get install python-pip
me@me:~/pytorch$ pip install torch-1.1.0-cp27-cp27mu-linux_aarch64.whl
Processing ./torch-1.1.0-cp27-cp27mu-linux_aarch64.whl
Collecting numpy (from torch==1.1.0)
Downloading https://files.pythonhosted.org/packages/d3/4b/f9f4b96c0b1ba43d28a5bdc4b64f0b9d3fbcf31313a51bc766942866a7c7/numpy-1.16.4.zip (5.1MB)
100% |████████████████████████████████| 5.1MB 113kB/s
Collecting future (from torch==1.1.0)
Downloading https://files.pythonhosted.org/packages/90/52/e20466b85000a181e1e144fd8305caf2cf475e2f9674e797b222f8105f5f/future-0.17.1.tar.gz (829kB)
100% |████████████████████████████████| 829kB 620kB/s
Building wheels for collected packages: numpy, future
Running setup.py bdist_wheel for numpy ... done
Stored in directory: /home/me/.cache/pip/wheels/6b/6b/07/ec9651c970f87810e475369b9d655170ac06c66c6f38c3830e
Running setup.py bdist_wheel for future ... done
Stored in directory: /home/me/.cache/pip/wheels/0c/61/d2/d6b7317325828fbb39ee6ad559dbe4664d0896da4721bf379e
Successfully built numpy future
Installing collected packages: numpy, future, torch
Successfully installed future-0.17.1 numpy-1.16.4 torch-1.1.0
me@me:~$ sudo apt-get install libatlas3-base   # provides BLAS/LAPACK; see http://danielnouri.org/notes/2012/12/19/libblas-and-liblapack-issues-and-speed,-with-scipy-and-ubuntu/
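Before starting the interactive session, a quick sanity check confirms that the wheel imports cleanly (a minimal sketch; the version string should match the 1.1.0 wheel installed above):

import torch
print(torch.__version__)          # expect 1.1.0 for the wheel above
print(torch.cuda.is_available())  # True if the Jetson's CUDA runtime is visible to PyTorch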
me@me:~$ python
Python 2.7.15+ (default, Nov 27 2018, 23:36:35)
[GCC 7.3.0] on linux2
Type "help", "copyright", "credits" or "license" for more information.
>>> import torch
>>> from __future__ import print_function
>>> x = torch.empty(5,3)
>>> print(x)
tensor([[6.3303e-04, 1.7796e-43, 6.3303e-04],
[1.7796e-43, 6.2621e-04, 1.7796e-43],
[6.3303e-04, 1.7796e-43, 6.3303e-04],
[1.7796e-43, 6.2621e-04, 1.7796e-43],
[6.2622e-04, 1.7796e-43, 6.3304e-04]])
>>> x = torch.empty(5,3)
>>> print(x)
tensor([[-9.9167e-10, 1.1911e-43, 6.3275e-04],
[ 1.7796e-43, 6.3274e-04, 1.7796e-43],
[ 6.3274e-04, 1.7796e-43, 6.3256e-04],
[ 1.7796e-43, 6.2621e-04, 1.7796e-43],
[ 6.2622e-04, 1.7796e-43, 6.3304e-04]])
>>> x = torch.rand(5, 3)
>>> print(x)
tensor([[0.5236, 0.3999, 0.8464],
[0.0597, 0.0370, 0.2942],
[0.5335, 0.6017, 0.5114],
[0.4158, 0.2590, 0.6017],
[0.4964, 0.7748, 0.6362]])
>>> x = torch.zeros(5, 3, dtype=torch.long)
>>> print(x)
tensor([[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]])
>>> x = x.new_ones(5, 3, dtype=torch.double)
>>> print(x)
tensor([[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.]], dtype=torch.float64)
>>> x = torch.randn_like(x, dtype=torch.float)
>>> print(x)
tensor([[-0.0104, -1.6859, 1.5372],
[-0.2854, 0.2397, -0.0364],
[-0.0362, -1.0012, -0.2862],
[ 1.2732, -0.6627, 0.0126],
[ 0.1980, 0.2829, 0.1413]])
>>> print(x.size())
torch.Size([5, 3])
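size() returns a torch.Size, which supports the usual tuple operations. The same tutorial also reshapes tensors with view(); a minimal sketch continuing the session above (x2, y2, z2 are fresh illustrative names so the x above is untouched):

x2 = torch.randn(4, 4)
y2 = x2.view(16)      # flatten into a 1-D tensor of 16 elements
z2 = x2.view(-1, 8)   # -1 tells PyTorch to infer that dimension (here 2)
print(z2.size())      # torch.Size([2, 8])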
>>> y = torch.rand(5, 3)
>>> print(y)
tensor([[0.1257, 0.3377, 0.9653],
[0.0577, 0.6825, 0.6740],
[0.0399, 0.6792, 0.1840],
[0.0085, 0.3385, 0.8567],
[0.1932, 0.0309, 0.0804]])
>>> print(x+y)
tensor([[ 0.1154, -1.3482, 2.5025],
[-0.2277, 0.9222, 0.6376],
[ 0.0037, -0.3219, -0.1022],
[ 1.2817, -0.3242, 0.8692],
[ 0.3912, 0.3138, 0.2217]])
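Addition can also be written with torch.add and an explicit output tensor, or done in place; a short sketch using the x and y above (values are random, so no output is reproduced here; result is an illustrative name):

result = torch.empty(5, 3)
torch.add(x, y, out=result)   # same as x + y, written into the preallocated tensor
y.add_(x)                     # in-place: methods ending in _ mutate their tensor
print(result)
print(y)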
>>> torch.cuda.is_available()
True
>>> device = torch.device("cuda")
>>> print(device)
cuda
>>> y = torch.ones_like(x, device=device)
>>> print(y)
tensor([[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.]], device='cuda:0')
>>> x = x.to(device)
>>> print(x)
tensor([[-0.0104, -1.6859, 1.5372],
[-0.2854, 0.2397, -0.0364],
[-0.0362, -1.0012, -0.2862],
[ 1.2732, -0.6627, 0.0126],
[ 0.1980, 0.2829, 0.1413]], device='cuda:0')
>>> z = x + y
>>> print(z)
tensor([[ 9.8963e-01, -6.8589e-01, 2.5372e+00],
[ 7.1465e-01, 1.2397e+00, 9.6358e-01],
[ 9.6382e-01, -1.1544e-03, 7.1375e-01],
[ 2.2732e+00, 3.3731e-01, 1.0126e+00],
[ 1.1980e+00, 1.2829e+00, 1.1413e+00]], device='cuda:0')
>>> print(z.to("cpu", torch.double))
tensor([[ 9.8963e-01, -6.8589e-01, 2.5372e+00],
[ 7.1465e-01, 1.2397e+00, 9.6358e-01],
[ 9.6382e-01, -1.1544e-03, 7.1375e-01],
[ 2.2732e+00, 3.3731e-01, 1.0126e+00],
[ 1.1980e+00, 1.2829e+00, 1.1413e+00]], dtype=torch.float64)
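The session above hard-codes torch.device("cuda"); a more portable pattern (a sketch of the usual idiom, reusing the x from above) selects the device at run time:

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
x = x.to(device)                        # no-op if x is already on that device
y = torch.ones_like(x, device=device)   # create the tensor directly on the target device
z = x + y
print(z.to("cpu", torch.double))        # move back to the CPU and change dtype in one call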
>>> a = torch.randn(2, 2)
>>> print(a)
tensor([[-0.6065, 1.4560],
[ 1.3102, 0.7580]])
>>> a = ((a * 3) / (a - 1))
>>> print(a)
tensor([[ 1.1326, 9.5792],
[12.6716, -9.3983]])
>>> print(a.requires_grad)
False
>>> a.requires_grad_(True)
tensor([[ 1.1326, 9.5792],
[12.6716, -9.3983]], requires_grad=True)
>>> print(a.requires_grad)
True
>>> b = (a * a).sum()
>>> print(b)
tensor(341.9424, grad_fn=<SumBackward0>)
>>> print(b.grad_fn)
<SumBackward0 object at 0x7f3a24bf90>
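grad_fn shows that b carries the history needed for backpropagation. The tutorial continues by calling backward() and reading the gradient; a minimal sketch continuing with the a and b above (no output shown, since a was random):

b.backward()      # b = (a * a).sum(), so d(b)/d(a) = 2 * a
print(a.grad)     # same shape as a; each entry is twice the corresponding entry of a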