# 1. Preparation

pip install torch==0.3.1 --user


# Sanity-check which PyTorch version is actually installed.
import torch
print(torch.__version__)


# Reference: torch.utils.data.DataLoader signature with all default arguments.
# (Arguments after `*` are keyword-only.)
DataLoader(dataset, batch_size=1, shuffle=False, sampler=None,
batch_sampler=None, num_workers=0, collate_fn=None,
pin_memory=False, drop_last=False, timeout=0,
worker_init_fn=None, *, prefetch_factor=2,
persistent_workers=False)


## 1.2. 矩阵相关

shape = torch.randn([3,2,3])


numpy转tensor：

torch.Tensor()


# unsqueeze(0) inserts a new leading dimension of size 1: (10, 10) -> (1, 10, 10).
x1 = torch.zeros(10, 10)
x2 = x1.unsqueeze(0)
print(x2.size())  # torch.Size([1, 10, 10])


B = 3  # repeat count used by both examples below

# repeat_interleave with a per-slice count tensor: each of the 3 leading
# slices of `shape` is repeated B times consecutively (interleaved order).
torch.repeat_interleave(shape, torch.ones(B, dtype=torch.long)*B, dim=0) #[A,A,A,B,B,B,C,C,C]

# Tensor.repeat tiles the whole tensor along dim 0: the sequence A,B,C repeats B times.
shape.repeat(B, 1, 1) # [A,B,C,A,B,C,A,B,C]


# gather: pick one element per row along dim=1, selected by an index matrix.
# (The original snippet left tensor_0 undefined; define it so the example runs.)
tensor_0 = torch.tensor([[1, 2, 3],
                         [4, 5, 6],
                         [7, 8, 9]])
index = torch.tensor([[0], [1], [2]])  # column to take from each row
tensor_p = tensor_0.gather(1, index)   # -> [[1], [5], [9]]
print(tensor_p)


imgs = image.view(-1,1,256,256).repeat(1,3,1,1)


## 1.3. 上采样

# Resize a 3-D volume: add batch and channel dims, interpolate, strip them again.
image_tensor = image_tensor.view(1, 1, img_d, img_h, img_w)
# F.upsample is deprecated in favor of F.interpolate; .detach() replaces legacy .data.
resize_tensor = F.interpolate(image_tensor, size=new_shape, mode='trilinear').detach()[0, 0]

# torch.max along a dimension returns BOTH the max values AND their argmax
# indices as a (values, indices) namedtuple — not just the max value.
# The parameter is named `dim` (not `axis`).
torch.max(input, dim)

# Flatten a contiguous range of dims of a tensor into a single dim.
torch.flatten(input, start_dim, end_dim)


## 1.5. Debug

# Shape debugging: push a dummy input through the model and print each
# feature map's size. torch.autograd.Variable has been a no-op since 0.4,
# so a plain tensor is passed directly. (The original also failed to indent
# the loop body.)
import torch

fms = model(torch.randn(1, 1, 256, 256))
for fm in fms:
    print(fm.size())


## 1.6. 模型

### 1.6.1. 加载模型

checkpoint = torch.load(self.model_path, map_location=lambda storage, loc: storage)


# Load a Python-2-era checkpoint under Python 3: force latin1 decoding of the
# pickled byte strings, and map all storages to CPU.
# NOTE(review): this monkey-patches pickle.Unpickler process-wide — every later
# pickle.load in this process will also use latin1.
from functools import partial
import pickle
pickle.Unpickler = partial(pickle.Unpickler, encoding="latin1")
model = torch.load(model_file, map_location=lambda storage, loc: storage, pickle_module=pickle)


### 1.6.2. test model

input_img_var = torch.autograd.Variable(images.cuda(), volatile=True)


## 1.7. cuda memory

# Make GPU 7 the default device for subsequent CUDA allocations / .cuda() calls.
torch.cuda.set_device(7)

model.cuda() #RAM + 0.9G observed when moving the model to the GPU


# Let cuDNN benchmark conv algorithms; faster when input sizes are fixed.
torch.backends.cudnn.benchmark = True


ps x | grep python | awk '{print $1}' | xargs kill


Memory Leakage with PyTorch

# 2. Pytorch lightning

## 2.1. pl.LightningModule

def configure_optimizers(self):
    """Build the SGD optimizer (trainable params only) and an exponential LR schedule.

    Returns:
        ([optimizer], [lr_scheduler_dict]) — the PyTorch Lightning convention.
    """
    # Filter so that frozen (requires_grad=False) parameters are not handed to SGD.
    optimizer = torch.optim.SGD(
        filter(lambda p: p.requires_grad, self.parameters()),
        lr=self.hparams.lr,
        momentum=0.9,
    )
    lr_scheduler = {
        'scheduler': torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.9995),
        'name': 'lr',  # name shown by the LearningRateMonitor callback
    }
    return [optimizer], [lr_scheduler]