main.cpp

// Copyright 2020-present pytorch-cpp Authors
// Original: https://pytorch.org/tutorials/beginner/blitz/autograd_tutorial.html
#include <torch/torch.h>
#include <iostream>
#include <iomanip>

int main() {
    std::cout << "Deep Learning with PyTorch: A 60 Minute Blitz\n\n";
    std::cout << "Autograd: Automatic Differentiation\n\n";
    std::cout << "Tensor\n\n";

    // Create a tensor and set requires_grad(true) to track computation with it:
    auto x = torch::ones({2, 2}, torch::TensorOptions().requires_grad(true));
    std::cout << "x:\n" << x << '\n';

    // Do a tensor operation:
    auto y = x + 2;
    std::cout << "y:\n" << y << '\n';

    // y was created as a result of an operation, so it has a grad_fn;
    // name() gives a readable label for the autograd node:
    std::cout << "y.grad_fn:\n" << y.grad_fn()->name() << '\n';

    // Do more operations on y:
    auto z = y * y * 3;
    auto out = z.mean();
    std::cout << "z:\n" << z << "out:\n" << out << '\n';

    // .requires_grad_(...) changes an existing tensor's requires_grad flag in-place
    // (it defaults to false when absent from the creation call):
    auto a = torch::randn({2, 2});
    a = ((a * 3) / (a - 1));
    std::cout << "a.requires_grad: " << a.requires_grad() << '\n';
    a.requires_grad_(true);
    std::cout << "a.requires_grad: " << a.requires_grad() << '\n';
    auto b = (a * a).sum();
    std::cout << "b.grad_fn:\n" << b.grad_fn()->name() << '\n';
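    // Note: b has a grad_fn because it results from an operation on a tensor that
    // requires grad; user-created leaf tensors have grad_fn == nullptr.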
std::cout << "Gradients\n\n";
// Let’s backprop now:
out.backward();
// Print gradients d(out)/dx:
std::cout << "x.grad:\n" << x.grad() << '\n';

    // Example of a vector-Jacobian product:
    x = torch::randn(3, torch::TensorOptions().requires_grad(true));
    y = x * 2;
    // Keep doubling y until its L2 norm reaches 1000; the norm is a
    // floating-point scalar, so extract it as float rather than int:
    while (y.data().norm().item<float>() < 1000) {
        y = y * 2;
    }
    std::cout << "y:\n" << y << '\n';

    // y is no longer a scalar, so autograd cannot reduce the Jacobian to a plain
    // gradient; pass a vector v to backward to get the vector-Jacobian product J^T v:
    auto v = torch::tensor({0.1, 1.0, 0.0001}, torch::TensorOptions(torch::kFloat));
    y.backward(v);
    std::cout << "x.grad:\n" << x.grad() << '\n';

    // Stop autograd from tracking history on tensors with requires_grad=true by
    // placing the code inside a torch::NoGradGuard scope:
    std::cout << "x.requires_grad:\n" << x.requires_grad() << '\n';
    std::cout << "(x ** 2).requires_grad:\n" << (x * x).requires_grad() << '\n';
    {
        torch::NoGradGuard no_grad;
        std::cout << "(x ** 2).requires_grad:\n" << (x * x).requires_grad() << '\n';
    }

    // Or use .detach() to get a new tensor with the same content that does not
    // require gradients:
    std::cout << "x.requires_grad:\n" << x.requires_grad() << '\n';
    y = x.detach();
    std::cout << "y.requires_grad:\n" << y.requires_grad() << '\n';
    std::cout << "x.eq(y).all():\n" << x.eq(y).all() << '\n';
}