Skip to content

Commit 9d1d939

Browse files
MatteB03 and dario-coscia
authored and committed
Update Tutorials + New Tutorial 14
1 parent 40d82c2 commit 9d1d939

File tree

28 files changed

+2969
-1363
lines changed

28 files changed

+2969
-1363
lines changed

tutorials/tutorial1/tutorial.ipynb

Lines changed: 244 additions & 94 deletions
Large diffs are not rendered by default.

tutorials/tutorial1/tutorial.py

Lines changed: 103 additions & 42 deletions
Original file line numberDiff line numberDiff line change
@@ -38,7 +38,7 @@
3838
#
3939
# ```python
4040
# from pina.problem import SpatialProblem
41-
# from pina.geometry import CartesianProblem
41+
# from pina.domain import CartesianDomain
4242
#
4343
# class SimpleODE(SpatialProblem):
4444
#
@@ -53,7 +53,7 @@
5353
# What if our equation is also time-dependent? In this case, our `class` will inherit from both `SpatialProblem` and `TimeDependentProblem`:
5454
#
5555

56-
# In[ ]:
56+
# In[1]:
5757

5858

5959
## routine needed to run the notebook on Google Colab
@@ -65,9 +65,13 @@
6565
if IN_COLAB:
6666
get_ipython().system('pip install "pina-mathlab"')
6767

68+
import warnings
69+
6870
from pina.problem import SpatialProblem, TimeDependentProblem
6971
from pina.domain import CartesianDomain
7072

73+
warnings.filterwarnings('ignore')
74+
7175
class TimeSpaceODE(SpatialProblem, TimeDependentProblem):
7276

7377
output_variables = ['u']
@@ -87,25 +91,30 @@ class TimeSpaceODE(SpatialProblem, TimeDependentProblem):
8791

8892
# ### Write the problem class
8993
#
90-
# Once the `Problem` class is initialized, we need to represent the differential equation in **PINA**. In order to do this, we need to load the **PINA** operators from `pina.operators` module. Again, we'll consider Equation (1) and represent it in **PINA**:
94+
# Once the `Problem` class is initialized, we need to represent the differential equation in **PINA**. In order to do this, we need to load the **PINA** operators from `pina.operator` module. Again, we'll consider Equation (1) and represent it in **PINA**:
9195

9296
# In[2]:
9397

9498

99+
import torch
100+
import matplotlib.pyplot as plt
101+
95102
from pina.problem import SpatialProblem
96-
from pina.operators import grad
103+
from pina.operator import grad
97104
from pina import Condition
98105
from pina.domain import CartesianDomain
99106
from pina.equation import Equation, FixedValue
100107

101-
import torch
102-
103-
104108
class SimpleODE(SpatialProblem):
105109

106110
output_variables = ['u']
107111
spatial_domain = CartesianDomain({'x': [0, 1]})
108112

113+
domains ={
114+
'x0': CartesianDomain({'x': 0.}),
115+
'D': CartesianDomain({'x': [0, 1]})
116+
}
117+
109118
# defining the ode equation
110119
def ode_equation(input_, output_):
111120

@@ -120,13 +129,10 @@ def ode_equation(input_, output_):
120129

121130
# conditions to hold
122131
conditions = {
123-
'x0': Condition(location=CartesianDomain({'x': 0.}), equation=FixedValue(1)), # We fix initial condition to value 1
124-
'D': Condition(location=CartesianDomain({'x': [0, 1]}), equation=Equation(ode_equation)), # We wrap the python equation using Equation
132+
'bound_cond': Condition(domain='x0', equation=FixedValue(1.)),
133+
'phys_cond': Condition(domain='D', equation=Equation(ode_equation))
125134
}
126135

127-
# sampled points (see below)
128-
input_pts = None
129-
130136
# defining the true solution
131137
def truth_solution(self, pts):
132138
return torch.exp(pts.extract(['x']))
@@ -149,14 +155,14 @@ def truth_solution(self, pts):
149155

150156

151157
# sampling 20 points in [0, 1] through discretization in all locations
152-
problem.discretise_domain(n=20, mode='grid', variables=['x'], locations='all')
158+
problem.discretise_domain(n=20, mode='grid', domains='all')
153159

154160
# sampling 20 points in (0, 1) through latin hypercube sampling in D, and 1 point in x0
155-
problem.discretise_domain(n=20, mode='latin', variables=['x'], locations=['D'])
156-
problem.discretise_domain(n=1, mode='random', variables=['x'], locations=['x0'])
161+
problem.discretise_domain(n=20, mode='latin', domains=['D'])
162+
problem.discretise_domain(n=1, mode='random', domains=['x0'])
157163

158164
# sampling 20 points in (0, 1) randomly
159-
problem.discretise_domain(n=20, mode='random', variables=['x'])
165+
problem.discretise_domain(n=20, mode='random')
160166

161167

162168
# We are going to use latin hypercube points for sampling. We need to sample in all the conditions domains. In our case we sample in `D` and `x0`.
@@ -165,41 +171,45 @@ def truth_solution(self, pts):
165171

166172

167173
# sampling for training
168-
problem.discretise_domain(1, 'random', locations=['x0'])
169-
problem.discretise_domain(20, 'lh', locations=['D'])
174+
problem.discretise_domain(1, 'random', domains=['x0']) # TODO check
175+
problem.discretise_domain(20, 'lh', domains=['D'])
170176

171177

172178
# The points are saved in a python `dict`, and can be accessed by calling the attribute `input_pts` of the problem
173179

174180
# In[5]:
175181

176182

177-
print('Input points:', problem.input_pts)
178-
print('Input points labels:', problem.input_pts['D'].labels)
179-
183+
print('Input points:', problem.discretised_domains)
184+
print('Input points labels:', problem.discretised_domains['D'].labels)
180185

181-
# To visualize the sampled points we can use the `.plot_samples` method of the `Plotter` class
182186

183-
# In[5]:
187+
# To visualize the sampled points we can use `matplotlib.pyplot`:
184188

189+
# In[6]:
185190

186-
from pina import Plotter
187191

188-
pl = Plotter()
189-
pl.plot_samples(problem=problem)
192+
variables=problem.spatial_variables
193+
fig = plt.figure()
194+
proj = "3d" if len(variables) == 3 else None
195+
ax = fig.add_subplot(projection=proj)
196+
for location in problem.input_pts:
197+
coords = problem.input_pts[location].extract(variables).T.detach()
198+
ax.plot(coords.flatten(),torch.zeros(coords.flatten().shape),".",label=location)
190199

191200

192201
# ## Perform a small training
193202

194-
# Once we have defined the problem and generated the data we can start the modelling. Here we will choose a `FeedForward` neural network available in `pina.model`, and we will train using the `PINN` solver from `pina.solvers`. We highlight that this training is fairly simple, for more advanced stuff consider the tutorials in the ***Physics Informed Neural Networks*** section of ***Tutorials***. For training we use the `Trainer` class from `pina.trainer`. Here we show a very short training and some method for plotting the results. Notice that by default all relevant metrics (e.g. MSE error during training) are going to be tracked using a `lightining` logger, by default `CSVLogger`. If you want to track the metric by yourself without a logger, use `pina.callbacks.MetricTracker`.
203+
# Once we have defined the problem and generated the data we can start the modelling. Here we will choose a `FeedForward` neural network available in `pina.model`, and we will train using the `PINN` solver from `pina.solver`. We highlight that this training is fairly simple, for more advanced stuff consider the tutorials in the ***Physics Informed Neural Networks*** section of ***Tutorials***. For training we use the `Trainer` class from `pina.trainer`. Here we show a very short training and some method for plotting the results. Notice that by default all relevant metrics (e.g. MSE error during training) are going to be tracked using a `lightning` logger, by default `CSVLogger`. If you want to track the metric by yourself without a logger, use `pina.callback.MetricTracker`.
195204

196-
# In[ ]:
205+
# In[7]:
197206

198207

199208
from pina import Trainer
200-
from pina.solvers import PINN
209+
from pina.solver import PINN
201210
from pina.model import FeedForward
202-
from pina.callbacks import MetricTracker
211+
from lightning.pytorch.loggers import TensorBoardLogger
212+
from pina.optim import TorchOptimizer
203213

204214

205215
# build the model
@@ -211,42 +221,93 @@ def truth_solution(self, pts):
211221
)
212222

213223
# create the PINN object
214-
pinn = PINN(problem, model)
224+
pinn = PINN(problem, model, TorchOptimizer(torch.optim.Adam, lr=0.005))
215225

216226
# create the trainer
217-
trainer = Trainer(solver=pinn, max_epochs=1500, callbacks=[MetricTracker()], accelerator='cpu', enable_model_summary=False) # we train on CPU and avoid model summary at beginning of training (optional)
227+
trainer = Trainer(solver=pinn, max_epochs=1500, logger=TensorBoardLogger('tutorial_logs'),
228+
accelerator='cpu',
229+
train_size=1.0,
230+
test_size=0.0,
231+
val_size=0.0,
232+
enable_model_summary=False) # we train on CPU and avoid model summary at beginning of training (optional)
218233

219234
# train
220235
trainer.train()
221236

222237

223-
# After the training we can inspect trainer logged metrics (by default **PINA** logs mean square error residual loss). The logged metrics can be accessed online using one of the `Lightinig` loggers. The final loss can be accessed by `trainer.logged_metrics`
238+
# After the training we can inspect trainer logged metrics (by default **PINA** logs mean square error residual loss). The logged metrics can be accessed online using one of the `Lightning` loggers. The final loss can be accessed by `trainer.logged_metrics`
224239

225-
# In[7]:
240+
# In[8]:
226241

227242

228243
# inspecting final loss
229244
trainer.logged_metrics
230245

231246

232-
# By using the `Plotter` class from **PINA** we can also do some quatitative plots of the solution.
247+
# By using `matplotlib` we can also do some qualitative plots of the solution.
233248

234-
# In[8]:
249+
# In[9]:
235250

236251

237-
# plotting the solution
238-
pl.plot(solver=pinn)
252+
pts = pinn.problem.spatial_domain.sample(256, 'grid', variables='x')
253+
predicted_output = pinn.forward(pts).extract('u').as_subclass(torch.Tensor).cpu().detach()
254+
true_output = pinn.problem.truth_solution(pts).cpu().detach()
255+
pts = pts.cpu()
256+
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(8, 8))
257+
ax.plot(pts.extract(['x']), predicted_output, label='Neural Network solution')
258+
ax.plot(pts.extract(['x']), true_output, label='True solution')
259+
plt.legend()
239260

240261

241-
# The solution is overlapped with the actual one, and they are barely indistinguishable. We can also plot easily the loss:
262+
# The solution is overlapped with the actual one, and they are barely indistinguishable. We can also take a look at the loss using `TensorBoard`:
242263

243-
# In[9]:
264+
# In[ ]:
265+
266+
267+
print('\nTo load TensorBoard run load_ext tensorboard on your terminal')
268+
print("To visualize the loss you can run tensorboard --logdir 'tutorial_logs' on your terminal\n")
269+
270+
271+
# As we can see the loss has not reached a minimum, suggesting that we could train for longer! Alternatively, we can also take a look at the loss using callbacks. Here we use `MetricTracker` from `pina.callback`:
272+
273+
# In[11]:
274+
275+
276+
from pina.callback import MetricTracker
244277

278+
#create the model
279+
newmodel = FeedForward(
280+
layers=[10, 10],
281+
func=torch.nn.Tanh,
282+
output_dimensions=len(problem.output_variables),
283+
input_dimensions=len(problem.input_variables)
284+
)
245285

246-
pl.plot_loss(trainer=trainer, label = 'mean_loss', logy=True)
286+
# create the PINN object
287+
newpinn = PINN(problem, newmodel, optimizer=TorchOptimizer(torch.optim.Adam, lr=0.005))
288+
289+
# create the trainer
290+
newtrainer = Trainer(solver=newpinn, max_epochs=1500, logger=True, #enable parameter logging
291+
callbacks=[MetricTracker()],
292+
accelerator='cpu',
293+
train_size=1.0,
294+
test_size=0.0,
295+
val_size=0.0,
296+
enable_model_summary=False) # we train on CPU and avoid model summary at beginning of training (optional)
247297

298+
# train
299+
newtrainer.train()
300+
301+
#plot loss
302+
trainer_metrics = newtrainer.callbacks[0].metrics
303+
loss = trainer_metrics['train_loss']
304+
epochs = range(len(loss))
305+
plt.plot(epochs, loss.cpu())
306+
# plotting
307+
plt.xlabel('epoch')
308+
plt.ylabel('loss')
309+
plt.yscale('log')
248310

249-
# As we can see the loss has not reached a minimum, suggesting that we could train for longer
250311

251312
# ## What's next?
252313
#

tutorials/tutorial10/tutorial.ipynb

Lines changed: 30 additions & 25 deletions
Large diffs are not rendered by default.

tutorials/tutorial10/tutorial.py

Lines changed: 22 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -32,14 +32,17 @@
3232

3333
import torch
3434
import matplotlib.pyplot as plt
35-
plt.style.use('tableau-colorblind10')
35+
import warnings
36+
3637
from scipy import io
3738
from pina import Condition, LabelTensor
3839
from pina.problem import AbstractProblem
3940
from pina.model import AveragingNeuralOperator
40-
from pina.solvers import SupervisedSolver
41+
from pina.solver import SupervisedSolver
4142
from pina.trainer import Trainer
4243

44+
warnings.filterwarnings('ignore')
45+
4346

4447
# ## Data Generation
4548
#
@@ -81,7 +84,7 @@
8184

8285

8386
# load data
84-
data=io.loadmat("dat/Data_KS.mat")
87+
data=io.loadmat("data/Data_KS.mat")
8588

8689
# converting to label tensor
8790
initial_cond_train = LabelTensor(torch.tensor(data['initial_cond_train'], dtype=torch.float), ['t','x','u0'])
@@ -203,30 +206,33 @@ def forward(self, x):
203206
# We will now focus on solving the KS equation using the `SupervisedSolver` class
204207
# and the `AveragingNeuralOperator` model. As done in the [FNO tutorial](https://github.com/mathLab/PINA/blob/master/tutorials/tutorial5/tutorial.ipynb) we now create the `NeuralOperatorProblem` class with `AbstractProblem`.
205208

206-
# In[6]:
209+
# In[5]:
207210

208211

209212
# expected running time ~ 1 minute
210213

211214
class NeuralOperatorProblem(AbstractProblem):
212215
input_variables = initial_cond_train.labels
213216
output_variables = sol_train.labels
214-
conditions = {'data' : Condition(input_points=initial_cond_train,
215-
output_points=sol_train)}
217+
conditions = {'data' : Condition(input=initial_cond_train,
218+
target=sol_train)}
216219

217220

218221
# initialize problem
219222
problem = NeuralOperatorProblem()
220223
# initialize solver
221-
solver = SupervisedSolver(problem=problem, model=model,optimizer_kwargs={"lr":0.001})
224+
solver = SupervisedSolver(problem=problem, model=model)
222225
# train, only CPU and avoid model summary at beginning of training (optional)
223-
trainer = Trainer(solver=solver, max_epochs=40, accelerator='cpu', enable_model_summary=False, log_every_n_steps=-1, batch_size=5) # we train on CPU and avoid model summary at beginning of training (optional)
226+
trainer = Trainer(solver=solver, max_epochs=40, accelerator='cpu', enable_model_summary=False, log_every_n_steps=-1, batch_size=5, # we train on CPU and avoid model summary at beginning of training (optional)
227+
train_size=1.0,
228+
val_size=0.0,
229+
test_size=0.0)
224230
trainer.train()
225231

226232

227233
# We can now see some plots for the solutions
228234

229-
# In[7]:
235+
# In[6]:
230236

231237

232238
sample_number = 2
@@ -236,13 +242,13 @@ class NeuralOperatorProblem(AbstractProblem):
236242
no_sol=no_sol[5])
237243

238244

239-
# As we can see we can obtain nice result considering the small trainint time and the difficulty of the problem!
240-
# Let's see how the training and testing error:
245+
# As we can see we can obtain nice result considering the small training time and the difficulty of the problem!
246+
# Let's take a look at the training and testing error:
241247

242-
# In[8]:
248+
# In[7]:
243249

244250

245-
from pina.loss.loss_interface import PowerLoss
251+
from pina.loss import PowerLoss
246252

247253
error_metric = PowerLoss(p=2) # we use the MSE loss
248254

@@ -255,14 +261,14 @@ class NeuralOperatorProblem(AbstractProblem):
255261
print(f'Testing error: {float(err_test):.3f}')
256262

257263

258-
# as we can see the error is pretty small, which agrees with what we can see from the previous plots.
264+
# As we can see the error is pretty small, which agrees with what we can see from the previous plots.
259265

260266
# ## What's next?
261267
#
262268
# Now you know how to solve a time dependent neural operator problem in **PINA**! There are multiple directions you can go now:
263269
#
264-
# 1. Train the network for longer or with different layer sizes and assert the finaly accuracy
270+
# 1. Train the network for longer or with different layer sizes and assert the final accuracy
265271
#
266-
# 2. We left a more challenging dataset [Data_KS2.mat](dat/Data_KS2.mat) where $A_k \in [-0.5, 0.5]$, $\ell_k \in [1, 2, 3]$, $\phi_k \in [0, 2\pi]$ for loger training
272+
# 2. We left a more challenging dataset [Data_KS2.mat](data/Data_KS2.mat) where $A_k \in [-0.5, 0.5]$, $\ell_k \in [1, 2, 3]$, $\phi_k \in [0, 2\pi]$ for longer training
267273
#
268274
# 3. Compare the performance between the different neural operators (you can even try to implement your favourite one!)

0 commit comments

Comments
 (0)