-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy patheval.py
More file actions
82 lines (71 loc) · 3.31 KB
/
eval.py
File metadata and controls
82 lines (71 loc) · 3.31 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
import argparse
import datetime
import json
import os
import sys
from pathlib import Path

import numpy as np
import torch

from models.mlp import MLP
from utils.eval_utils import *
def parse_args(argv=None):
    """Parse command-line options for SDF evaluation and Marching Cubes reconstruction.

    Args:
        argv: Optional list of argument strings. Defaults to ``sys.argv[1:]``
            (parameter added, backward-compatibly, so the parser can be driven
            programmatically and unit-tested).

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser(description="Test SDF prediction and reconstruct mesh using Marching Cubes")
    parser.add_argument('--model_name', type=str, default='MLP', help='model name')
    parser.add_argument('--gpu', type=str, default='0', help='specify gpu device')
    parser.add_argument('--batch_size', type=int, default=1024, help='batch size for evaluation')
    parser.add_argument('--checkpoint', type=str, default=None, help='checkpoint')
    parser.add_argument("--grid_resolution", type=int, default=128, help="Resolution of the 3D grid for Marching Cubes")
    # BUG FIX: the original used type=bool, which is broken with argparse —
    # bool("False") is True, so ANY supplied value enabled test mode.
    # store_true gives the intended flag semantics (absent -> False).
    parser.add_argument("--test", action='store_true', help="Test mode")
    parser.add_argument('--config', type=str, default='model/config.json', help='Configuration file')
    return parser.parse_args(argv)
if __name__ == '__main__':
    args = parse_args()

    # --- SET GPU DEVICE ---
    # Must be set before the first CUDA context is created to take effect.
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # --- CREATE DIR ---
    # One timestamped directory per run: eval_experiment/<model>-<YYYY-MM-DD_HH-MM>/output
    experiment_dir = Path('./eval_experiment/')
    experiment_dir.mkdir(exist_ok=True)
    file_dir = experiment_dir / ('%s-' % args.model_name + datetime.datetime.now().strftime('%Y-%m-%d_%H-%M'))
    file_dir.mkdir(exist_ok=True)
    output_dir = file_dir / 'output'
    # Create once up front (the original re-ran os.makedirs on every loop iteration).
    output_dir.mkdir(exist_ok=True)

    # --- MODEL LOADING ---
    with open(args.config, 'r') as f:
        config = json.load(f)
    net_config = config['NetConfig']
    num_shapes = config['ShapeNum']
    latent_size = net_config['latent_size']
    # One learned latent code per shape, restored from the checkpoint below.
    latent_codes = torch.nn.Embedding(num_shapes, latent_size).to(device)
    model = MLP(**net_config).to(device)  # Initialize the model

    if args.checkpoint is None:
        # A checkpoint is mandatory for evaluation. BUG FIX: exit with a
        # non-zero status (the original used sys.exit(0), signalling success,
        # and left an unreachable `start_epoch = 0` after the exit).
        print('Please load Checkpoint to eval...')
        sys.exit(1)
    print('Load CheckPoint...')
    checkpoint = torch.load(args.checkpoint, map_location=device, weights_only=False)
    start_epoch = checkpoint['epoch']
    model.load_state_dict(checkpoint["model_state_dict"])
    latent_codes.load_state_dict(checkpoint["latent_state_dict"])

    # --- EVAL ---
    print('Start evaluating...')
    parameter_num = sum(p.numel() for p in model.parameters())
    print('Parameter number of the model: ', parameter_num)
    model.eval()

    # Create the 3D sample grid once; it is reused for every shape.
    print('Creating 3D grid...')
    grid_points = create_grid(args.grid_resolution).to(device)

    # Load per-shape normalization parameters once
    # (the original re-opened and re-parsed this JSON on every iteration).
    with open('norm_params.json', 'r') as f:
        norm_dict = json.load(f)

    # Inference only: disable autograd to avoid tracking gradients / wasting memory.
    with torch.no_grad():
        for shape_id in range(num_shapes):
            # Reconstruct mesh
            print('Reconstructing mesh...')
            params = norm_dict[str(shape_id)]
            center = np.array(params['center'])
            scale = params['scale']
            latent_code = latent_codes(torch.tensor([shape_id], device=device))  # [1, latent_size]
            mesh = reconstruct_mesh(model, grid_points, args.grid_resolution, center, scale,
                                    args.batch_size, device, latent_code=latent_code)
            # Save mesh
            output_file = os.path.join(output_dir, f"output-{shape_id}.obj")
            mesh.export(output_file)
            print(f"Reconstructed mesh saved to {output_file}")