Skip to content

Commit 5b7f1d6

Browse files
committed
fix(config): point cloud range divided by voxel_size needs to be an INT for the initial tensor size.
1 parent 8606ff0 commit 5b7f1d6

2 files changed

Lines changed: 11 additions & 9 deletions

File tree

1_train.py

Lines changed: 11 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -28,10 +28,19 @@
2828
from scripts.network.dataloader import HDF5Dataset, collate_fn_pad
2929
from scripts.pl_model import ModelWrapper
3030

31-
@hydra.main(version_base=None, config_path="conf", config_name="config")
32-
def main(cfg):
31+
def precheck_cfg_valid(cfg):
3332
if cfg.loss_fn == 'seflowLoss' and cfg.add_seloss is None:
3433
raise ValueError("Please specify the self-supervised loss items for seflowLoss.")
34+
if (cfg.point_cloud_range[3] - cfg.point_cloud_range[0]) % cfg.voxel_size[0] != 0 or \
35+
(cfg.point_cloud_range[4] - cfg.point_cloud_range[1]) % cfg.voxel_size[1] != 0 or \
36+
(cfg.point_cloud_range[5] - cfg.point_cloud_range[2]) % cfg.voxel_size[2] != 0:
37+
# For example: 51.2/0.2=256 good, 51.2/0.3=170.67 wrong.
38+
raise ValueError("The voxel size should be able to divide the point cloud range to a INT.")
39+
return cfg
40+
41+
@hydra.main(version_base=None, config_path="conf", config_name="config")
42+
def main(cfg):
43+
precheck_cfg_valid(cfg)
3544
pl.seed_everything(cfg.seed, workers=True)
3645

3746
train_dataset = HDF5Dataset(cfg.train_data, n_frames=cfg.num_frames, dufo=(cfg.loss_fn == 'seflowLoss'))

dataprocess/README.md

Lines changed: 0 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -121,12 +121,6 @@ All these preprocess scripts will generate the same format `.h5` file. The file
121121
File: `[*:logid].h5` file named in logid. Every timestamp is the key of group (f[key]).
122122

123123
```python
124-
def process_log(data_dir: Path, mode: str, scene_num_id: int, output_dir: Path, n: Optional[int] = None) :
125-
def create_group_data(group, pc, pose, gm = None, flow_0to1=None, flow_valid=None, flow_category=None, ego_motion=None):
126-
group.create_dataset('lidar', data=pc.astype(np.float32))
127-
group.create_dataset('pose', data=pose.astype(np.float64))
128-
129-
130124
def process_log(data_dir: Path, log_id: str, output_dir: Path, n: Optional[int] = None) :
131125
def create_group_data(group, pc, gm, pose, flow_0to1=None, flow_valid=None, flow_category=None, ego_motion=None):
132126
group.create_dataset('lidar', data=pc.astype(np.float32))
@@ -138,7 +132,6 @@ def process_log(data_dir: Path, log_id: str, output_dir: Path, n: Optional[int]
138132
group.create_dataset('flow_is_valid', data=flow_valid.astype(bool))
139133
group.create_dataset('flow_category_indices', data=flow_category.astype(np.uint8))
140134
group.create_dataset('ego_motion', data=ego_motion.astype(np.float32))
141-
142135
```
143136

144137
After preprocessing, all data can use the same dataloader to load the data. As already in our DeFlow code.

0 commit comments

Comments (0)