update configs

Commit a9e41f3f79 (parent 0a17ca2cd1)
Thang Vu, 2022-04-08 10:12:50 +00:00
7 changed files with 50 additions and 88 deletions

View File

@@ -1,9 +1,3 @@
-GENERAL:
-  task: train # train, test
-  manual_seed: 123
-  model_dir: model/softgroup/softgroup.py
-  dataset_dir: data/scannetv2_inst.py
model:
  channels: 32
  num_blocks: 7
@@ -20,106 +14,59 @@ model:
                          8331., 3948., 3166., 5629., 11719.,
                          1003., 3317., 4912., 10221., 3889.,
                          4136., 2120., 945., 3967., 2589.]
    npoint_thr: 0.05 # absolute if class_numpoint == -1, relative if class_numpoint != -1
    ignore_classes: [0, 1]
  instance_voxel_cfg:
    scale: 50
    spatial_shape: 20
  train_cfg:
    max_proposal_num: 200
    pos_iou_thr: 0.5
  test_cfg:
    x4_split: False
    cls_score_thr: 0.001
    mask_score_thr: -0.5
    min_npoint: 100
-  fixed_modules: []
+  fixed_modules: ['input_conv', 'unet', 'output_layer', 'semantic_linear', 'offset_linear']
data:
  train:
    type: 'scannetv2'
    data_root: 'dataset/scannetv2'
-    prefix: 'val'
+    prefix: 'train'
    suffix: '_inst_nostuff.pth'
    training: True
    voxel_cfg:
      scale: 50
      spatial_shape: [128, 512]
      max_npoint: 250000
      min_npoint: 5000
  test:
    type: 'scannetv2'
    data_root: 'dataset/scannetv2'
    prefix: 'val'
    suffix: '_inst_nostuff.pth'
    training: False
    voxel_cfg:
      scale: 50
      spatial_shape: [128, 512]
      max_npoint: 250000
-data_loader:
+      min_npoint: 5000
+dataloader:
  train:
    batch_size: 4
    num_workers: 4
  test:
    batch_size: 1
    num_workers: 16
-DATA:
-  data_root: dataset
-  dataset: scannetv2
-  filename_suffix: _inst_nostuff.pth
-  semantic_classes: 20
-  classes: 18
-  class_numpoint_mean: [-1., -1., 3917., 12056., 2303.,
-                        8331., 3948., 3166., 5629., 11719.,
-                        1003., 3317., 4912., 10221., 3889.,
-                        4136., 2120., 945., 3967., 2589.]
-  ignore_label: -100
-  input_channel: 3
-  scale: 50 # voxel_size = 1 / scale, scale 50 -> voxel_size 0.02m
-  batch_size: 4
-  full_scale: [128, 512]
-  max_npoint: 250000
-  mode: 4 # 4=mean
-STRUCTURE:
-  model_name: softgroup
-  width: 32
-  block_residual: True
-  block_reps: 2
-  use_coords: True
-  semantic_only: False
-train:
-  epochs: 500
-  train_workers: 4 # data loader workers
-  optim: Adam # Adam or SGD
+optimizer:
+  type: 'Adam'
  lr: 0.001
-  step_epoch: 200
-  multiplier: 0.5
-  momentum: 0.9
-  weight_decay: 0.0001
-  save_freq: 16 # also eval_freq
-  loss_weight: [1.0, 1.0, 1.0, 1.0, 1.0] # semantic_loss, offset_norm_loss, cls_loss, mask_loss, score_loss
-  fg_thresh: 1.
-  bg_thresh: 0.
-  score_scale: 50 # the minimal voxel size is 2cm
-  score_fullscale: 20
-  score_mode: 4 # mean
-  pretrain_path: 'hais_ckpt.pth'
-  pretrain_module: ['input_conv', 'unet', 'output_layer', 'semantic_linear', 'offset_linear', 'intra_ins_unet', 'intra_ins_outputlayer']
-  fix_module: ['input_conv', 'unet', 'output_layer', 'semantic_linear', 'offset_linear']
-  point_aggr_radius: 0.04
-  cluster_shift_meanActive: 300
-  prepare_epochs: -1
-  max_proposal_num: 200
-  iou_thr: 0.5
-  score_thr: 0.2
-TEST:
-  split: val
-  test_epoch: 500
-  test_workers: 16
-  test_seed: 567
-  using_NMS: False
-  TEST_NMS_THRESH: 0.3
-  TEST_SCORE_THRESH: -1
-  TEST_NPOINT_THRESH: 100
-  eval: True
-  save_semantic: False
-  save_pt_offsets: False
-  save_instance: False
-  test_mask_score_thre: -0.5 # bias fg << bg
+epochs: 512
+step_epoch: 200
+save_freq: 8
+pretrain: 'hais_ckpt.pth'
+work_dir: 'work_dirs/softgroup_scannet'
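
Note: beyond dropping the legacy GENERAL/DATA/STRUCTURE/TEST sections, the functional change in this config is fixed_modules, which now freezes the backbone (input_conv, unet, output_layer) and the semantic/offset heads while the top-down refinement stage fine-tunes from hais_ckpt.pth. A minimal sketch of how such a list is typically applied, assuming plain PyTorch submodules and a hypothetical freeze_modules helper (not the repo's exact hook):

import torch.nn as nn

def freeze_modules(model: nn.Module, fixed_modules):
    # Hypothetical helper: freeze each named submodule so only the
    # remaining (top-down refinement) layers receive gradient updates.
    for name in fixed_modules:
        module = getattr(model, name)
        module.eval()  # also freeze batch-norm running statistics
        for param in module.parameters():
            param.requires_grad = False

# e.g. freeze_modules(model, cfg.model.fixed_modules)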

View File

@@ -12,9 +12,14 @@ model:
    mean_active: 300
    class_numpoint_mean: [1823, 7457, 6189, 7424, 34229, 1724, 5439,
                          6016, 39796, 5279, 5092, 12210, 10225]
+    npoint_thr: 0.05 # absolute if class_numpoint == -1, relative if class_numpoint != -1
+    ignore_classes: [0, 1]
  instance_voxel_cfg:
    scale: 50
    spatial_shape: 20
+  train_cfg:
+    max_proposal_num: 200
+    pos_iou_thr: 0.5
  test_cfg:
    x4_split: True
    cls_score_thr: 0.001
@@ -46,9 +51,14 @@ data:
      spatial_shape: [128, 512]
      max_npoint: 250000
+      min_npoint: 5000
-data_loader:
+dataloader:
+  train:
+    batch_size: 4
+    num_workers: 4
+  test:
+    batch_size: 1
+    num_workers: 1
optimizer:
  type: 'Adam'
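
An aside on the npoint_thr comment above: when class_numpoint_mean is -1 for a class, the threshold acts as an absolute point count; otherwise a proposal is kept only if its size reaches that fraction of the class's mean instance size. A hypothetical illustration of that rule (names are assumptions, not repo code):

def keep_proposal(npoint, class_numpoint_mean, npoint_thr=0.05):
    # class_numpoint_mean == -1: npoint_thr is an absolute point count
    # (absolute mode would be configured with a count-valued threshold)
    if class_numpoint_mean == -1:
        return npoint >= npoint_thr
    # otherwise: npoint_thr is relative to the class's mean instance size
    return npoint / class_numpoint_mean >= npoint_thr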

View File

@@ -51,9 +51,14 @@ data:
      spatial_shape: [128, 512]
      max_npoint: 250000
+      min_npoint: 5000
-data_loader:
+dataloader:
+  train:
+    batch_size: 4
+    num_workers: 4
+  test:
+    batch_size: 1
+    num_workers: 1
optimizer:
  type: 'Adam'
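
Both of the other configs pick up the same nested dataloader layout, which train.py and test.py below unpack via **cfg.dataloader.train and **cfg.dataloader.test. A minimal sketch of how such a YAML becomes attribute-accessible config, assuming an attribute-dict helper like munch (the repo may use its own loader):

import yaml
from munch import Munch

with open('configs/softgroup_scannet.yaml') as f:  # illustrative path
    cfg = Munch.fromDict(yaml.safe_load(f))

print(cfg.dataloader.train.batch_size)  # -> 4
print(cfg.optimizer.type)               # -> 'Adam'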

View File

@@ -29,11 +29,11 @@ def build_dataloader(dataset, batch_size=1, num_workers=1, training=True):
            drop_last=True,
            pin_memory=True)
    else:
-        assert batch_size == 1 and num_workers == 1
+        assert batch_size == 1
        return DataLoader(
            dataset,
-            batch_size=1,
-            num_workers=1,
+            batch_size=batch_size,
+            num_workers=num_workers,
            collate_fn=dataset.collate_fn,
            shuffle=False,
            drop_last=False,
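
For context, the whole builder after this change, as a hedged sketch (the repo's version may differ in sampler and seeding details): test-time batch size stays pinned to one scene, which scene-level evaluation assumes, but the worker count is now configurable, so the num_workers: 16 in the scannet config actually takes effect.

from torch.utils.data import DataLoader

def build_dataloader(dataset, batch_size=1, num_workers=1, training=True):
    if training:
        return DataLoader(dataset, batch_size=batch_size, num_workers=num_workers,
                          collate_fn=dataset.collate_fn, shuffle=True,
                          drop_last=True, pin_memory=True)
    # evaluation still assumes one scene per batch, but workers are now free
    assert batch_size == 1
    return DataLoader(dataset, batch_size=batch_size, num_workers=num_workers,
                      collate_fn=dataset.collate_fn, shuffle=False,
                      drop_last=False, pin_memory=True)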

View File

@@ -8,7 +8,7 @@ class ScanNetDataset(CustomDataset):
               'bathtub', 'otherfurniture')

    def getInstanceInfo(self, xyz, instance_label, label):
-        ret = super().getInstanceInfo(xyz, instnace_label, label)
+        ret = super().getInstanceInfo(xyz, instance_label, label)
        instance_num, instance_pointnum, instance_cls, pt_offset_label = ret
        instance_cls = [x - 2 if x != -100 else x for x in instance_cls]
        return instance_num, instance_pointnum, instance_cls, pt_offset_label
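
Beyond the instnace_label typo fix, the surrounding lines do real work: ScanNet's first two semantic classes (wall and floor) carry no instances, so instance class ids are shifted down by 2 while the -100 ignore label passes through unchanged. A quick self-contained check:

# wall/floor (ids 0-1) have no instances, so id 2 becomes class 0, etc.
instance_cls = [2, 5, -100, 19]
remapped = [x - 2 if x != -100 else x for x in instance_cls]
assert remapped == [0, 3, -100, 17]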

View File

@@ -82,7 +82,7 @@ if __name__ == '__main__':
    model.cuda()

    dataset = build_dataset(cfg.data.test, logger)
-    dataloader = build_dataloader(dataset, training=False)
+    dataloader = build_dataloader(dataset, training=False, **cfg.dataloader.test)
    all_preds, all_gts = [], []
    with torch.no_grad():
        model = model.eval()
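
**cfg.dataloader.test simply expands the config mapping into keyword arguments, so with the scannet config above this call is equivalent to:

dataloader = build_dataloader(dataset, training=False, batch_size=1, num_workers=16)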

View File

@@ -87,8 +87,8 @@ if __name__ == '__main__':
    # data
    train_set = build_dataset(cfg.data.train, logger)
    val_set = build_dataset(cfg.data.test, logger)
-    train_loader = build_dataloader(train_set, training=True, **cfg.data.dataloader)
-    val_loader = build_dataloader(val_set, training=False)
+    train_loader = build_dataloader(train_set, training=True, **cfg.dataloader.train)
+    val_loader = build_dataloader(val_set, training=False, **cfg.dataloader.test)

    # optim
    optimizer = build_optimizer(model, cfg.optimizer)
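
build_optimizer now receives the whole cfg.optimizer block rather than scattered TRAIN keys. A minimal sketch of what such a helper plausibly does (an assumption; the repo's implementation may handle more options):

import torch

def build_optimizer(model, optim_cfg):
    # Split the 'type' key off and forward the remaining keys as kwargs,
    # e.g. {'type': 'Adam', 'lr': 0.001} -> torch.optim.Adam(params, lr=0.001).
    cfg = dict(optim_cfg)
    optim_class = getattr(torch.optim, cfg.pop('type'))
    params = (p for p in model.parameters() if p.requires_grad)  # skip frozen modules
    return optim_class(params, **cfg)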