add s3dis instruction

Thang Vu 2022-03-06 07:37:51 +00:00
parent 6a7356ffea
commit 5a48c91d29
7 changed files with 19 additions and 19 deletions

.gitignore

@@ -74,3 +74,5 @@ dataset/scannetv2/scannetv2-labels.combined.tsv
 dataset/s3dis/preprocess
 dataset/s3dis/val_gt
+dataset/s3dis/preprocess_sample
+dataset/s3dis/Stanford3dDataset_v1.2

[S3DIS dataset loader (class Dataset); filename not shown in this view]

@@ -39,14 +39,6 @@ class Dataset:
     def trainLoader(self):
-        # if self.train_split == 'trainval':
-        #     train_file_names = sorted(glob.glob(os.path.join(self.data_root, self.dataset, 'train', '*' + self.filename_suffix))
-        #                               + glob.glob(os.path.join(self.data_root, self.dataset, 'val', '*' + self.filename_suffix))
-        #                               )
-        # elif self.train_split == 'train':
-        #     train_file_names = sorted(glob.glob(os.path.join(self.data_root, self.dataset, 'train', '*' + self.filename_suffix)))
-        # else:
-        #     raise Exception
         train_file_names = []
         for area in self.train_areas:
             train_file_names += glob.glob(os.path.join(self.data_root, self.dataset, 'preprocess', area + '*' + self.filename_suffix))
@@ -83,7 +75,7 @@ class Dataset:
                                                  drop_last=False, pin_memory=True)

     def valLoader(self):
-        val_file_names = sorted(glob.glob(os.path.join(self.data_root, self.dataset, 'preprocess/_sample', self.test_area + '*' + self.filename_suffix)))
+        val_file_names = sorted(glob.glob(os.path.join(self.data_root, self.dataset, 'preprocess_sample', self.test_area + '*' + self.filename_suffix)))
         self.val_files = val_file_names
         logger.info('Validation samples: {}'.format(len(self.val_files)))
@@ -94,7 +86,7 @@ class Dataset:
     def testLoader(self):
-        self.test_file_names = sorted(glob.glob(os.path.join(self.data_root, self.dataset, 'preprocess/', self.test_area + '*' + self.filename_suffix)))
+        self.test_file_names = sorted(glob.glob(os.path.join(self.data_root, self.dataset, 'preprocess', self.test_area + '*' + self.filename_suffix)))
         self.test_files = self.test_file_names
         logger.info('Testing samples ({}): {}'.format(self.test_split, len(self.test_files)))
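Net effect of the three loader hunks: training reads every preprocessed room of the training areas from preprocess/, validation reads the downsampled copies of the held-out area from preprocess_sample/, and testing reads the full-resolution files of that same area from preprocess/. A minimal standalone sketch of the resulting file lookup, assuming the conventional S3DIS split and an illustrative filename suffix (both normally come from the config, not from this diff):

# Sketch only; the split and suffix below are assumptions for illustration.
import glob
import os

data_root, dataset = 'dataset', 's3dis'
filename_suffix = '_inst_nostuff.pth'  # assumed suffix of preprocessed rooms
train_areas = ['Area_1', 'Area_2', 'Area_3', 'Area_4', 'Area_6']  # assumed split
test_area = 'Area_5'

# trainLoader: all rooms of all training areas, full resolution
train_files = []
for area in train_areas:
    train_files += glob.glob(os.path.join(data_root, dataset, 'preprocess', area + '*' + filename_suffix))

# valLoader: downsampled rooms of the held-out area
val_files = sorted(glob.glob(os.path.join(data_root, dataset, 'preprocess_sample', test_area + '*' + filename_suffix)))

# testLoader: full-resolution rooms of the held-out area
test_files = sorted(glob.glob(os.path.join(data_root, dataset, 'preprocess', test_area + '*' + filename_suffix)))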

downsample.py

@@ -75,7 +75,7 @@ def get_parser():
                                      description="downsample s3dis by voxelization")
     parser.add_argument("--data-dir",
                         type=str,
-                        default="./inputs",
+                        default="./preprocess",
                         help="directory save processed data")
     parser.add_argument("--ratio",
                         type=float,
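With this default, downsample.py picks up the output of prepare_data_inst.py in ./preprocess without extra flags; judging by the new .gitignore entry and the valLoader change above, its voxelized output presumably lands in ./preprocess_sample. The --ratio float presumably controls how aggressively the clouds are thinned.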

[new file: S3DIS data preparation script]

@@ -0,0 +1,4 @@
+#!/bin/bash
+python prepare_data_inst.py
+python downsample.py
+python prepare_data_inst_gttxt.py
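The new script chains the three S3DIS preparation steps in order. Presumably it is meant to be run from the directory holding the raw Stanford3dDataset_v1.2/ folder, since the scripts' new defaults are all relative paths: prepare_data_inst.py converts the raw rooms into preprocess/, downsample.py derives preprocess_sample/ for validation, and prepare_data_inst_gttxt.py writes the instance ground truth used by evaluation (presumably the ignored val_gt/ directory).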

prepare_data_inst.py

@@ -105,11 +105,11 @@ def get_parser():
     parser = argparse.ArgumentParser(description="s3dis data prepare")
     parser.add_argument("--data-root",
                         type=str,
-                        default="./data",
+                        default="./Stanford3dDataset_v1.2",
                         help="root dir save data")
     parser.add_argument("--save-dir",
                         type=str,
-                        default="./inputs",
+                        default="./preprocess",
                         help="directory save processed data")
     parser.add_argument(
         "--patch",

requirements.txt

@@ -5,3 +5,5 @@ tensorboardX
 pyyaml==5.4.1
 scipy
 six
+pandas
+scikit-learn
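pandas and scikit-learn are new dependencies, presumably pulled in by the S3DIS preparation scripts above rather than by the model code itself.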

test.py

@@ -172,12 +172,12 @@ def test(model, model_fn, data_name, epoch):
             # import pdb; pdb.set_trace()
             nclusters = clusters.shape[0]
-            if nclusters > cfg.max_clusters:
-                nclusters = cfg.max_clusters
-                _, topk_inds = cluster_scores.topk(cfg.max_clusters)
-                clusters = clusters[topk_inds]
-                cluster_scores = cluster_scores[topk_inds]
-                cluster_semantic_id = cluster_semantic_id[topk_inds]
+            # if nclusters > cfg.max_clusters:
+            #     nclusters = cfg.max_clusters
+            #     _, topk_inds = cluster_scores.topk(cfg.max_clusters)
+            #     clusters = clusters[topk_inds]
+            #     cluster_scores = cluster_scores[topk_inds]
+            #     cluster_semantic_id = cluster_semantic_id[topk_inds]
             # prepare for evaluation
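Commenting this block out removes the per-scan cap of cfg.max_clusters proposals: nclusters now always reports the full proposal count and no top-k filtering by cluster_scores is applied, so evaluation sees every predicted instance, presumably to avoid dropping valid proposals on S3DIS scenes.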