mirror of https://github.com/botastic/SoftGroup.git
synced 2025-10-16 11:45:42 +00:00

add pre-commit

This commit is contained in:
parent 2b2b431fda
commit 9a73b2feb1
@@ -1,7 +0,0 @@
-[isort]
-line_length = 100
-multi_line_output = 0
-known_standard_library = setuptools
-known_third_party = spconv,numpy,torch,torchvision
-no_lines_before = STDLIB,LOCALFOLDER
-default_section = THIRDPARTY
35 .pre-commit-config.yaml Normal file
@@ -0,0 +1,35 @@
+repos:
+  - repo: https://gitlab.com/pycqa/flake8.git
+    rev: 3.8.3
+    hooks:
+      - id: flake8
+  - repo: https://github.com/asottile/seed-isort-config
+    rev: v2.2.0
+    hooks:
+      - id: seed-isort-config
+  - repo: https://github.com/timothycrosley/isort
+    rev: 4.3.21
+    hooks:
+      - id: isort
+  - repo: https://github.com/pre-commit/mirrors-yapf
+    rev: v0.30.0
+    hooks:
+      - id: yapf
+  - repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v3.1.0
+    hooks:
+      - id: trailing-whitespace
+      - id: check-yaml
+      - id: end-of-file-fixer
+      - id: requirements-txt-fixer
+      - id: double-quote-string-fixer
+      - id: check-merge-conflict
+      - id: fix-encoding-pragma
+        args: ["--remove"]
+      - id: mixed-line-ending
+        args: ["--fix=lf"]
+  - repo: https://github.com/myint/docformatter
+    rev: v1.3.1
+    hooks:
+      - id: docformatter
+        args: ["--in-place", "--wrap-descriptions", "100"]
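Once this config is committed, running "pip install pre-commit" followed by "pre-commit install" registers the hooks under .git/hooks, so every subsequent "git commit" checks the staged files; a one-off "pre-commit run --all-files" sweeps the entire tree, which is presumably what produced the wholesale reformatting in the rest of this commit.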
@@ -1,5 +0,0 @@
-[style]
-BASED_ON_STYLE = pep8
-BLANK_LINE_BEFORE_NESTED_CLASS_OR_DEF = true
-SPLIT_BEFORE_EXPRESSION_AFTER_OPENING_PAREN = true
-COLUMN_LIMIT = 100
13 setup.cfg Normal file
@@ -0,0 +1,13 @@
+[isort]
+line_length = 100
+multi_line_output = 0
+known_standard_library = setuptools
+known_third_party = data,model,munch,numpy,pandas,plyfile,scannet_util,scipy,sklearn,spconv,tensorboardX,torch,tqdm,yaml
+no_lines_before = STDLIB,LOCALFOLDER
+default_section = THIRDPARTY
+
+[yapf]
+BASED_ON_STYLE = pep8
+BLANK_LINE_BEFORE_NESTED_CLASS_OR_DEF = true
+SPLIT_BEFORE_EXPRESSION_AFTER_OPENING_PAREN = true
+COLUMN_LIMIT = 100
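This new setup.cfg consolidates the isort and yapf settings, evidently replacing the two standalone configs deleted above (conventionally named .isort.cfg and .style.yapf). The isort keys produce the three-block import layout seen throughout this commit; a minimal sketch of the resulting header shape, mirroring the s3dis module reformatted below:

    import os.path as osp        # standard-library block
    from glob import glob

    import numpy as np           # known_third_party block
    import torch

    from ..lib.softgroup_ops import voxelization_idx   # first-party package
    from .custom import CustomDataset   # LOCALFOLDER, glued on by no_lines_before

default_section = THIRDPARTY sends any unrecognized import to the middle block, which is why the seed-isort-config hook keeps the known_third_party list regenerated.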
@@ -1,6 +1,7 @@
+from torch.utils.data import DataLoader
+
 from .s3dis import S3DISDataset
 from .scannetv2 import ScanNetDataset
-from torch.utils.data import DataLoader

 __all__ = ['S3DISDataset', 'ScanNetDataset', 'build_dataset']

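The source hunks from here on are mechanical output of the hooks configured above: seed-isort-config and isort regroup and reorder imports (as in this __init__ module), double-quote-string-fixer rewrites double-quoted Python strings to single quotes, and yapf rewraps anything past the 100-column limit.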
@@ -1,16 +1,17 @@
-from .custom import CustomDataset
-import torch
-import numpy as np
-from glob import glob
 import os.path as osp
+from glob import glob
+
+import numpy as np
+import torch

 from ..lib.softgroup_ops import voxelization_idx
+from .custom import CustomDataset


 class S3DISDataset(CustomDataset):

-    CLASSES = ("ceiling", "floor", "wall", "beam", "column", "window", "door", "chair", "table",
-               "bookcase", "sofa", "board", "clutter")
+    CLASSES = ('ceiling', 'floor', 'wall', 'beam', 'column', 'window', 'door', 'chair', 'table',
+               'bookcase', 'sofa', 'board', 'clutter')

     def get_filenames(self):
         if isinstance(self.prefix, str):
@@ -1,4 +1,5 @@
 from setuptools import setup
+
 from torch.utils.cpp_extension import BuildExtension, CUDAExtension

 setup(
@@ -1,8 +1,9 @@
+from collections import OrderedDict
+
 import spconv
+import torch
 from spconv.modules import SparseModule
 from torch import nn
-from collections import OrderedDict
-import torch


 class ResidualBlock(SparseModule):
@@ -1,6 +1,7 @@
-from .dist import get_dist_info
 import logging
+
+from .dist import get_dist_info


 def get_root_logger(log_file=None, log_level=logging.INFO):
     logger = logging.getLogger('pc-seg')
105 visualization.py
@@ -1,10 +1,11 @@
-import numpy as np
-import os, glob, argparse
-import torch
+import argparse
+import glob
+import os
 from operator import itemgetter
+
 import cv2
+import numpy as np
+import torch

 # yapf:disable
 COLOR_DETECTRON2 = np.array(
     [
         0.000, 0.447, 0.741,
@@ -82,10 +83,14 @@ COLOR_DETECTRON2 = np.array(
         0.857, 0.857, 0.857,
         # 1.000, 1.000, 1.000
     ]).astype(np.float32).reshape(-1, 3) * 255
 # yapf:enable

 SEMANTIC_IDXS = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39])
-SEMANTIC_NAMES = np.array(['wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', 'bookshelf', 'picture', 'counter',
-                           'desk', 'curtain', 'refridgerator', 'shower curtain', 'toilet', 'sink', 'bathtub', 'otherfurniture'])
+SEMANTIC_NAMES = np.array([
+    'wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', 'bookshelf',
+    'picture', 'counter', 'desk', 'curtain', 'refridgerator', 'shower curtain', 'toilet', 'sink',
+    'bathtub', 'otherfurniture'
+])
 CLASS_COLOR = {
     'unannotated': [0, 0, 0],
     'floor': [143, 223, 142],
@@ -109,21 +114,43 @@ CLASS_COLOR = {
     'sink': [110, 128, 143],
     'otherfurniture': [80, 83, 160]
 }
-SEMANTIC_IDX2NAME = {1: 'wall', 2: 'floor', 3: 'cabinet', 4: 'bed', 5: 'chair', 6: 'sofa', 7: 'table', 8: 'door', 9: 'window', 10: 'bookshelf', 11: 'picture',
-                     12: 'counter', 14: 'desk', 16: 'curtain', 24: 'refridgerator', 28: 'shower curtain', 33: 'toilet', 34: 'sink', 36: 'bathtub', 39: 'otherfurniture'}
+SEMANTIC_IDX2NAME = {
+    1: 'wall',
+    2: 'floor',
+    3: 'cabinet',
+    4: 'bed',
+    5: 'chair',
+    6: 'sofa',
+    7: 'table',
+    8: 'door',
+    9: 'window',
+    10: 'bookshelf',
+    11: 'picture',
+    12: 'counter',
+    14: 'desk',
+    16: 'curtain',
+    24: 'refridgerator',
+    28: 'shower curtain',
+    33: 'toilet',
+    34: 'sink',
+    36: 'bathtub',
+    39: 'otherfurniture'
+}


 def get_coords_color(opt):
     if opt.dataset == 's3dis':
         assert opt.data_split in ['Area_1', 'Area_2', 'Area_3', 'Area_4', 'Area_5', 'Area_6'],\
             'data_split for s3dis should be one of [Area_1, Area_2, Area_3, Area_4, Area_5, Area_6]'
-        input_file = os.path.join('dataset', opt.dataset, 'preprocess', opt.room_name + '_inst_nostuff.pth')
+        input_file = os.path.join('dataset', opt.dataset, 'preprocess',
+                                  opt.room_name + '_inst_nostuff.pth')
         assert os.path.isfile(input_file), 'File not exist - {}.'.format(input_file)
         xyz, rgb, label, inst_label, _, _ = torch.load(input_file)
         # update variable to match scannet format
         opt.data_split = os.path.join('val', opt.data_split)
     else:
-        input_file = os.path.join('dataset', opt.dataset, opt.data_split, opt.room_name + '_inst_nostuff.pth')
+        input_file = os.path.join('dataset', opt.dataset, opt.data_split,
+                                  opt.room_name + '_inst_nostuff.pth')
         assert os.path.isfile(input_file), 'File not exist - {}.'.format(input_file)
         if opt.data_split == 'test':
             xyz, rgb = torch.load(input_file)
@@ -136,12 +163,14 @@ def get_coords_color(opt):
         assert opt.data_split != 'test'
         label = label.astype(np.int)
         label_rgb = np.zeros(rgb.shape)
-        label_rgb[label >= 0] = np.array(itemgetter(*SEMANTIC_NAMES[label[label >= 0]])(CLASS_COLOR))
+        label_rgb[label >= 0] = np.array(
+            itemgetter(*SEMANTIC_NAMES[label[label >= 0]])(CLASS_COLOR))
        rgb = label_rgb

    elif (opt.task == 'semantic_pred'):
        assert opt.data_split != 'train'
-        semantic_file = os.path.join(opt.prediction_path, opt.data_split, 'semantic', opt.room_name + '.npy')
+        semantic_file = os.path.join(opt.prediction_path, opt.data_split, 'semantic',
+                                     opt.room_name + '.npy')
        assert os.path.isfile(semantic_file), 'No semantic result - {}.'.format(semantic_file)
        label_pred = np.load(semantic_file).astype(np.int)  # 0~19
        label_pred_rgb = np.array(itemgetter(*SEMANTIC_NAMES[label_pred])(CLASS_COLOR))
@@ -149,13 +178,15 @@ def get_coords_color(opt):

    elif (opt.task == 'offset_semantic_pred'):
        assert opt.data_split != 'train'
-        semantic_file = os.path.join(opt.prediction_path, opt.data_split, 'semantic', opt.room_name + '.npy')
+        semantic_file = os.path.join(opt.prediction_path, opt.data_split, 'semantic',
+                                     opt.room_name + '.npy')
        assert os.path.isfile(semantic_file), 'No semantic result - {}.'.format(semantic_file)
        label_pred = np.load(semantic_file).astype(np.int)  # 0~19
        label_pred_rgb = np.array(itemgetter(*SEMANTIC_NAMES[label_pred])(CLASS_COLOR))
        rgb = label_pred_rgb

-        offset_file = os.path.join(opt.prediction_path, opt.data_split, 'coords_offsets', opt.room_name + '.npy')
+        offset_file = os.path.join(opt.prediction_path, opt.data_split, 'coords_offsets',
+                                   opt.room_name + '.npy')
        assert os.path.isfile(offset_file), 'No offset result - {}.'.format(offset_file)
        offset_coords = np.load(offset_file)
        xyz = offset_coords[:, :3] + offset_coords[:, 3:]
@@ -164,16 +195,16 @@ def get_coords_color(opt):
    elif (opt.task == 'instance_gt'):
        assert opt.data_split != 'test'
        inst_label = inst_label.astype(np.int)
-        print("Instance number: {}".format(inst_label.max() + 1))
+        print('Instance number: {}'.format(inst_label.max() + 1))
        inst_label_rgb = np.zeros(rgb.shape)
        object_idx = (inst_label >= 0)
        ins_num = inst_label.max() + 1
        ins_pointnum = np.zeros(ins_num)
        for _ins_id in range(ins_num):
            ins_pointnum[_ins_id] = (inst_label == _ins_id).sum()
        sort_idx = np.argsort(ins_pointnum)[::-1]
        for _sort_id in range(ins_num):
-            inst_label_rgb[inst_label == sort_idx[_sort_id] ] = COLOR_DETECTRON2[_sort_id % len(COLOR_DETECTRON2)]
+            inst_label_rgb[inst_label == sort_idx[_sort_id]] = COLOR_DETECTRON2[
+                _sort_id % len(COLOR_DETECTRON2)]
        rgb = inst_label_rgb

    # same color order according to instance pointnum
@@ -201,17 +232,19 @@ def get_coords_color(opt):
                continue
            mask = np.loadtxt(mask_path).astype(np.int)
            if opt.dataset == 'scannet':
-                print('{} {}: {} pointnum: {}'.format(i, masks[i], SEMANTIC_IDX2NAME[int(masks[i][1])], mask.sum()))
+                print('{} {}: {} pointnum: {}'.format(i,
+                                                      masks[i], SEMANTIC_IDX2NAME[int(masks[i][1])],
+                                                      mask.sum()))
            else:
                print('{} {}: pointnum: {}'.format(i, masks[i], mask.sum()))
            ins_pointnum[i] = mask.sum()
-            inst_label[mask == 1] = i
+            inst_label[mask == 1] = i
        sort_idx = np.argsort(ins_pointnum)[::-1]
        for _sort_id in range(ins_num):
-            inst_label_pred_rgb[inst_label == sort_idx[_sort_id] ] = COLOR_DETECTRON2[_sort_id % len(COLOR_DETECTRON2)]
+            inst_label_pred_rgb[inst_label == sort_idx[_sort_id]] = COLOR_DETECTRON2[
+                _sort_id % len(COLOR_DETECTRON2)]
        rgb = inst_label_pred_rgb


    if opt.data_split != 'test':
        sem_valid = (label != -100)
        xyz = xyz[sem_valid]
@@ -240,9 +273,10 @@ def write_ply(verts, colors, indices, output_file):
    file.write('property list uchar uint vertex_indices\n')
    file.write('end_header\n')
    for vert, color in zip(verts, colors):
-        file.write('{:f} {:f} {:f} {:d} {:d} {:d}\n'.format(
-            vert[0], vert[1], vert[2], int(color[0] * 255),
-            int(color[1] * 255), int(color[2] * 255)))
+        file.write('{:f} {:f} {:f} {:d} {:d} {:d}\n'.format(vert[0], vert[1], vert[2],
+                                                            int(color[0] * 255),
+                                                            int(color[1] * 255),
+                                                            int(color[2] * 255)))
    for ind in indices:
        file.write('3 {:d} {:d} {:d}\n'.format(ind[0], ind[1], ind[2]))
    file.close()
@@ -250,13 +284,22 @@ def write_ply(verts, colors, indices, output_file):

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
-    parser.add_argument('--dataset', choices=['scannet', 's3dis'], help='dataset for visualization', default='scannet')
-    parser.add_argument('--prediction_path', help='path to the prediction results',
-                        default='./exp/scannetv2/softgroup/softgroup_default_scannet/result')
-    parser.add_argument('--data_split', help='train/val/test for scannet or Area_ID for s3dis', default='val')
+    parser.add_argument(
+        '--dataset',
+        choices=['scannet', 's3dis'],
+        help='dataset for visualization',
+        default='scannet')
+    parser.add_argument(
+        '--prediction_path',
+        help='path to the prediction results',
+        default='./exp/scannetv2/softgroup/softgroup_default_scannet/result')
+    parser.add_argument(
+        '--data_split', help='train/val/test for scannet or Area_ID for s3dis', default='val')
    parser.add_argument('--room_name', help='room_name', default='scene0011_00')
-    parser.add_argument('--task', help='input / semantic_gt / semantic_pred / offset_semantic_pred / instance_gt / instance_pred',
-                        default='instance_pred')
+    parser.add_argument(
+        '--task',
+        help='input/semantic_gt/semantic_pred/offset_semantic_pred/instance_gt/instance_pred',
+        default='instance_pred')
    parser.add_argument('--out', help='output point cloud file in FILE.ply format')
    opt = parser.parse_args()

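For reference, the reformatted script should still be invoked as before, e.g. "python visualization.py --dataset scannet --data_split val --room_name scene0011_00 --task instance_pred --out scene0011_00.ply"; every flag and default here comes straight from the parser above.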