
Commit 8f22855

reapply linter & upgrade black to 24.3.0

Yanghan Wang authored and facebook-github-bot committed
Summary: Pull Request resolved: #5275

Upgrade the `black` and `flake8` versions to match FB internal. Re-run `./vision/fair/detectron2/dev/linter.sh` to reapply the linter. Manually fix lint errors.

Differential Revision: D56913399

fbshipit-source-id: 0e33fd3a888596871ad641a1c43562a61b50556a
1 parent bce6d72 commit 8f22855
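
Most of the 48 touched files follow a couple of mechanical patterns. Below is a minimal, hypothetical sketch (the names and values are illustrative, not taken from detectron2) of the two patterns that dominate this diff after moving black from 22.3.0 to 24.3.0: a multi-line conditional expression used as a call argument is now wrapped in its own parentheses, and a trailing comma now follows `**kwargs` when a call or signature is split across lines.

```python
def describe(predictions: dict) -> str:
    # black 24.x wraps a multi-line conditional expression used as a call
    # argument in its own parentheses (previously it was split bare).
    return "{}: {}".format(
        "frame",
        (
            "detected {} instances".format(len(predictions["instances"]))
            if "instances" in predictions
            else "finished"
        ),
    )


def run(loader_fn, **kwargs):
    # The upgraded black also appends a trailing comma after **kwargs when
    # the call is split across lines.
    return loader_fn(
        num_workers=0,
        **kwargs,
    )


if __name__ == "__main__":
    print(describe({"instances": [1, 2, 3]}))  # frame: detected 3 instances
    print(run(dict, aspect_ratio_grouping=True))
```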


48 files changed: +285 -476 lines

.github/workflows/workflow.yml

+2 -2

```diff
@@ -18,8 +18,8 @@ jobs:
         # flake8-bugbear flake8-comprehensions are useful but not available internally
         run: |
           python -m pip install --upgrade pip
-          python -m pip install flake8==3.8.1 isort==4.3.21
-          python -m pip install black==22.3.0
+          python -m pip install flake8==6.1.0 isort==4.3.21
+          python -m pip install black==24.3.0
           flake8 --version
       - name: Lint
         run: |
```
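
As a side note, here is a hypothetical local check (not part of the repo) that the linters installed in a development environment match the versions the workflow pins above; it only assumes Python 3.8+ for `importlib.metadata`.

```python
# Hypothetical helper: compare locally installed linter versions against the
# versions pinned in the CI workflow above.
from importlib import metadata

PINNED = {"black": "24.3.0", "flake8": "6.1.0", "isort": "4.3.21"}

for package, expected in PINNED.items():
    try:
        installed = metadata.version(package)
    except metadata.PackageNotFoundError:
        print(f"{package}: not installed (CI pins {expected})")
        continue
    status = "OK" if installed == expected else f"mismatch (CI pins {expected})"
    print(f"{package} {installed}: {status}")
```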

configs/Misc/torchvision_imagenet_R_50.py

-1

```diff
@@ -10,7 +10,6 @@
 
 """
 
-
 import torch
 from torch import nn
 from torch.nn import functional as F
```

configs/common/train.py

+1 -1

```diff
@@ -13,6 +13,6 @@
     checkpointer=dict(period=5000, max_to_keep=100),  # options for PeriodicCheckpointer
     eval_period=5000,
     log_period=20,
-    device="cuda"
+    device="cuda",
     # ...
 )
```
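
For context on this one-character change: black treats a trailing comma after the last element as a "magic trailing comma" and keeps the surrounding call expanded one argument per line. A minimal sketch, loosely modeled on `configs/common/train.py` (the values are illustrative):

```python
# Illustrative only: with a trailing comma after the last entry, black keeps
# the call expanded one argument per line rather than collapsing it.
train = dict(
    eval_period=5000,
    log_period=20,
    device="cuda",
)
print(train["device"])  # cuda
```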

demo/demo.py

+10 -17

```diff
@@ -2,13 +2,12 @@
 import argparse
 import glob
 import multiprocessing as mp
+import numpy as np
 import os
 import tempfile
 import time
 import warnings
-
 import cv2
-import numpy as np
 import tqdm
 
 from detectron2.config import get_cfg
@@ -32,9 +31,7 @@ def setup_cfg(args):
     # Set score_threshold for builtin models
     cfg.MODEL.RETINANET.SCORE_THRESH_TEST = args.confidence_threshold
     cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = args.confidence_threshold
-    cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = (
-        args.confidence_threshold
-    )
+    cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = args.confidence_threshold
     cfg.freeze()
     return cfg
 
@@ -47,9 +44,7 @@ def get_parser():
         metavar="FILE",
         help="path to config file",
     )
-    parser.add_argument(
-        "--webcam", action="store_true", help="Take inputs from webcam."
-    )
+    parser.add_argument("--webcam", action="store_true", help="Take inputs from webcam.")
     parser.add_argument("--video-input", help="Path to video file.")
     parser.add_argument(
         "--input",
@@ -118,9 +113,11 @@ def main() -> None:
             logger.info(
                 "{}: {} in {:.2f}s".format(
                     path,
-                    "detected {} instances".format(len(predictions["instances"]))
-                    if "instances" in predictions
-                    else "finished",
+                    (
+                        "detected {} instances".format(len(predictions["instances"]))
+                        if "instances" in predictions
+                        else "finished"
+                    ),
                     time.time() - start_time,
                 )
             )
@@ -130,9 +127,7 @@ def main() -> None:
                     assert os.path.isdir(args.output), args.output
                     out_filename = os.path.join(args.output, os.path.basename(path))
                 else:
-                    assert (
-                        len(args.input) == 1
-                    ), "Please specify a directory with args.output"
+                    assert len(args.input) == 1, "Please specify a directory with args.output"
                     out_filename = args.output
                 visualized_output.save(out_filename)
         else:
@@ -159,9 +154,7 @@ def main() -> None:
         num_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
         basename = os.path.basename(args.video_input)
         codec, file_ext = (
-            ("x264", ".mkv")
-            if test_opencv_video_format("x264", ".mkv")
-            else ("mp4v", ".mp4")
+            ("x264", ".mkv") if test_opencv_video_format("x264", ".mkv") else ("mp4v", ".mp4")
         )
         if codec == ".mp4v":
             warnings.warn("x264 codec not available, switching to mp4v")
```

detectron2/checkpoint/c2_model_loading.py

-1

```diff
@@ -4,7 +4,6 @@
 import re
 from typing import Dict, List
 import torch
-from tabulate import tabulate
 
 
 def convert_basic_c2_names(original_keys):
```

detectron2/data/build.py

+27 -20

```diff
@@ -364,7 +364,7 @@ def build_batch_data_loader(
             collate_fn=operator.itemgetter(0),  # don't batch, but yield individual elements
             worker_init_fn=worker_init_reset_seed,
             generator=generator,
-            **kwargs
+            **kwargs,
         )  # yield individual mapped dict
         data_loader = AspectRatioGroupedDataset(data_loader, batch_size)
         if collate_fn is None:
@@ -408,12 +408,14 @@ def _build_weighted_sampler(cfg, enable_category_balance=False):
         name: get_detection_dataset_dicts(
             [name],
             filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS,
-            min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE
-            if cfg.MODEL.KEYPOINT_ON
-            else 0,
-            proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN
-            if cfg.MODEL.LOAD_PROPOSALS
-            else None,
+            min_keypoints=(
+                cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE
+                if cfg.MODEL.KEYPOINT_ON
+                else 0
+            ),
+            proposal_files=(
+                cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None
+            ),
         )
         for name in cfg.DATASETS.TRAIN
     }
@@ -466,9 +468,9 @@ def _train_loader_from_config(cfg, mapper=None, *, dataset=None, sampler=None):
         dataset = get_detection_dataset_dicts(
             cfg.DATASETS.TRAIN,
             filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS,
-            min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE
-            if cfg.MODEL.KEYPOINT_ON
-            else 0,
+            min_keypoints=(
+                cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE if cfg.MODEL.KEYPOINT_ON else 0
+            ),
             proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None,
         )
         _log_api_usage("dataset." + cfg.DATASETS.TRAIN[0])
@@ -522,7 +524,7 @@ def build_detection_train_loader(
     aspect_ratio_grouping=True,
     num_workers=0,
     collate_fn=None,
-    **kwargs
+    **kwargs,
 ):
     """
     Build a dataloader for object detection with some default features.
@@ -574,7 +576,7 @@
         aspect_ratio_grouping=aspect_ratio_grouping,
         num_workers=num_workers,
         collate_fn=collate_fn,
-        **kwargs
+        **kwargs,
     )
 
 
@@ -589,21 +591,26 @@ def _test_loader_from_config(cfg, dataset_name, mapper=None):
     dataset = get_detection_dataset_dicts(
         dataset_name,
         filter_empty=False,
-        proposal_files=[
-            cfg.DATASETS.PROPOSAL_FILES_TEST[list(cfg.DATASETS.TEST).index(x)] for x in dataset_name
-        ]
-        if cfg.MODEL.LOAD_PROPOSALS
-        else None,
+        proposal_files=(
+            [
+                cfg.DATASETS.PROPOSAL_FILES_TEST[list(cfg.DATASETS.TEST).index(x)]
+                for x in dataset_name
+            ]
+            if cfg.MODEL.LOAD_PROPOSALS
+            else None
+        ),
     )
     if mapper is None:
         mapper = DatasetMapper(cfg, False)
     return {
         "dataset": dataset,
         "mapper": mapper,
         "num_workers": cfg.DATALOADER.NUM_WORKERS,
-        "sampler": InferenceSampler(len(dataset))
-        if not isinstance(dataset, torchdata.IterableDataset)
-        else None,
+        "sampler": (
+            InferenceSampler(len(dataset))
+            if not isinstance(dataset, torchdata.IterableDataset)
+            else None
+        ),
     }
```

detectron2/data/datasets/cityscapes.py

+10 -21

```diff
@@ -3,17 +3,16 @@
 import json
 import logging
 import multiprocessing as mp
+import numpy as np
 import os
 from itertools import chain
-
-import numpy as np
 import pycocotools.mask as mask_util
+from PIL import Image
 
 from detectron2.structures import BoxMode
 from detectron2.utils.comm import get_world_size
 from detectron2.utils.file_io import PathManager
 from detectron2.utils.logger import setup_logger
-from PIL import Image
 
 try:
     import cv2  # noqa
@@ -40,9 +39,7 @@ def _get_cityscapes_files(image_dir, gt_dir):
             assert basename.endswith(suffix), basename
             basename = basename[: -len(suffix)]
 
-            instance_file = os.path.join(
-                city_gt_dir, basename + "gtFine_instanceIds.png"
-            )
+            instance_file = os.path.join(city_gt_dir, basename + "gtFine_instanceIds.png")
             label_file = os.path.join(city_gt_dir, basename + "gtFine_labelIds.png")
             json_file = os.path.join(city_gt_dir, basename + "gtFine_polygons.json")
 
@@ -79,9 +76,7 @@ def load_cityscapes_instances(image_dir, gt_dir, from_json=True, to_polygons=Tru
     pool = mp.Pool(processes=max(mp.cpu_count() // get_world_size() // 2, 4))
 
     ret = pool.map(
-        functools.partial(
-            _cityscapes_files_to_dict, from_json=from_json, to_polygons=to_polygons
-        ),
+        functools.partial(_cityscapes_files_to_dict, from_json=from_json, to_polygons=to_polygons),
         files,
     )
     logger.info("Loaded {} images from {}".format(len(ret), image_dir))
@@ -110,9 +105,7 @@ def load_cityscapes_semantic(image_dir, gt_dir):
     ret = []
     # gt_dir is small and contain many small files. make sense to fetch to local first
     gt_dir = PathManager.get_local_path(gt_dir)
-    for image_file, _, label_file, json_file in _get_cityscapes_files(
-        image_dir, gt_dir
-    ):
+    for image_file, _, label_file, json_file in _get_cityscapes_files(image_dir, gt_dir):
         label_file = label_file.replace("labelIds", "labelTrainIds")
 
         with PathManager.open(json_file, "r") as f:
@@ -216,9 +209,7 @@ def _cityscapes_files_to_dict(files, from_json, to_polygons):
             elif isinstance(poly_wo_overlaps, MultiPolygon):
                 poly_list = poly_wo_overlaps.geoms
             else:
-                raise NotImplementedError(
-                    "Unknown geometric structure {}".format(poly_wo_overlaps)
-                )
+                raise NotImplementedError("Unknown geometric structure {}".format(poly_wo_overlaps))
 
             poly_coord = []
             for poly_el in poly_list:
@@ -272,9 +263,9 @@ def _cityscapes_files_to_dict(files, from_json, to_polygons):
             if to_polygons:
                 # This conversion comes from D4809743 and D5171122,
                 # when Mask-RCNN was first developed.
-                contours = cv2.findContours(
-                    mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE
-                )[-2]
+                contours = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[
+                    -2
+                ]
                 polygons = [c.reshape(-1).tolist() for c in contours if len(c) >= 3]
                 # opencv's can produce invalid polygons
                 if len(polygons) == 0:
@@ -318,9 +309,7 @@ def main() -> None:
         )
         logger.info("Done loading {} samples.".format(len(dicts)))
 
-        thing_classes = [
-            k.name for k in labels if k.hasInstances and not k.ignoreInEval
-        ]
+        thing_classes = [k.name for k in labels if k.hasInstances and not k.ignoreInEval]
         meta = Metadata().set(thing_classes=thing_classes)
 
     else:
```
