2 changes: 1 addition & 1 deletion .github/workflows/pythonapp.yml
@@ -22,7 +22,7 @@ jobs:
# - docker-py3-pip- (shared)
# - ubuntu py37 pip-
# - os-latest-pip- (shared)
flake8-py3:
lint-py3:
runs-on: ubuntu-latest
strategy:
matrix:
2 changes: 1 addition & 1 deletion .github/workflows/weekly-preview.yml
@@ -5,7 +5,7 @@ on:
- cron: "0 2 * * 0" # 02:00 of every Sunday

jobs:
flake8-py3:
lint-py3:
runs-on: ubuntu-latest
strategy:
matrix:
2 changes: 1 addition & 1 deletion monai/apps/auto3dseg/bundle_gen.py
@@ -208,7 +208,7 @@ def _create_cmd(self, train_params: None | dict = None) -> tuple[str, str]:
config_files = []
if os.path.isdir(config_dir):
for file in sorted(os.listdir(config_dir)):
if file.endswith("yaml") or file.endswith("json"):
if file.endswith(("yaml", "json")):
# Python Fire may be confused by single-quoted WindowsPath
config_files.append(Path(os.path.join(config_dir, file)).as_posix())

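A quick note on the pattern above: `str.endswith` accepts a tuple of suffixes, so the tuple form is equivalent to the chained `or` it replaces. A minimal, self-contained sketch (the file names are illustrative only):

```python
# str.endswith with a tuple of suffixes matches if any suffix matches,
# so the chained-or form and the tuple form select the same files.
candidates = ["net.yaml", "train.json", "notes.txt", "hyper.yml"]

chained = [f for f in candidates if f.endswith("yaml") or f.endswith("json")]
tupled = [f for f in candidates if f.endswith(("yaml", "json"))]

assert chained == tupled == ["net.yaml", "train.json"]
```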
2 changes: 1 addition & 1 deletion monai/apps/detection/transforms/box_ops.py
@@ -179,7 +179,7 @@ def flip_boxes(
spatial_dims: int = get_spatial_dims(boxes=boxes)
spatial_size = ensure_tuple_rep(spatial_size, spatial_dims)
if flip_axes is None:
flip_axes = tuple(range(0, spatial_dims))
flip_axes = tuple(range(spatial_dims))
flip_axes = ensure_tuple(flip_axes)

# flip box
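For context, `range(spatial_dims)` and `range(0, spatial_dims)` are identical because `range`'s start argument defaults to 0; this and the matching edits in `box_utils.py` below are purely cosmetic. A one-line check:

```python
# range()'s start defaults to 0, so the two spellings produce the same sequence.
spatial_dims = 3  # illustrative value
assert tuple(range(0, spatial_dims)) == tuple(range(spatial_dims)) == (0, 1, 2)
```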
13 changes: 0 additions & 13 deletions monai/auto3dseg/algo_gen.py
@@ -25,23 +25,18 @@ class Algo:

def set_data_stats(self, *args, **kwargs):
"""Provide dataset (and summaries) so that the model creation can depend on the input datasets."""
pass

def train(self, *args, **kwargs):
"""Read training/validation data and output a model."""
pass

def predict(self, *args, **kwargs):
"""Read test data and output model predictions."""
pass

def get_score(self, *args, **kwargs):
"""Returns the model quality measurement based on training and validation datasets."""
pass

def get_output_path(self, *args, **kwargs):
"""Returns the algo output paths for scripts location"""
pass


class AlgoGen(Randomizable):
@@ -70,31 +65,24 @@ class AlgoGen(Randomizable):

def set_data_stats(self, *args, **kwargs): # type ignore
"""Provide dataset summaries/properties so that the generator can be conditioned on the input datasets."""
pass

def set_budget(self, *args, **kwargs):
"""Provide computational budget so that the generator outputs algorithms that requires reasonable resources."""
pass

def set_score(self, *args, **kwargs):
"""Feedback from the previously generated algo, the score can be used for new Algo generations."""
pass

def get_data_stats(self, *args, **kwargs):
"""Get current dataset summaries."""
pass

def get_budget(self, *args, **kwargs):
"""Get the current computational budget."""
pass

def get_history(self, *args, **kwargs):
"""Get the previously generated algo."""
pass

def generate(self):
"""Generate new Algo -- based on data_stats, budget, and history of previous algo generations."""
pass

def run_algo(self, *args, **kwargs):
"""
@@ -104,4 +92,3 @@ def run_algo(self, *args, **kwargs):
implemented separately is preferred to run them. In this case the controller should also report back the
scores and the algo history, so that the future ``AlgoGen.generate`` can leverage the information.
"""
pass
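All deletions in this file (and the similar ones in `client_algo.py`, `feature_pyramid_network.py`, `dints.py`, `trt_compiler.py`, and `traits.py` further down) remove a `pass` that immediately follows a docstring. The docstring is itself a valid body, so the `pass` is redundant; this is the kind of placeholder the newly selected flake8-pie rules flag. A minimal sketch with made-up names:

```python
# A docstring already serves as the function/class body, so no `pass` is needed.
# The names here are illustrative and not part of MONAI.
class BaseStep:
    """Interface whose default hooks intentionally do nothing."""

    def setup(self, *args, **kwargs):
        """Optional hook; subclasses may override."""

    def run(self, *args, **kwargs):
        """Optional hook; subclasses may override."""


step = BaseStep()
step.setup()
print(step.run())  # both hooks are no-ops and return None
```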
8 changes: 4 additions & 4 deletions monai/data/box_utils.py
@@ -591,7 +591,7 @@ def convert_box_mode(

# check validity of corners
spatial_dims = get_spatial_dims(boxes=boxes_t)
for axis in range(0, spatial_dims):
for axis in range(spatial_dims):
if (corners[spatial_dims + axis] < corners[axis]).sum() > 0:
warnings.warn("Given boxes has invalid values. The box size must be non-negative.")

@@ -731,7 +731,7 @@ def is_valid_box_values(boxes: NdarrayOrTensor) -> bool:
whether ``boxes`` is valid
"""
spatial_dims = get_spatial_dims(boxes=boxes)
for axis in range(0, spatial_dims):
for axis in range(spatial_dims):
if (boxes[:, spatial_dims + axis] < boxes[:, axis]).sum() > 0:
return False
return True
@@ -1041,7 +1041,7 @@ def spatial_crop_boxes(

# makes sure the bounding boxes are within the patch
spatial_dims = get_spatial_dims(boxes=boxes, spatial_size=roi_end)
for axis in range(0, spatial_dims):
for axis in range(spatial_dims):
boxes_t[:, axis] = boxes_t[:, axis].clamp(min=roi_start_t[axis], max=roi_end_t[axis] - TO_REMOVE)
boxes_t[:, axis + spatial_dims] = boxes_t[:, axis + spatial_dims].clamp(
min=roi_start_t[axis], max=roi_end_t[axis] - TO_REMOVE
@@ -1133,7 +1133,7 @@ def non_max_suppression(

# initialize the list of picked indexes
pick = []
idxs = torch.Tensor(list(range(0, boxes_sort.shape[0]))).to(device=boxes_t.device, dtype=torch.long)
idxs = torch.Tensor(list(range(boxes_sort.shape[0]))).to(device=boxes_t.device, dtype=torch.long)

# keep looping while some indexes still remain in the indexes list
while len(idxs) > 0:
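A side note on the `idxs` construction above (not something this change touches): `torch.arange` builds the same long-integer index tensor directly, without the intermediate Python list. A quick check, assuming PyTorch is installed:

```python
# torch.arange is the more direct way to build a 0..N-1 index tensor;
# the zero tensor below only stands in for the sorted boxes.
import torch

boxes_sort = torch.zeros((5, 4))  # illustrative placeholder

idxs_list = torch.Tensor(list(range(boxes_sort.shape[0]))).to(dtype=torch.long)
idxs_arange = torch.arange(boxes_sort.shape[0], dtype=torch.long)

assert torch.equal(idxs_list, idxs_arange)
```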
4 changes: 0 additions & 4 deletions monai/fl/client/client_algo.py
@@ -34,7 +34,6 @@ def initialize(self, extra: dict | None = None) -> None:
Args:
extra: optional extra information, e.g. dict of `ExtraItems.CLIENT_NAME` and/or `ExtraItems.APP_ROOT`.
"""
pass

def finalize(self, extra: dict | None = None) -> None:
"""
@@ -43,7 +42,6 @@ def finalize(self, extra: dict | None = None) -> None:
Args:
extra: Dict with additional information that can be provided by the FL system.
"""
pass

def abort(self, extra: dict | None = None) -> None:
"""
@@ -53,8 +51,6 @@ def abort(self, extra: dict | None = None) -> None:
extra: Dict with additional information that can be provided by the FL system.
"""

pass


class ClientAlgoStats(BaseClient):

2 changes: 1 addition & 1 deletion monai/metrics/loss_metric.py
@@ -16,10 +16,10 @@
import torch
from torch.nn.modules.loss import _Loss

from monai.config import TensorOrList
from monai.metrics.utils import do_metric_reduction
from monai.utils import MetricReduction

from ..config import TensorOrList
from .metric import CumulativeIterationMetric


1 change: 0 additions & 1 deletion monai/networks/blocks/feature_pyramid_network.py
@@ -85,7 +85,6 @@ def forward(self, results: list[Tensor], x: list[Tensor], names: list[str]):
- the extended set of results of the FPN
- the extended set of names for the results
"""
pass


class LastLevelMaxPool(ExtraFPNBlock):
1 change: 0 additions & 1 deletion monai/networks/nets/dints.py
@@ -627,7 +627,6 @@ def __init__(

def forward(self, x):
"""This function to be implemented by the architecture instances or search spaces."""
pass


class TopologyInstance(TopologyConstruction):
2 changes: 0 additions & 2 deletions monai/networks/trt_compiler.py
@@ -98,8 +98,6 @@ class ShapeError(Exception):
Exception class to report errors from setting TRT plan input shapes
"""

pass


class TRTEngine:
"""
3 changes: 1 addition & 2 deletions monai/transforms/regularization/array.py
@@ -17,10 +17,9 @@
import torch

from monai.data.meta_obj import get_track_meta
from monai.transforms.transform import RandomizableTransform
from monai.utils.type_conversion import convert_to_dst_type, convert_to_tensor

from ..transform import RandomizableTransform

__all__ = ["MixUp", "CutMix", "CutOut", "Mixer"]


2 changes: 1 addition & 1 deletion monai/transforms/regularization/dictionary.py
@@ -18,10 +18,10 @@
from monai.config import KeysCollection
from monai.config.type_definitions import NdarrayOrTensor
from monai.data.meta_obj import get_track_meta
from monai.transforms.transform import MapTransform, RandomizableTransform
from monai.utils import convert_to_tensor
from monai.utils.misc import ensure_tuple

from ..transform import MapTransform, RandomizableTransform
from .array import CutMix, CutOut, MixUp

__all__ = ["MixUpd", "MixUpD", "MixUpDict", "CutMixd", "CutMixD", "CutMixDict", "CutOutd", "CutOutD", "CutOutDict"]
8 changes: 0 additions & 8 deletions monai/transforms/traits.py
@@ -74,8 +74,6 @@ class RandomizableTrait:
implementors of MONAI transforms.
"""

pass


class MultiSampleTrait:
"""
@@ -85,8 +83,6 @@ class MultiSampleTrait:
of MONAI transforms.
"""

pass


class ThreadUnsafe:
"""
@@ -98,8 +94,6 @@ class ThreadUnsafe:
its extensions, where the transform cache is built with multiple threads.
"""

pass


class ReduceTrait:
"""
@@ -108,5 +102,3 @@ class ReduceTrait:
This interface can be extended from by people adapting transforms to the MONAI framework as well
as by implementors of MONAI transforms.
"""

pass
5 changes: 2 additions & 3 deletions monai/utils/deprecate_utils.py
@@ -19,10 +19,9 @@
from types import FunctionType
from typing import Any, TypeVar

from monai import __version__
from monai.utils.module import version_leq

from .. import __version__

__all__ = ["deprecated", "deprecated_arg", "DeprecatedError", "deprecated_arg_default"]
T = TypeVar("T", type, Callable)

@@ -201,7 +200,7 @@ def _wrapper(*args, **kwargs):
# if name is specified and new_name is not specified
kwargs[new_name] = kwargs[name]
try:
sig.bind(*args, **kwargs).arguments
_ = sig.bind(*args, **kwargs).arguments
except TypeError:
# multiple values for new_name using both args and kwargs
kwargs.pop(new_name, None)
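Assigning the result to `_` keeps the part that matters here: `Signature.bind` raises `TypeError` when the combined positional and keyword arguments are invalid, while the assignment signals that the returned `.arguments` mapping is unused (a bare expression statement is what checks such as flake8-bugbear's B018 flag; that this rule motivated the edit is an assumption). A standalone sketch of the bind-as-validation pattern, with an illustrative function:

```python
# Use inspect.Signature.bind purely to validate a call; the bound result
# itself is discarded. Function and arguments are illustrative only.
import inspect


def resample(image, spacing, mode="bilinear"):
    return image


sig = inspect.signature(resample)

# Valid combination: bind succeeds and the result is deliberately ignored.
_ = sig.bind("img", (1.0, 1.0, 1.0), mode="nearest").arguments

# Invalid combination: an unexpected keyword makes bind raise TypeError,
# which is the behaviour the deprecation wrapper relies on.
try:
    sig.bind("img", spacing=(1.0, 1.0), smoothing=0.5)
except TypeError as exc:
    print("rejected:", exc)
```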
2 changes: 1 addition & 1 deletion monai/utils/profiling.py
@@ -337,7 +337,7 @@ def profile_iter(self, name, iterable):

class _Iterable:

def __iter__(_self): # noqa: B902, N805 pylint: disable=E0213
def __iter__(_self): # noqa: N805 pylint: disable=E0213
do_iter = True
orig_iter = iter(iterable)
caller = getframeinfo(stack()[1][0])
32 changes: 30 additions & 2 deletions pyproject.toml
@@ -39,12 +39,19 @@ all = true
exclude = "monai/bundle/__main__.py"

[tool.ruff]
line-length = 133
line-length = 120
target-version = "py39"

[tool.ruff.lint]
select = [
"E", "F", "W", # flake8
"B", # flake8-bugbear - https://docs.astral.sh/ruff/rules/#flake8-bugbear-b
"C90", # mccabe (complexity) - https://docs.astral.sh/ruff/rules/#mccabe-c90
"E", # pycodestyle errors - https://docs.astral.sh/ruff/rules/#error-e
"F", # pyflakes - https://docs.astral.sh/ruff/rules/#pyflakes-f
"N", # pep8-naming - https://docs.astral.sh/ruff/rules/#pep8-naming-n
"PIE", # flake8-pie - https://docs.astral.sh/ruff/rules/#flake8-pie-pie
"TID", # flake8-tidy-imports - https://docs.astral.sh/ruff/rules/#flake8-tidy-imports-tid
"W", # pycodestyle warnings - https://docs.astral.sh/ruff/rules/#warning-w
"NPY", # NumPy specific rules
"UP", # pyupgrade
# "RUF100", # aka yesqa
@@ -53,7 +60,28 @@ extend-ignore = [
"E741", # ambiguous variable name
"F401", # unused import
"NPY002", # numpy-legacy-random
"E203", # whitespace before ':' (pycodestyle)
"E501", # line too long (pycodestyle)
"C408", # unnecessary collection call (flake8-comprehensions)
"N812", # lowercase imported as non lowercase (pep8-naming)
"B023", # function uses loop variable (flake8-bugbear)
"B905", # zip() without an explicit strict= parameter (flake8-bugbear)
"B028", # no explicit stacklevel keyword argument found (flake8-bugbear)
]

[tool.ruff.lint.per-file-ignores]
"tests/**" = [
"B018",
"C901",
"N999",
"N801"
]
"monai/apps/detection/utils/ATSS_matcher.py" = [
"N999"
]

[tool.ruff.lint.mccabe]
max-complexity = 50 # todo lower this threshold when yesqa is replaced with Ruff's RUF100

[tool.pytype]
# Space-separated list of files or directories to exclude.
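For anyone who wants to inspect the new settings programmatically, a small sketch using only the standard library; it assumes it is run from the repository root, and `tomllib` needs Python 3.11+ (on the project's 3.9 baseline the third-party `tomli` backport is the usual stand-in):

```python
# Print the Ruff settings defined above straight from pyproject.toml.
# Assumes the current directory is the repository root; tomllib is 3.11+ only.
import tomllib

with open("pyproject.toml", "rb") as f:
    config = tomllib.load(f)

ruff = config["tool"]["ruff"]
lint = ruff["lint"]

print("line length:      ", ruff["line-length"])
print("target version:   ", ruff["target-version"])
print("rule families:    ", ", ".join(lint["select"]))
print("extended ignores: ", ", ".join(lint["extend-ignore"]))
print("max complexity:   ", lint["mccabe"]["max-complexity"])
```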
3 changes: 0 additions & 3 deletions requirements-dev.txt
@@ -10,9 +10,6 @@ tensorboard>=2.12.0 # https://github.com/Project-MONAI/MONAI/issues/7434
scikit-image>=0.19.0
tqdm>=4.47.0
lmdb
flake8>=3.8.1
flake8-bugbear<=24.2.6 # https://github.com/Project-MONAI/MONAI/issues/7690
flake8-comprehensions
mccabe
pep8-naming
pycodestyle