diff --git a/.github/workflows/blossom-ci.yml b/.github/workflows/blossom-ci.yml index c10c7bd..6f02a43 100644 --- a/.github/workflows/blossom-ci.yml +++ b/.github/workflows/blossom-ci.yml @@ -54,7 +54,6 @@ jobs: repository: ${{ fromJson(needs.Authorization.outputs.args).repo }} ref: ${{ fromJson(needs.Authorization.outputs.args).ref }} lfs: 'true' - fetch-depth: 0 - name: Run blossom action uses: NVIDIA/blossom-action@main env: diff --git a/build_config/accvlab_build_config/helpers/__init__.py b/build_config/accvlab_build_config/helpers/__init__.py index a37bfe9..af640bf 100644 --- a/build_config/accvlab_build_config/helpers/__init__.py +++ b/build_config/accvlab_build_config/helpers/__init__.py @@ -17,9 +17,11 @@ """ from .build_utils import ( + CudaArchitectureSelection, load_config, detect_cuda_info, get_compile_flags, + select_cuda_architectures_for_nvcc, run_external_build, get_abs_setup_dir, ) @@ -29,9 +31,11 @@ ) __all__ = [ + 'CudaArchitectureSelection', 'load_config', 'detect_cuda_info', 'get_compile_flags', + 'select_cuda_architectures_for_nvcc', 'run_external_build', 'get_abs_setup_dir', 'build_cmake_args', diff --git a/build_config/accvlab_build_config/helpers/build_utils.py b/build_config/accvlab_build_config/helpers/build_utils.py index 7ebd4af..db881fd 100644 --- a/build_config/accvlab_build_config/helpers/build_utils.py +++ b/build_config/accvlab_build_config/helpers/build_utils.py @@ -18,10 +18,150 @@ """ import os +import re from pathlib import Path +import shutil import subprocess import sys -from typing import Optional +from typing import List, NamedTuple, Optional + + +class CudaArchitectureSelection(NamedTuple): + """CUDA architecture selection compatible with the available ``nvcc``. + + Attributes: + architectures: CUDA architectures to build as cubin targets. + ptx_architectures: At most one architecture to build as a PTX target + because a detected GPU architecture had to be capped. 
+ """ + + architectures: List[str] + ptx_architectures: List[str] + + +def _find_nvcc() -> Optional[str]: + """ + Locate the CUDA compiler used to determine supported target architectures. + """ + candidate = os.environ.get("CUDACXX") + if candidate: + return candidate + + for env_var in ("CUDA_HOME", "CUDA_PATH"): + cuda_root = os.environ.get(env_var) + if cuda_root: + candidate = os.path.join(cuda_root, "bin", "nvcc") + if os.path.exists(candidate): + return candidate + + return shutil.which("nvcc") + + +def _detect_nvcc_supported_architectures() -> List[str]: + """ + Ask nvcc which virtual GPU architectures it supports. + Returns values like ['70', '75', '80', '90']. + """ + nvcc = _find_nvcc() + if not nvcc: + return [] + + try: + result = subprocess.run( + [nvcc, "--list-gpu-arch"], + check=True, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + text=True, + timeout=10, + ) + except Exception: + return [] + + archs: List[str] = [] + for match in re.finditer(r"compute_([0-9]+)", result.stdout): + arch = match.group(1) + if arch not in archs: + archs.append(arch) + + return sorted(archs, key=int) + + +def _split_cuda_architectures(value: str) -> List[str]: + return [arch.strip() for arch in re.split(r"[,;]", value) if arch.strip()] + + +def _forward_compatible_ptx_architecture( + supported_architectures: List[str], max_architecture: int +) -> Optional[str]: + forward_compatible_archs: List[str] = [] + fallback_archs: List[str] = [] + for arch in supported_architectures: + try: + arch_int = int(arch) + except ValueError: + continue + + if arch_int > max_architecture: + continue + + fallback_archs.append(arch) + if arch_int % 10 == 0: + forward_compatible_archs.append(arch) + + if forward_compatible_archs: + return max(forward_compatible_archs, key=int) + if fallback_archs: + return max(fallback_archs, key=int) + return None + + +def select_cuda_architectures_for_nvcc( + cuda_architectures: List[str], +) -> CudaArchitectureSelection: + """Select CUDA cubin 
and PTX targets supported by the installed ``nvcc``. + + Numeric architectures above ``nvcc``'s maximum supported architecture are + capped to that maximum. When capping occurs, one PTX target is added using + the newest forward-compatible base architecture supported by ``nvcc`` at or + below the capped architecture. For example, if the highest supported + architecture is ``96``, the PTX target is ``90``. + + Args: + cuda_architectures: CUDA architecture numbers to select from, for + example ``["80", "90", "103"]``. + + Returns: + CudaArchitectureSelection: The capped cubin architectures and, when + capping occurred, the single architecture to emit as a PTX target. If + ``nvcc`` cannot be found or queried, the input architectures are returned + unchanged and no PTX targets are added. + """ + supported_archs = _detect_nvcc_supported_architectures() + if not cuda_architectures or not supported_archs: + return CudaArchitectureSelection(cuda_architectures, []) + + max_supported = max(int(arch) for arch in supported_archs) + capped_archs: List[str] = [] + any_arch_capped = False + for arch in cuda_architectures: + try: + arch_int = int(arch) + capped_arch = str(min(arch_int, max_supported)) + any_arch_capped = any_arch_capped or arch_int > max_supported + except ValueError: + capped_arch = arch + + if capped_arch not in capped_archs: + capped_archs.append(capped_arch) + + ptx_archs: List[str] = [] + if any_arch_capped: + ptx_arch = _forward_compatible_ptx_architecture(supported_archs, max_supported) + if ptx_arch is not None: + ptx_archs.append(ptx_arch) + + return CudaArchitectureSelection(capped_archs, ptx_archs) def missing_torch_error() -> RuntimeError: @@ -106,8 +246,8 @@ def load_config(default_config: Optional[dict] = None) -> dict: config[key] = env_val.lower() in ('1', 'true', 'yes', 'on') elif isinstance(config[key], int): config[key] = int(env_val) - elif key == 'CUSTOM_CUDA_ARCHS' and env_val: - config[key] = env_val.split(',') + elif key == 
'CUSTOM_CUDA_ARCHS': + config[key] = _split_cuda_architectures(env_val) if env_val else None else: config[key] = env_val @@ -152,7 +292,12 @@ def detect_cuda_info(): def get_compile_flags(config, cuda_info, include_dirs=None): - """Construct compilation flags + """Construct compilation flags. + + If ``CUSTOM_CUDA_ARCHS`` is unset, detected CUDA architectures are capped to + the maximum supported by ``nvcc``. If any architecture is capped, the newest + forward-compatible base architecture supported by ``nvcc`` is also emitted + as a PTX target. Args: config (dict): Build configuration @@ -202,17 +347,24 @@ def get_compile_flags(config, cuda_info, include_dirs=None): # CUDA flags (only if CUDA is available) if cuda_info['cuda_available']: - cuda_archs = ( - config['CUSTOM_CUDA_ARCHS'] - if config['CUSTOM_CUDA_ARCHS'] is not None - else cuda_info['gpu_architectures'] - ) + ptx_archs: List[str] = [] + if config['CUSTOM_CUDA_ARCHS'] is not None: + cuda_archs = config['CUSTOM_CUDA_ARCHS'] + else: + arch_selection = select_cuda_architectures_for_nvcc(cuda_info['gpu_architectures']) + cuda_archs = arch_selection.architectures + ptx_archs = arch_selection.ptx_architectures + if not cuda_archs: - cuda_archs = ['70', '75', '80', '86'] # Default modern architectures + arch_selection = select_cuda_architectures_for_nvcc(['70', '75', '80', '86']) + cuda_archs = arch_selection.architectures + ptx_archs = arch_selection.ptx_architectures # Generate architecture flags for arch in cuda_archs: flags['nvcc'].extend([f'-gencode=arch=compute_{arch},code=sm_{arch}']) + for arch in ptx_archs: + flags['nvcc'].extend([f'-gencode=arch=compute_{arch},code=compute_{arch}']) # CUDA compilation flags flags['nvcc'].extend( diff --git a/build_config/accvlab_build_config/helpers/cmake_args.py b/build_config/accvlab_build_config/helpers/cmake_args.py index 4967af2..bcfba8b 100644 --- a/build_config/accvlab_build_config/helpers/cmake_args.py +++ 
b/build_config/accvlab_build_config/helpers/cmake_args.py @@ -3,7 +3,7 @@ from pathlib import Path from typing import List, Optional -from .build_utils import missing_torch_error, require_torch_cuda_support +from .build_utils import detect_cuda_info, select_cuda_architectures_for_nvcc # Marker file at the ACCV-Lab monorepo root (see `.nav` in the repository). _NAV_MARKER = ".nav" @@ -57,35 +57,16 @@ def _normalize_cpp_standard(value: str) -> str: return v -def _detect_cuda_architectures() -> List[str]: - """ - Try to detect CUDA architectures from PyTorch if available. - - Returns a list like ['70', '75', '80']. Returns an empty list if PyTorch is - CUDA-enabled but no CUDA devices are available. - - Raises: - RuntimeError: If PyTorch is not installed or is installed without CUDA - support. ACCV-Lab CUDA extension builds require a CUDA-enabled - PyTorch wheel, so this is treated as a build configuration error - rather than as "CUDA not detected". - """ - try: - import torch # type: ignore - except ImportError as exc: - raise missing_torch_error() from exc - - require_torch_cuda_support(torch) +def _format_cmake_cuda_architectures(archs: List[str], ptx_archs: List[str]) -> List[str]: + if not ptx_archs: + return archs - if not torch.cuda.is_available(): - return [] - arches: List[str] = [] - for i in range(torch.cuda.device_count()): - major, minor = torch.cuda.get_device_capability(i) - arch = f"{major}{minor}" - if arch not in arches: - arches.append(arch) - return arches + cmake_archs: List[str] = [] + for arch in archs: + cmake_archs.append(f"{arch}-real") + for arch in ptx_archs: + cmake_archs.append(f"{arch}-virtual") + return cmake_archs def get_project_root() -> Path: @@ -112,6 +93,11 @@ def _build_cmake_args_from_env() -> List[str]: """ Build a list of -D CMake arguments from environment variables to harmonize build configuration across setuptools, external CMake, and scikit-build flows. 
+ + If ``CUSTOM_CUDA_ARCHS`` is unset, detected CUDA architectures are capped to + the maximum supported by ``nvcc``. If capping occurs, CMake builds cubins for + the capped architectures and adds one PTX target for the newest supported + forward-compatible base architecture. """ args: List[str] = [] # Always export compile_commands.json for tooling/validation @@ -139,9 +125,15 @@ def _build_cmake_args_from_env() -> List[str]: args.append(f'-DCMAKE_CUDA_ARCHITECTURES={norm_archs}') else: # Attempt auto-detection via torch; if empty, let CMake defaults apply - detected = _detect_cuda_architectures() + cuda_info = detect_cuda_info() + detected = cuda_info['gpu_architectures'] if cuda_info['cuda_available'] else [] if detected: - args.append(f'-DCMAKE_CUDA_ARCHITECTURES={";".join(detected)}') + selection = select_cuda_architectures_for_nvcc(detected) + cmake_archs = _format_cmake_cuda_architectures( + selection.architectures, + selection.ptx_architectures, + ) + args.append(f'-DCMAKE_CUDA_ARCHITECTURES={";".join(cmake_archs)}') # VERBOSE_BUILD -> CMAKE_VERBOSE_MAKEFILE if _parse_bool_env(os.environ.get("VERBOSE_BUILD", "")): @@ -186,7 +178,7 @@ def _build_cmake_args_package_scm_version(repo_root: Path) -> List[str]: Pass numeric version from setuptools-scm to CMake as a repo-aligned package version define (and harmless for CMake projects that ignore the variable). """ - from setuptools_scm import get_version + from setuptools_scm import get_version # type: ignore v = get_version( root=str(repo_root), @@ -203,6 +195,10 @@ def _build_cmake_args_package_scm_version(repo_root: Path) -> List[str]: def build_cmake_args() -> List[str]: """ Full CMake -D list: environment-based flags plus repo-aligned SCM version define. + + Auto-detected CUDA architectures are capped to ``nvcc`` support when + ``CUSTOM_CUDA_ARCHS`` is unset. If capping occurs, one PTX target is emitted + for the newest supported forward-compatible base architecture. 
""" root = get_project_root() return _build_cmake_args_from_env() + _build_cmake_args_package_scm_version(root) diff --git a/docker/Dockerfile b/docker/Dockerfile index 378f70a..6ca5f14 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -FROM nvidia/cuda:12.4.1-devel-ubuntu22.04 +FROM nvidia/cuda:12.8.2-devel-ubuntu22.04 ENV DEBIAN_FRONTEND=noninteractive @@ -86,7 +86,7 @@ RUN pip install numpy==1.23.5 \ scipy==1.15.3 \ opencv-python-headless==4.5.5.64 -RUN pip install torch==2.6.0 torchvision==0.21.0 torchaudio==2.6.0 --index-url https://download.pytorch.org/whl/cu124 +RUN pip install torch==2.11.0 torchvision==0.26.0 torchaudio==2.11.0 --index-url https://download.pytorch.org/whl/cu128 RUN pip install black @@ -101,13 +101,13 @@ RUN pip install sphinx \ RUN pip install ninja \ scikit-build -RUN pip install pycuda==2025.1.1 \ +RUN pip install pycuda==2025.1.2 \ pybind11==3.0.0 \ cvcuda-cu12==0.15.0 RUN pip install pandas==1.5.3 \ IPython \ - nvidia-dali-cuda120==1.51.2 \ + nvidia-dali-cuda120==1.53.0 \ nvtx \ psutil \ numba==0.59 \ @@ -123,7 +123,7 @@ RUN apt-get install -y libjpeg-dev zlib1g-dev RUN pip install --upgrade pip setuptools==80.9.0 wheel setuptools-scm>=8 -RUN pip install cupy==13.6.0 +RUN pip install cupy-cuda12x==13.6.0 WORKDIR /workspace diff --git a/docs/guides/DEVELOPMENT_GUIDE.md b/docs/guides/DEVELOPMENT_GUIDE.md index 28e1433..d195bae 100644 --- a/docs/guides/DEVELOPMENT_GUIDE.md +++ b/docs/guides/DEVELOPMENT_GUIDE.md @@ -753,8 +753,7 @@ The `accvlab_build_config` package provides the following shared build & configu - **Environment-derived build settings**: Converts ACCV-Lab build variables into CMake cache entries: - `DEBUG_BUILD` → `CMAKE_BUILD_TYPE` - `CPP_STANDARD` → `CMAKE_CXX_STANDARD`, `CMAKE_CUDA_STANDARD` - - `CUSTOM_CUDA_ARCHS` → `CMAKE_CUDA_ARCHITECTURES` (auto-detect via CUDA-enabled PyTorch if unset; if no 
architecture - can be detected, do not pass it so package-specific CMake defaults apply) + - `CUSTOM_CUDA_ARCHS` → `CMAKE_CUDA_ARCHITECTURES` - `VERBOSE_BUILD` → `CMAKE_VERBOSE_MAKEFILE` - `OPTIMIZE_LEVEL`, `USE_FAST_MATH`, `ENABLE_PROFILING` → appended to `CMAKE_CXX_FLAGS`, `CMAKE_CUDA_FLAGS` - Always sets `-DCMAKE_EXPORT_COMPILE_COMMANDS=ON` @@ -765,6 +764,11 @@ The `accvlab_build_config` package provides the following shared build & configu - `run_external_build()` - Executes `build_and_copy.sh` build script (used in external implementations, see [External Implementations](#external-implementations) section for more details). +> **ℹ️ Note**: The authoritative list of supported build variables, defaults, and CUDA +> architecture handling is in the +> [Available Build Variables](INSTALLATION_GUIDE.md#available-build-variables) section of the +> [Installation Guide](INSTALLATION_GUIDE.md). + ### Usage in Namespace Packages Each namespace package's `setup.py` imports and uses these shared utilities, for example: @@ -818,6 +822,13 @@ Please see the `setup.py` files of the example packages (e.g. `packages/example_ ### How Build Variables Are Picked Up +> **ℹ️ Note**: The authoritative list of supported build variables, defaults, and CUDA +> architecture handling is in the +> [Available Build Variables](INSTALLATION_GUIDE.md#available-build-variables) section of the +> [Installation Guide](INSTALLATION_GUIDE.md). The +> [Shared Build & Configuration Utilities](#shared-build--configuration-utilities) section explains how the +> shared helper utilities consume those variables. 
+ Depending on the package type, build variables are consumed as follows: - Setuptools (PyTorch extensions): diff --git a/docs/guides/DOCKER_GUIDE.md b/docs/guides/DOCKER_GUIDE.md index 520ad53..c6d2814 100644 --- a/docs/guides/DOCKER_GUIDE.md +++ b/docs/guides/DOCKER_GUIDE.md @@ -9,7 +9,7 @@ dependencies needed to build and install ACCV-Lab, including all the contained n The Dockerfile is located in the `docker` directory of the ACCV-Lab repository. The Dockerfile uses the [nvidia/cuda](https://hub.docker.com/r/nvidia/cuda) image -(version: `nvidia/cuda:12.4.1-devel-ubuntu22.04`) as the base image. +(version: `nvidia/cuda:12.8.2-devel-ubuntu22.04`) as the base image. The Dockerfile has the following optional build arguments: - `USER_ID`: The ID of the user to use for the container. diff --git a/docs/guides/DOCUMENTATION_SETUP_GUIDE.md b/docs/guides/DOCUMENTATION_SETUP_GUIDE.md index 369d0b6..6b86ac6 100644 --- a/docs/guides/DOCUMENTATION_SETUP_GUIDE.md +++ b/docs/guides/DOCUMENTATION_SETUP_GUIDE.md @@ -180,6 +180,16 @@ root directory): ./scripts/build_docs.sh ``` +**Capture Sphinx warnings in a dedicated file**: +```bash +./scripts/build_docs.sh --warning-file docs/_build/sphinx-warnings.log +``` +This is useful for CI and other automation that wants to inspect warnings separately from the console output. + +Relative warning-file paths are resolved from the project root. The warning file captures Sphinx warnings from +the selected builder (HTML by default, PDF with `--pdf`, or spelling with `--spelling`). In spelling mode, this +is separate from spelling findings, which are still written to `docs/_build/spelling/output.txt`. 
+
 **Manual build**:
 ```bash
 cd docs
@@ -199,7 +209,7 @@ make livehtml
 - The `html` target ensures all scripts run before building
 - The `livehtml` target also runs the scripts for development builds
 - When running spelling via the script, the generation scripts are executed first to ensure mirrored package
-  docs are up to date
+  docs are up to date. Spelling findings are written to `docs/_build/spelling/output.txt`.
 
 > **ℹ️ Note**:
 > `make livehtml` watches the `docs/` tree and mirrored package documentation, but:
@@ -433,6 +443,8 @@ The configuration includes:
 #### Build Script (`scripts/build_docs.sh`)
 - **Dependency installation**: Installs documentation requirements from `docs/requirements.txt`
 - **Complete build process**: Generation + Sphinx build
+- **Warning capture**: Can write Sphinx warnings to a dedicated file via
+  `--warning-file`
 - **Browser integration**: Optionally opens documentation
 - **Error handling**: Provides clear error messages
 - **Location**: Can be run from any directory (script automatically determines project root)
diff --git a/docs/guides/INSTALLATION_GUIDE.md b/docs/guides/INSTALLATION_GUIDE.md
index d6d4ac0..f9c261f 100644
--- a/docs/guides/INSTALLATION_GUIDE.md
+++ b/docs/guides/INSTALLATION_GUIDE.md
@@ -388,7 +388,7 @@ DEBUG_BUILD=1 VERBOSE_BUILD=1 ./scripts/package_manager.sh install
 # Optimized build for production
 OPTIMIZE_LEVEL=3 USE_FAST_MATH=1 ./scripts/package_manager.sh install
 
-# Custom CUDA architectures (if you know your GPU architecture)
+# Custom CUDA architectures (if you need to override auto-detection)
 CUSTOM_CUDA_ARCHS="70,75,80" ./scripts/package_manager.sh install
 
 # Enable profiling support
@@ -414,10 +414,18 @@ ENABLE_PROFILING=1 ./scripts/package_manager.sh install
 > `CPP_STANDARD=c++17`. Using newer standards (e.g., C++20) may not be supported for CUDA builds for some
 > of the packages.
 
-> **⚠️ Important**: If `CUSTOM_CUDA_ARCHS` is not set, ACCV-Lab first tries to auto-detect GPU architectures -> via CUDA-enabled PyTorch. Missing PyTorch or CPU-only PyTorch is treated as a build configuration error. -> If PyTorch is CUDA-enabled but no architecture can be detected (for example because no CUDA device is visible), -> ACCV-Lab does not pass `CMAKE_CUDA_ARCHITECTURES`; package-specific CMake defaults then apply. +> **⚠️ Important**: If `CUSTOM_CUDA_ARCHS` is not set, ACCV-Lab tries to auto-detect +> GPU architectures via CUDA-enabled PyTorch. Missing PyTorch or CPU-only PyTorch is treated as a build +> configuration error. +> +> Auto-detected architectures are capped to the maximum architecture supported by the +> installed `nvcc`, which avoids selecting a GPU architecture that is newer than the CUDA toolkit used for +> the build. When an architecture is capped, the build also includes one PTX target for the newest supported +> forward-compatible base architecture. +> +> If PyTorch is CUDA-enabled but no architecture can be detected +> (for example because no CUDA device is visible), ACCV-Lab does not pass `CMAKE_CUDA_ARCHITECTURES`; +> package-specific CMake defaults then apply. ## Additional Information diff --git a/docs/index.rst b/docs/index.rst index b626f31..273cc02 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -14,6 +14,12 @@ contained packages and a :ref:`Quick Start ` secti detailed guides on how to use this project (:doc:`Installation Guide `, :doc:`Development Guide `, ...) and further information. +.. note:: + + The online documentation reflects the current state of the ``main`` branch. If you need the documentation for a specific version, + please follow the instructions in the :ref:`Quick Start ` section to build the documentation + locally. + .. 
toctree:: :maxdepth: 2 :caption: General Info diff --git a/packages/dali_pipeline_framework/ext_impl/DrawGaussians.h b/packages/dali_pipeline_framework/ext_impl/DrawGaussians.h index c548480..e4827ef 100644 --- a/packages/dali_pipeline_framework/ext_impl/DrawGaussians.h +++ b/packages/dali_pipeline_framework/ext_impl/DrawGaussians.h @@ -22,7 +22,6 @@ #include "dali/core/error_handling.h" #include "dali/core/static_switch.h" -#include "dali/kernels/dynamic_scratchpad.h" #include "dali/pipeline/data/types.h" #include "dali/pipeline/operator/operator.h" @@ -99,9 +98,6 @@ class DrawGaussians : public ::dali::Operator { void RunImpl(::dali::Workspace& ws) override; private: - dali::kernels::DynamicScratchpad _scratch_alloc; - size_t _curr_scratch_size = 0; - std::vector _k_for_classes; float _radius_to_sigma_factor; diff --git a/packages/dali_pipeline_framework/pyproject.toml b/packages/dali_pipeline_framework/pyproject.toml index 9558ab2..cee786a 100644 --- a/packages/dali_pipeline_framework/pyproject.toml +++ b/packages/dali_pipeline_framework/pyproject.toml @@ -25,7 +25,7 @@ dependencies = [ optional = [ "pytest", "opencv-python-headless", - "cupy==13.6.0", + "cupy-cuda12x==13.6.0", "pyquaternion", "nuscenes-devkit", "shapely", diff --git a/scripts/build_docs.sh b/scripts/build_docs.sh index 0cc7421..8c034da 100755 --- a/scripts/build_docs.sh +++ b/scripts/build_docs.sh @@ -24,15 +24,24 @@ PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." 
&& pwd)" # Parse flags VERBOSE_FLAG="" +WARNING_FILE="" PDF_MODE=0 OPEN_MODE=0 SPELLING_MODE=0 -for arg in "$@"; do - case "$arg" in +while [[ $# -gt 0 ]]; do + case "$1" in -v|--verbose) VERBOSE_FLAG="--verbose" echo "Verbose mode enabled" ;; + --warning-file) + if [[ -z "${2:-}" || "${2:-}" == -* ]]; then + echo "Error: --warning-file requires a file path" + exit 1 + fi + WARNING_FILE="$2" + shift + ;; -p|--pdf) PDF_MODE=1 ;; @@ -43,18 +52,21 @@ for arg in "$@"; do SPELLING_MODE=1 ;; -h|--help) - echo "Usage: $0 [-v|--verbose] [-p|--pdf] [-s|--spelling] [-o|--open]" + echo "Usage: $0 [-v|--verbose] [--warning-file PATH] [-p|--pdf] [-s|--spelling] [-o|--open]" echo " Default builds HTML docs. Use --pdf to build PDF via LaTeX." + echo " Use --warning-file PATH to write Sphinx warnings to a dedicated file." + echo " Relative warning-file paths are resolved from the project root." echo " Use --spelling to run spelling checks with sphinxcontrib-spelling." echo " Use --open to auto-open the built HTML/PDF if supported." exit 0 ;; *) - echo "Error: Unknown argument '$arg'" + echo "Error: Unknown argument '$1'" echo "Use -h|--help for usage." exit 1 ;; esac + shift done echo "==========================" @@ -76,6 +88,10 @@ if [ ! -d "docs" ]; then exit 1 fi +if [[ -n "$WARNING_FILE" && "$WARNING_FILE" != /* ]]; then + WARNING_FILE="$PROJECT_ROOT/$WARNING_FILE" +fi + # Install documentation dependencies, which are listed in the requirements.txt file # of the docs directory echo "Installing documentation dependencies..." 
@@ -84,6 +100,14 @@ pip install -r docs/requirements.txt cd docs make clean +SPHINXOPTS_ARGS="$VERBOSE_FLAG" +if [[ -n "$WARNING_FILE" ]]; then + mkdir -p "$(dirname "$WARNING_FILE")" + : > "$WARNING_FILE" + SPHINXOPTS_ARGS="${SPHINXOPTS_ARGS:+$SPHINXOPTS_ARGS }-w $WARNING_FILE" + echo "Sphinx warnings will be written to: $WARNING_FILE" +fi + if [[ "$PDF_MODE" -eq 1 && "$SPELLING_MODE" -eq 1 ]]; then echo "Error: --pdf and --spelling cannot be used together. Choose one." exit 1 @@ -92,7 +116,7 @@ fi if [[ "$PDF_MODE" -eq 1 ]]; then echo "Building PDF documentation..." make generate - make latexpdf SPHINXOPTS="$VERBOSE_FLAG" + make latexpdf SPHINXOPTS="$SPHINXOPTS_ARGS" echo "" echo "PDF build complete!" # Try to open the resulting PDF (handle common naming variants) @@ -156,7 +180,7 @@ PY # Ensure generated/mirrored docs are up to date before spelling make generate # Run the spelling builder - make spelling SPHINXOPTS="$VERBOSE_FLAG" + make spelling SPHINXOPTS="$SPHINXOPTS_ARGS" echo "" echo "Spelling check complete!" # Point to common output location @@ -172,7 +196,7 @@ PY fi else echo "Building HTML documentation..." - make html SPHINXOPTS="$VERBOSE_FLAG" + make html SPHINXOPTS="$SPHINXOPTS_ARGS" echo "" echo "Documentation build complete!" echo "Open docs/_build/html/index.html in your browser to view the documentation." diff --git a/scripts/formatting/black_format_package.sh b/scripts/formatting/black_format_package.sh index c6aeef4..fc52d35 100755 --- a/scripts/formatting/black_format_package.sh +++ b/scripts/formatting/black_format_package.sh @@ -19,6 +19,8 @@ # # By default, formats: # - Root Python files (namespace_packages_config.py, etc.) +# - Docs Python files (docs/**/*.py) +# - Scripts Python files (scripts/**/*.py) # - Common accvlab code (accvlab/__init__.py, etc.) 
# - Common build_config code (build_config/_helpers/, build_config/__init__.py) # - All subpackages (both in accvlab/ and build_config/subpackages/) @@ -46,7 +48,7 @@ while [[ $# -gt 0 ]]; do echo "" echo "Format Python code in the project with Black." echo "" - echo "By default, formats only common code (root files, docs, common accvlab, common build_config)." + echo "By default, formats only common code (root files, docs, scripts, common accvlab, common build_config)." echo "" echo "Options:" echo " --include-subpackages Also format individual subpackages" @@ -88,26 +90,19 @@ if [ -d "docs" ]; then find docs/ -name "*.py" -exec black {} \; fi +# Format scripts Python files if they exist +if [ -d "scripts" ]; then + echo "Formatting scripts Python files..." + find scripts/ -name "*.py" -exec black {} \; +fi + if [ "$INCLUDE_SUBPACKAGES" = true ]; then - # Format all namespace packages + # Format all namespace packages, excluding directories declared in .gitmodules. echo "Formatting namespace packages..." 
- python3 -c " -from namespace_packages_config import get_package_names -import subprocess -import os - -packages = get_package_names() -if not packages: - print(' No namespace packages found') -else: - for pkg in packages: - print(f' Formatting namespace package: {pkg}') - - # Format packages// (includes tests/ and ext_impl/ subdirectories) - package_path = f'packages/{pkg}' - if os.path.exists(package_path): - subprocess.run(['black', package_path], check=True) -" + python3 "$SCRIPT_DIR/helpers/run_formatter_on_namespace_packages.py" \ + --extension .py \ + --language-name Python \ + -- black else echo "Skipping namespace packages (use --include-subpackages to format them)" fi diff --git a/scripts/formatting/black_format_subpackage.sh b/scripts/formatting/black_format_subpackage.sh index 48ba42f..1c2df14 100755 --- a/scripts/formatting/black_format_subpackage.sh +++ b/scripts/formatting/black_format_subpackage.sh @@ -35,11 +35,7 @@ if [ $# -eq 0 ] || [ $# -gt 1 ] || [[ "$1" == -* ]]; then echo "Error: Unknown option: $1" fi echo "Available namespace packages:" - python3 -c " -from namespace_packages_config import get_package_names -for pkg in get_package_names(): - print(f' - {pkg}') -" + python3 "$SCRIPT_DIR/helpers/list_namespace_packages.py" exit 1 fi @@ -53,10 +49,14 @@ fi echo "Formatting namespace package: $PACKAGE" -# Format Python files in packages// (includes tests/ and ext_impl/ subdirectories) +# Format Python files while excluding directories declared in .gitmodules. echo "Formatting packages/$PACKAGE/..." if [ -d "packages/$PACKAGE" ]; then - black packages/$PACKAGE/ + python3 "$SCRIPT_DIR/helpers/run_formatter_on_files.py" \ + --extension .py \ + --root "packages/$PACKAGE" \ + --empty-message " No Python files found in packages/$PACKAGE" \ + -- black fi echo "Formatting of namespace package '$PACKAGE' completed successfully!" 
\ No newline at end of file diff --git a/scripts/formatting/clang_format_package.sh b/scripts/formatting/clang_format_package.sh index 7254188..2d92ce2 100755 --- a/scripts/formatting/clang_format_package.sh +++ b/scripts/formatting/clang_format_package.sh @@ -79,45 +79,19 @@ if ! command -v clang-format &> /dev/null; then fi if [ "$INCLUDE_SUBPACKAGES" = true ]; then - # Format all namespace packages + # Format all namespace packages, excluding directories declared in .gitmodules. echo "Formatting namespace packages..." echo "Using nearest .clang-format (auto-discovery)" - python3 -c " -from namespace_packages_config import get_package_names -import subprocess -import os -import shutil -import sys - -# We intentionally do not pass a specific config path so clang-format -# will auto-discover the nearest .clang-format for each file - -# Check if clang-format is available -if not shutil.which('clang-format'): - print(' Warning: clang-format not found, skipping C++ formatting') - exit(0) - -packages = get_package_names() -if not packages: - print(' No namespace packages found') -else: - for pkg in packages: - print(f' Formatting C++ in namespace package: {pkg}') - - # Format packages// (includes tests/ and ext_impl/ subdirectories) - pkg_path = f'packages/{pkg}' - if os.path.exists(pkg_path): - try: - result = subprocess.run(['find', pkg_path, '-regex', r'.*\.\(cpp\|cc\|c\|cu\|hpp\|h\|cuh\)'], - capture_output=True, text=True, check=True) - if result.stdout.strip(): - cpp_files = result.stdout.strip().split('\n') - subprocess.run(['clang-format', '-style=file', '-fallback-style=none', '-i'] + cpp_files, check=True) - else: - print(f' No C++ files found in {pkg_path}') - except subprocess.CalledProcessError: - pass -" CLANG_FORMAT_CONFIG="$CLANG_FORMAT_CONFIG" + python3 "$SCRIPT_DIR/helpers/run_formatter_on_namespace_packages.py" \ + --extension .cpp \ + --extension .cc \ + --extension .c \ + --extension .cu \ + --extension .hpp \ + --extension .h \ + --extension .cuh 
\ + --language-name "C++" \ + -- clang-format -style=file -fallback-style=none -i else echo "Skipping namespace packages (use --include-subpackages to format them)" echo "Note: C++ code only exists in namespace packages" diff --git a/scripts/formatting/clang_format_subpackage.sh b/scripts/formatting/clang_format_subpackage.sh index 26920c8..bab91ef 100755 --- a/scripts/formatting/clang_format_subpackage.sh +++ b/scripts/formatting/clang_format_subpackage.sh @@ -43,11 +43,7 @@ if [ $# -eq 0 ] || [ $# -gt 1 ] || [[ "$1" == -* ]]; then echo "Error: Unknown option: $1" fi echo "Available namespace packages:" - python3 -c " -from namespace_packages_config import get_package_names -for pkg in get_package_names(): - print(f' - {pkg}') -" + python3 "$SCRIPT_DIR/helpers/list_namespace_packages.py" exit 1 fi @@ -69,15 +65,20 @@ if ! command -v clang-format &> /dev/null; then return 0 2>/dev/null || exit 0 fi -# Format C++ files in packages// (includes tests/ and ext_impl/ subdirectories) +# Format C++ files while excluding directories declared in .gitmodules. echo "Formatting C++ files in packages/$PACKAGE/..." if [ -d "packages/$PACKAGE" ]; then - CPP_FILES=$(find "packages/$PACKAGE" -regex '.*\.\(cpp\|cc\|c\|cu\|hpp\|h\|cuh\)' 2>/dev/null) - if [ -n "$CPP_FILES" ]; then - echo "$CPP_FILES" | xargs clang-format -style=file -fallback-style=none -i - else - echo " No C++ files found" - fi + python3 "$SCRIPT_DIR/helpers/run_formatter_on_files.py" \ + --extension .cpp \ + --extension .cc \ + --extension .c \ + --extension .cu \ + --extension .hpp \ + --extension .h \ + --extension .cuh \ + --root "packages/$PACKAGE" \ + --empty-message " No C++ files found in packages/$PACKAGE" \ + -- clang-format -style=file -fallback-style=none -i fi echo "C++ formatting for namespace package '$PACKAGE' completed successfully!" 
diff --git a/scripts/formatting/format_subpackage.sh b/scripts/formatting/format_subpackage.sh index 3dcac3f..8b5355d 100755 --- a/scripts/formatting/format_subpackage.sh +++ b/scripts/formatting/format_subpackage.sh @@ -35,11 +35,7 @@ if [ $# -eq 0 ] || [ $# -gt 1 ] || [[ "$1" == -* ]]; then echo "Error: Unknown option: $1" fi echo "Available namespace packages:" - python3 -c " -from namespace_packages_config import get_package_names -for pkg in get_package_names(): - print(f' - {pkg}') -" + python3 "$SCRIPT_DIR/helpers/list_namespace_packages.py" exit 1 fi diff --git a/scripts/formatting/helpers/format_file_utils.py b/scripts/formatting/helpers/format_file_utils.py new file mode 100644 index 0000000..c883725 --- /dev/null +++ b/scripts/formatting/helpers/format_file_utils.py @@ -0,0 +1,120 @@ +#!/usr/bin/env python3 + +# Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""Shared file discovery helpers for formatting scripts."""

import configparser
import os
from pathlib import Path


def find_project_root(start_path):
    """Walk upward from *start_path* until the repository root marker is found.

    The root is identified by a ``.nav`` file whose stripped content is
    exactly ``project root``.

    Args:
        start_path: A ``Path`` to a file or directory inside the repository.

    Returns:
        The ``Path`` of the repository root.

    Raises:
        RuntimeError: If no marker file is found up to the filesystem root.
    """
    current_path = start_path.resolve()
    if current_path.is_file():
        current_path = current_path.parent

    for candidate in [current_path, *current_path.parents]:
        marker_path = candidate / ".nav"
        if marker_path.is_file() and marker_path.read_text(encoding="utf-8").strip() == "project root":
            return candidate

    raise RuntimeError(
        f"Could not find project root marker .nav containing 'project root' above {start_path}"
    )


# Resolved once at import time so scripts fail fast when run outside the repo.
PROJECT_ROOT = find_project_root(Path(__file__))


def load_submodule_paths(project_root=PROJECT_ROOT):
    """Return submodule paths declared in .gitmodules, relative to *project_root*.

    Returns an empty set when no ``.gitmodules`` file exists.

    Raises:
        ValueError: If a submodule section lacks the required ``path`` key.
    """
    gitmodules = project_root / ".gitmodules"
    if not gitmodules.exists():
        return set()

    # Disable interpolation: a literal '%' in a submodule URL or path would
    # otherwise raise InterpolationSyntaxError from the default parser.
    config = configparser.ConfigParser(interpolation=None)
    config.read(gitmodules)

    submodule_paths = set()
    for section in config.sections():
        if not section.startswith("submodule "):
            continue

        if "path" not in config[section]:
            raise ValueError(f"Submodule section {section!r} in .gitmodules is missing required 'path'")

        submodule_path = Path(config[section]["path"].strip()).as_posix().rstrip("/")
        if submodule_path:
            submodule_paths.add(submodule_path)

    return submodule_paths


def relative_to_project(path, project_root=PROJECT_ROOT):
    """Return *path* relative to *project_root*, using POSIX separators.

    Falls back to the path's own POSIX form when it resolves outside the
    project root.
    """
    try:
        return path.resolve().relative_to(project_root).as_posix()
    except ValueError:
        return path.as_posix()


def is_in_submodule(relative_path, submodule_paths):
    """Return True if *relative_path* is inside a declared submodule path."""
    relative_path = relative_path.rstrip("/")
    return any(
        relative_path == submodule_path or relative_path.startswith(f"{submodule_path}/")
        for submodule_path in submodule_paths
    )


def iter_format_files(roots, extensions, project_root=PROJECT_ROOT):
    """Yield files under *roots* matching *extensions*, excluding Git submodules.

    Args:
        roots: Files or directories, given relative to *project_root*.
        extensions: Tuple of suffixes accepted by ``str.endswith``.
        project_root: Repository root used to relativize the results.

    Yields:
        POSIX-style file paths relative to *project_root*.
    """
    submodule_paths = load_submodule_paths(project_root)

    for root in roots:
        root_path = (project_root / root).resolve()
        if not root_path.exists():
            continue

        # Allow callers to pass an individual file as a root.
        if root_path.is_file():
            relative_path = relative_to_project(root_path, project_root)
            if not is_in_submodule(relative_path, submodule_paths) and root_path.name.endswith(extensions):
                yield relative_path
            continue

        for current_root, dirs, files in os.walk(root_path):
            current_path = Path(current_root)

            # `dirs` is consumed by `os.walk()` on its next iteration, so
            # pruning submodule directories in-place prevents the walk from
            # descending into them at all.
            dirs[:] = [
                dirname
                for dirname in dirs
                if not is_in_submodule(
                    relative_to_project(current_path / dirname, project_root), submodule_paths
                )
            ]

            # Yield paths relative to the project root so formatter output is
            # stable regardless of where the helper script itself lives.
            for filename in files:
                file_path = current_path / filename
                relative_path = relative_to_project(file_path, project_root)
                if not is_in_submodule(relative_path, submodule_paths) and filename.endswith(extensions):
                    yield relative_path
"""Command batching helpers for formatter invocations."""

from itertools import islice
import subprocess

# Upper bound on files per formatter invocation, keeping command lines well
# below platform argument-length limits.
DEFAULT_BATCH_SIZE = 100


def iter_file_batches(files, batch_size=DEFAULT_BATCH_SIZE):
    """Yield lists of at most *batch_size* items from *files*.

    Accepts any iterable; the final batch may be shorter than *batch_size*.

    Raises:
        ValueError: If *batch_size* is less than 1 (raised lazily, on first
            iteration, because this is a generator).
    """
    if batch_size < 1:
        raise ValueError("batch_size must be at least 1")

    iterator = iter(files)
    # islice drains the shared iterator batch_size items at a time; an empty
    # slice means the input is exhausted.
    while batch := list(islice(iterator, batch_size)):
        yield batch


def run_formatter_in_batches(formatter_command, files, batch_size=DEFAULT_BATCH_SIZE):
    """Run *formatter_command* with *files* appended in fixed-size batches.

    Raises:
        subprocess.CalledProcessError: If any formatter invocation exits
            non-zero (``check=True``).
    """
    for batch in iter_file_batches(files, batch_size):
        subprocess.run(formatter_command + batch, check=True)
"""Run a formatter on files under selected roots, excluding Git submodules."""

import argparse

from format_file_utils import iter_format_files
from formatter_command_utils import run_formatter_in_batches


def parse_args():
    """Parse and validate command-line arguments.

    The formatter command is collected verbatim after a literal ``--`` via
    ``argparse.REMAINDER``; the separator itself is stripped before use.

    Returns:
        The parsed ``argparse.Namespace``.
    """
    parser = argparse.ArgumentParser(
        description=(
            "Discover files under one or more roots, exclude paths declared as "
            "Git submodules, and run a formatter command on the resulting files."
        )
    )
    parser.add_argument(
        "--extension",
        action="append",
        required=True,
        help="File extension to include, such as .py or .cpp. Can be repeated.",
    )
    parser.add_argument(
        "--root",
        action="append",
        required=True,
        help="File or directory to scan, relative to the project root. Can be repeated.",
    )
    parser.add_argument(
        "--empty-message",
        default="No matching files found",
        help="Message to print when no files match after submodule exclusions.",
    )
    parser.add_argument(
        "--batch-size",
        type=int,
        default=100,
        help="Maximum number of files to pass to one formatter invocation.",
    )
    parser.add_argument(
        "formatter_command",
        nargs=argparse.REMAINDER,
        help="Formatter command to run after '--', for example: -- black",
    )
    args = parser.parse_args()

    # REMAINDER keeps the leading '--' separator; drop it before execution.
    if args.formatter_command and args.formatter_command[0] == "--":
        args.formatter_command = args.formatter_command[1:]

    if not args.formatter_command:
        parser.error("formatter command is required after '--'")

    # Reject invalid batch sizes with a clean usage error instead of letting
    # iter_file_batches raise a ValueError traceback later.
    if args.batch_size < 1:
        parser.error("--batch-size must be at least 1")

    return args


def main():
    """Discover matching files and run the formatter over them in batches."""
    args = parse_args()
    files = list(iter_format_files(args.root, tuple(args.extension)))

    if not files:
        print(args.empty_message)
        return

    run_formatter_in_batches(args.formatter_command, files, args.batch_size)


if __name__ == "__main__":
    main()
"""Run a formatter across all namespace packages, excluding Git submodules."""

import argparse
import sys

from format_file_utils import PROJECT_ROOT, iter_format_files
from formatter_command_utils import run_formatter_in_batches

# namespace_packages_config lives at the repository root, not next to this
# helper, so the root must be made importable first.
sys.path.insert(0, str(PROJECT_ROOT))

from namespace_packages_config import get_package_names  # noqa: E402


def parse_args():
    """Parse and validate command-line arguments.

    The formatter command is collected verbatim after a literal ``--`` via
    ``argparse.REMAINDER``; the separator itself is stripped before use.

    Returns:
        The parsed ``argparse.Namespace``.
    """
    parser = argparse.ArgumentParser(
        description=(
            "Run a formatter for every namespace package under packages/, while "
            "excluding directories declared as Git submodules."
        )
    )
    parser.add_argument(
        "--extension",
        action="append",
        required=True,
        help="File extension to include, such as .py or .cpp. Can be repeated.",
    )
    parser.add_argument(
        "--language-name",
        required=True,
        help="Human-readable language name for progress messages, such as Python or C++.",
    )
    parser.add_argument(
        "--batch-size",
        type=int,
        default=100,
        help="Maximum number of files to pass to one formatter invocation.",
    )
    parser.add_argument(
        "formatter_command",
        nargs=argparse.REMAINDER,
        help="Formatter command to run after '--', for example: -- black",
    )
    args = parser.parse_args()

    # REMAINDER keeps the leading '--' separator; drop it before execution.
    if args.formatter_command and args.formatter_command[0] == "--":
        args.formatter_command = args.formatter_command[1:]

    if not args.formatter_command:
        parser.error("formatter command is required after '--'")

    # Reject invalid batch sizes with a clean usage error instead of letting
    # iter_file_batches raise a ValueError traceback later; keeps this script
    # consistent with run_formatter_on_files.py.
    if args.batch_size < 1:
        parser.error("--batch-size must be at least 1")

    return args


def main():
    """Format every configured namespace package with the given command."""
    args = parse_args()
    packages = get_package_names()

    if not packages:
        print(" No namespace packages found")
        return

    for package in packages:
        package_path = f"packages/{package}"
        print(f" Formatting {args.language_name} in namespace package: {package}")

        files = list(iter_format_files([package_path], tuple(args.extension)))
        if not files:
            print(f" No {args.language_name} files found in {package_path}")
            continue

        run_formatter_in_batches(args.formatter_command, files, args.batch_size)


if __name__ == "__main__":
    main()