diff --git a/README.md b/README.md
index 0a7e8bddb26..70accc10a3c 100644
--- a/README.md
+++ b/README.md
@@ -60,11 +60,17 @@ Learn more: [How ExecuTorch Works](https://docs.pytorch.org/executorch/main/intr
 
 ### Installation
 
+**Python (via pip):**
 ```bash
 pip install executorch
 ```
 
-For platform-specific setup (Android, iOS, embedded systems), see the [Quick Start](https://docs.pytorch.org/executorch/main/quick-start-section.html) documentation for additional info.
+**C++ (via VCPKG):**
+```bash
+vcpkg install executorch
+```
+
+For platform-specific setup (Android, iOS, embedded systems), see the [Quick Start](https://docs.pytorch.org/executorch/main/quick-start-section.html) documentation. For VCPKG port details and features, see [ports/README.md](ports/README.md).
 
 ### Export and Deploy in 3 Steps
diff --git a/ports/README.md b/ports/README.md
new file mode 100644
index 00000000000..42db7791c2a
--- /dev/null
+++ b/ports/README.md
@@ -0,0 +1,69 @@
+# ExecuTorch VCPKG Port
+
+This directory contains the VCPKG port configuration for ExecuTorch, PyTorch's on-device AI inference framework.
+
+## About VCPKG Ports
+
+VCPKG is a C/C++ package manager from Microsoft. A "port" is the set of scripts and metadata that tells VCPKG how to download, build, and install a library.
+
+## Files in this Directory
+
+- **vcpkg.json**: Manifest containing package metadata, version, dependencies, and optional features
+- **portfile.cmake**: Build script that tells VCPKG how to configure, build, and install ExecuTorch
+- **usage**: Instructions on using the installed package in a consumer CMake project
+
+## Using this Port
+
+### For VCPKG Registry Maintainers
+
+To submit this port to the official VCPKG registry:
+
+1. Follow the VCPKG contribution guidelines: https://github.com/microsoft/vcpkg/blob/master/docs/maintainers/control-files.md
+2. Copy the `ports/executorch` directory into your VCPKG installation's `ports/` directory
+3. Update the `SHA512` hash in `portfile.cmake` after the first install attempt (VCPKG reports the expected hash in its error message)
+4. Test the port locally: `vcpkg install executorch`
+5. Submit a pull request to the VCPKG repository
+
+### For Local Development
+
+To use this port locally without submitting it to the registry:
+
+1. Copy the `ports/executorch` directory into your VCPKG installation's `ports/` directory
+2. Install it: `vcpkg install executorch`
+3. Use it in your project as shown in the `usage` file
+
+## Features
+
+The port supports several optional features:
+
+- **xnnpack**: XNNPACK backend for accelerated inference
+- **coreml**: CoreML backend (Apple platforms only)
+- **mps**: Metal Performance Shaders backend (Apple platforms only)
+- **vulkan**: Vulkan backend for GPU acceleration
+- **qnn**: Qualcomm QNN backend
+- **portable-ops**: Portable CPU operators
+- **optimized-ops**: Optimized CPU operators
+- **quantized-ops**: Quantized operators
+- **pybind**: Python bindings
+- **tests**: Build and run tests
+
+To install with features:
+```bash
+vcpkg install executorch[portable-ops,xnnpack]
+```
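+
+As a sketch of how installed features map to linkable targets (the target names follow this port's `usage` file; the project and source file names here are hypothetical):
+
+```cmake
+# Consumer CMakeLists.txt, assuming `vcpkg install executorch[portable-ops,xnnpack]`
+cmake_minimum_required(VERSION 3.19)
+project(et_demo CXX)
+
+find_package(ExecuTorch CONFIG REQUIRED)
+
+add_executable(et_demo main.cpp)
+# Link the core runtime plus the libraries the two installed features provide.
+target_link_libraries(et_demo PRIVATE
+    executorch
+    portable_ops_lib
+    xnnpack_backend
+)
+```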
+
+## Maintenance
+
+When a new version of ExecuTorch is released:
+
+1. Update the `version-string` in `vcpkg.json`
+2. Confirm that `REF` in `portfile.cmake` (derived from the manifest version as `v${VERSION}`) resolves to the new release tag
+3. Run `vcpkg install executorch` and update the `SHA512` hash from the reported error message
+4. Test the build
+5. Submit the update to the VCPKG registry
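+
+Concretely, because `REF` is derived from the manifest version, a routine bump usually touches only the `version-string` and the hash. The relevant call in `portfile.cmake` (annotations are illustrative):
+
+```cmake
+vcpkg_from_github(
+    OUT_SOURCE_PATH SOURCE_PATH
+    REPO pytorch/executorch
+    REF "v${VERSION}"  # resolves to the new tag once vcpkg.json is bumped
+    SHA512 0           # placeholder; copy the real hash from VCPKG's error output
+    HEAD_REF main
+)
+```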
+
+## Documentation
+
+- ExecuTorch: https://pytorch.org/executorch/
+- VCPKG Getting Started: https://learn.microsoft.com/en-us/vcpkg/get_started/get-started
+- VCPKG Packaging Guide: https://learn.microsoft.com/en-us/vcpkg/get_started/get-started-packaging
diff --git a/ports/executorch/portfile.cmake b/ports/executorch/portfile.cmake
new file mode 100644
index 00000000000..e3b61a74075
--- /dev/null
+++ b/ports/executorch/portfile.cmake
@@ -0,0 +1,84 @@
+vcpkg_check_linkage(ONLY_STATIC_LIBRARY)
+
+vcpkg_from_github(
+    OUT_SOURCE_PATH SOURCE_PATH
+    REPO pytorch/executorch
+    REF "v${VERSION}"
+    # Placeholder hash; replace with the value VCPKG reports on the first
+    # install attempt.
+    SHA512 0
+    HEAD_REF main
+)
+
+# Translate enabled port features into ExecuTorch CMake options.
+set(FEATURE_OPTIONS "")
+
+if("xnnpack" IN_LIST FEATURES)
+    list(APPEND FEATURE_OPTIONS -DEXECUTORCH_BUILD_XNNPACK=ON)
+endif()
+
+if("coreml" IN_LIST FEATURES)
+    list(APPEND FEATURE_OPTIONS -DEXECUTORCH_BUILD_COREML=ON)
+endif()
+
+if("mps" IN_LIST FEATURES)
+    list(APPEND FEATURE_OPTIONS -DEXECUTORCH_BUILD_MPS=ON)
+endif()
+
+if("vulkan" IN_LIST FEATURES)
+    list(APPEND FEATURE_OPTIONS -DEXECUTORCH_BUILD_VULKAN=ON)
+endif()
+
+if("qnn" IN_LIST FEATURES)
+    list(APPEND FEATURE_OPTIONS -DEXECUTORCH_BUILD_QNN=ON)
+endif()
+
+if("portable-ops" IN_LIST FEATURES)
+    list(APPEND FEATURE_OPTIONS -DEXECUTORCH_BUILD_PORTABLE_OPS=ON)
+endif()
+
+if("optimized-ops" IN_LIST FEATURES)
+    list(APPEND FEATURE_OPTIONS -DEXECUTORCH_BUILD_OPTIMIZED_OPS=ON)
+endif()
+
+if("quantized-ops" IN_LIST FEATURES)
+    list(APPEND FEATURE_OPTIONS -DEXECUTORCH_BUILD_QUANTIZED_OPS=ON)
+endif()
+
+if("pybind" IN_LIST FEATURES)
+    list(APPEND FEATURE_OPTIONS -DEXECUTORCH_BUILD_PYBIND=ON)
+endif()
+
+if("tests" IN_LIST FEATURES)
+    list(APPEND FEATURE_OPTIONS -DEXECUTORCH_BUILD_TESTS=ON)
+else()
+    list(APPEND FEATURE_OPTIONS -DEXECUTORCH_BUILD_TESTS=OFF)
+endif()
+
+vcpkg_cmake_configure(
+    SOURCE_PATH "${SOURCE_PATH}"
+    OPTIONS
+        ${FEATURE_OPTIONS}
+        -DEXECUTORCH_ENABLE_LOGGING=ON
+        -DEXECUTORCH_ENABLE_PROGRAM_VERIFICATION=ON
+        -DEXECUTORCH_BUILD_EXTENSION_DATA_LOADER=ON
+        -DEXECUTORCH_BUILD_EXTENSION_MODULE=ON
+        -DEXECUTORCH_BUILD_EXTENSION_RUNNER_UTIL=ON
+)
+
+vcpkg_cmake_build()
+
+if("tests" IN_LIST FEATURES)
+    # VCPKG has no dedicated CTest helper, so run the suite directly from the
+    # release build tree (this assumes upstream registers its tests with CTest
+    # and that ctest is on PATH).
+    vcpkg_execute_required_process(
+        COMMAND ctest --output-on-failure
+        WORKING_DIRECTORY "${CURRENT_BUILDTREES_DIR}/${TARGET_TRIPLET}-rel"
+        LOGNAME "ctest-${TARGET_TRIPLET}"
+    )
+endif()
+
+vcpkg_cmake_install()
+
+vcpkg_cmake_config_fixup(CONFIG_PATH lib/cmake/ExecuTorch)
+
+# Drop headers and share files duplicated into the debug tree.
+file(REMOVE_RECURSE "${CURRENT_PACKAGES_DIR}/debug/include")
+file(REMOVE_RECURSE "${CURRENT_PACKAGES_DIR}/debug/share")
+
+# Handle copyright
+vcpkg_install_copyright(FILE_LIST "${SOURCE_PATH}/LICENSE")
+
+# Install the usage file
+file(INSTALL "${CMAKE_CURRENT_LIST_DIR}/usage" DESTINATION "${CURRENT_PACKAGES_DIR}/share/${PORT}")
diff --git a/ports/executorch/usage b/ports/executorch/usage
new file mode 100644
index 00000000000..1759ffced67
--- /dev/null
+++ b/ports/executorch/usage
@@ -0,0 +1,30 @@
+ExecuTorch provides CMake targets:
+
+    find_package(ExecuTorch CONFIG REQUIRED)
+    target_link_libraries(main PRIVATE executorch)
+
+Available components/backends can be linked as needed:
+- executorch - Core runtime library
+- extension_module - Extension module support
+- extension_data_loader - Data loader extension
+- extension_runner_util - Runner utilities
+- portable_ops_lib - Portable CPU operators (if portable-ops feature enabled)
+- optimized_ops_lib - Optimized CPU operators (if optimized-ops feature enabled)
+- quantized_ops_lib - Quantized operators (if quantized-ops feature enabled)
+- xnnpack_backend - XNNPACK backend (if xnnpack feature enabled)
+- coreml_backend - CoreML backend (if coreml feature enabled, Apple only)
+- mps_backend - MPS backend (if mps feature enabled, Apple only)
+- vulkan_backend - Vulkan backend (if vulkan feature enabled)
+- qnn_backend - Qualcomm QNN backend (if qnn feature enabled)
+
+Example with specific components:
+
+    find_package(ExecuTorch CONFIG REQUIRED)
+    target_link_libraries(main PRIVATE
+        executorch
+        extension_module
+        portable_ops_lib
+    )
+
+For more information, see:
+https://pytorch.org/executorch/
diff --git a/ports/executorch/vcpkg.json b/ports/executorch/vcpkg.json
new file mode 100644
index 00000000000..9415e668ddc
--- /dev/null
+++ b/ports/executorch/vcpkg.json
@@ -0,0 +1,64 @@
+{
+  "name": "executorch",
+  "version-string": "1.2.0-alpha",
+  "description": "PyTorch's unified on-device AI inference framework for deploying models across mobile, embedded, and edge devices, from smartphones to microcontrollers.",
+  "homepage": "https://pytorch.org/executorch/",
+  "license": "BSD-3-Clause",
+  "supports": "!uwp",
+  "dependencies": [
+    {
+      "name": "vcpkg-cmake",
+      "host": true
+    },
+    {
+      "name": "vcpkg-cmake-config",
+      "host": true
+    }
+  ],
+  "features": {
+    "xnnpack": {
+      "description": "Build with XNNPACK backend support"
+    },
+    "coreml": {
+      "description": "Build with CoreML backend support (Apple platforms only)"
+    },
+    "mps": {
+      "description": "Build with MPS (Metal Performance Shaders) backend support (Apple platforms only)"
+    },
+    "vulkan": {
+      "description": "Build with Vulkan backend support"
+    },
+    "qnn": {
+      "description": "Build with Qualcomm QNN backend support"
+    },
+    "portable-ops": {
+      "description": "Build with portable CPU operators"
+    },
+    "optimized-ops": {
+      "description": "Build with optimized CPU operators"
+    },
+    "quantized-ops": {
+      "description": "Build with quantized operators"
+    },
+    "pybind": {
+      "description": "Build Python bindings",
+      "dependencies": [
+        "pybind11"
+      ]
+    },
+    "tests": {
+      "description": "Build and run tests",
+      "dependencies": [
+        "gtest"
+      ]
+    }
+  }
+}