diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json new file mode 100644 index 000000000..8c6f3fb3d --- /dev/null +++ b/.devcontainer/devcontainer.json @@ -0,0 +1,22 @@ +// For format details, see https://aka.ms/devcontainer.json. For config options, see the +// README at: https://github.com/devcontainers/templates/tree/main/src/python +{ + "name": "Python 3", + // Or use a Dockerfile or Docker Compose file. More info: https://containers.dev/guide/dockerfile + "image": "mcr.microsoft.com/devcontainers/python:1-3.12-bullseye" + + // Features to add to the dev container. More info: https://containers.dev/features. + // "features": {}, + + // Use 'forwardPorts' to make a list of ports inside the container available locally. + // "forwardPorts": [], + + // Use 'postCreateCommand' to run commands after the container is created. + // "postCreateCommand": "pip3 install --user -r requirements.txt", + + // Configure tool-specific properties. + // "customizations": {}, + + // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root. + // "remoteUser": "root" +} diff --git a/.github/workflows/python-publish.yml b/.github/workflows/python-publish.yml new file mode 100644 index 000000000..bdb99fa26 --- /dev/null +++ b/.github/workflows/python-publish.yml @@ -0,0 +1,35 @@ +name: Python Package + +on: + workflow_dispatch: + +jobs: + deploy: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 # Required for setuptools_scm to determine version + + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: '3.10' + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install build twine + + - name: Build package + run: | + cd python + python -m build --sdist + + - name: Publish package + uses: pypa/gh-action-pypi-publish@release/v1 + with: + user: __token__ + password: ${{ secrets.PYPI_API_TOKEN }} + packages-dir: python/dist/ + skip-existing: true \ No newline at end of file diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 105672b13..eaf45b9e2 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -67,3 +67,42 @@ jobs: with: name: meshoptimizer-npm path: js/meshoptimizer-*.tgz + + python: + strategy: + matrix: + os: [ubuntu, macos, windows] + runs-on: ${{matrix.os}}-latest + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 # Required for setuptools_scm to determine version + + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: '3.10' + + - name: Install package + run: | + python -m pip install --upgrade pip + pip install build + cd python + pip install -e . 
+ + - name: Run tests + run: | + cd python + python -m unittest discover -v + + # - name: Build package + # run: | + # python -m build --sdist + # if: matrix.os == 'ubuntu' + + # - name: Store package + # uses: actions/upload-artifact@v4 + # with: + # name: python-package + # path: python/dist/* + # if: matrix.os == 'ubuntu' \ No newline at end of file diff --git a/.gitignore b/.gitignore index 75ea27b87..1b384bab9 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,5 @@ +python/bindings/module.cpp +python/src # IDE integrations /.idea/ /.vs/ @@ -11,3 +13,278 @@ # Test files /data/ + +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# UV +# Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +#uv.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +#pdm.lock +# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it +# in version control. +# https://pdm.fming.dev/latest/usage/project/#working-with-version-control +.pdm.toml +.pdm-python +.pdm-build/ + +# PEP 582; used by e.g. 
github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +# JetBrains specific template is maintained in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. +#.idea/ + +# Ruff stuff: +.ruff_cache/ + +# PyPI configuration file +.pypirc + +# Prerequisites +*.d + +# Compiled Object files +*.slo +*.lo +*.o +*.obj + +# Precompiled Headers +*.gch +*.pch + +# Compiled Dynamic libraries +*.so +*.dylib +*.dll + +# Fortran module files +*.mod +*.smod + +# Compiled Static libraries +*.lai +*.la +*.a +*.lib + +# Executables +*.exe +*.out +*.app + +# General +.DS_Store +.AppleDouble +.LSOverride + +# Icon must end with two \r +Icon + + +# Thumbnails +._* + +# Files that might appear in the root of a volume +.DocumentRevisions-V100 +.fseventsd +.Spotlight-V100 +.TemporaryItems +.Trashes +.VolumeIcon.icns +.com.apple.timemachine.donotpresent + +# Directories potentially created on remote AFP share +.AppleDB +.AppleDesktop +Network Trash Folder +Temporary Items +.apdisk + +*~ + +# temporary files which can be created if a process still has a handle open of a deleted file +.fuse_hidden* + +# KDE directory preferences +.directory + +# Linux trash folder which might appear on any partition or disk +.Trash-* + +# .nfs files are created when an open file is removed but is still being accessed +.nfs* + +# Windows thumbnail cache files +Thumbs.db +Thumbs.db:encryptable +ehthumbs.db +ehthumbs_vista.db + +# Dump file +*.stackdump + +# Folder config file +[Dd]esktop.ini + +# Recycle Bin used on file shares +$RECYCLE.BIN/ + +# Windows Installer files +*.cab +*.msi +*.msix +*.msm +*.msp + +# Windows shortcuts +*.lnk diff --git a/PYPI.md b/PYPI.md new file mode 100644 index 000000000..1a1f58a54 --- /dev/null +++ b/PYPI.md @@ -0,0 +1,55 @@ +# Publishing to PyPI + +This project includes GitHub Actions workflows to automatically build and publish the Python package to PyPI. + +## Automatic Publishing + +The Python package is automatically built and published to PyPI when a new GitHub release is created. This is handled by the `.github/workflows/python-publish.yml` workflow. + +### Requirements + +To publish to PyPI, you need to set up a PyPI API token: + +1. Create an account on [PyPI](https://pypi.org/) if you don't have one +2. Go to your account settings and create an API token with upload permissions for the meshoptimizer project +3. Add the token as a GitHub repository secret named `PYPI_API_TOKEN` + +### Creating a Release + +To trigger the publishing workflow: + +1. Go to the GitHub repository page +2. Click on "Releases" in the right sidebar +3. Click "Create a new release" +4. Enter a tag version (e.g., `v0.22.0`) +5. Enter a release title and description +6. Click "Publish release" + +The workflow will automatically build the Python package and upload it to PyPI. 
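+
+As an alternative to the web UI, the release can be created from the command line with the GitHub CLI (a sketch; assumes `gh` is installed and authenticated for the repository):
+
+```bash
+# Creates the v0.22.0 tag and the corresponding GitHub release
+gh release create v0.22.0 --title "v0.22.0" --notes "Release notes"
+```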
+ +## Manual Building + +If you want to build the package manually: + +```bash +cd python +python -m pip install --upgrade pip +pip install build +python -m build +``` + +This will create distribution packages in the `python/dist/` directory. + +## Manual Publishing + +To manually publish to PyPI: + +```bash +cd python +python -m pip install --upgrade pip +pip install build twine +python -m build +python -m twine upload dist/* +``` + +You will be prompted for your PyPI username and password. \ No newline at end of file diff --git a/README.md b/README.md index bee53c330..eec3fd375 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,7 @@ When a GPU renders triangle meshes, various stages of the GPU pipeline have to process vertex and index data. The efficiency of these stages depends on the data you feed to them; this library provides algorithms to help optimize meshes for these stages, as well as algorithms to reduce the mesh complexity and storage overhead. -The library provides a C and C++ interface for all algorithms; you can use it from C/C++ or from other languages via FFI (such as P/Invoke). If you want to use this library from Rust, you should use [meshopt crate](https://crates.io/crates/meshopt). JavaScript interface for some algorithms is available through [meshoptimizer.js](https://www.npmjs.com/package/meshoptimizer). +The library provides a C and C++ interface for all algorithms; you can use it from C/C++ or from other languages via FFI (such as P/Invoke). If you want to use this library from Rust, you should use [meshopt crate](https://crates.io/crates/meshopt). JavaScript interface for some algorithms is available through [meshoptimizer.js](https://www.npmjs.com/package/meshoptimizer). Python interface is available through [pip install meshoptimizer](https://pypi.org/project/meshoptimizer/). [gltfpack](./gltf/README.md), which is a tool that can automatically optimize glTF files, is developed and distributed alongside the library. diff --git a/python/.vscode/settings.json b/python/.vscode/settings.json new file mode 100644 index 000000000..e9e6a805e --- /dev/null +++ b/python/.vscode/settings.json @@ -0,0 +1,11 @@ +{ + "python.testing.unittestArgs": [ + "-v", + "-s", + "./tests", + "-p", + "test_*.py" + ], + "python.testing.pytestEnabled": false, + "python.testing.unittestEnabled": true +} \ No newline at end of file diff --git a/python/MANIFEST.in b/python/MANIFEST.in new file mode 100644 index 000000000..85ea249cd --- /dev/null +++ b/python/MANIFEST.in @@ -0,0 +1,8 @@ +include README.md +include pyproject.toml +include module.template.cpp +recursive-include src *.cpp *.h +# Include the parent src directory if building from repository +graft ../src +prune .vscode +prune tests \ No newline at end of file diff --git a/python/README.md b/python/README.md new file mode 100644 index 000000000..178e04c3b --- /dev/null +++ b/python/README.md @@ -0,0 +1,176 @@ +# Meshoptimizer Python + +Python bindings for the [meshoptimizer](https://github.com/zeux/meshoptimizer) library, which provides algorithms for optimizing 3D meshes for GPU rendering. + +## Installation + +```bash +pip install meshoptimizer +``` + +Or install from source: + +```bash +git clone https://github.com/zeux/meshoptimizer.git +cd meshoptimizer/python +pip install -e . 
+``` + +## Features + +- Vertex cache optimization +- Overdraw optimization +- Vertex fetch optimization +- Mesh simplification +- Vertex/index buffer compression and decompression +- Zip file storage for encoded meshes +- Numpy array compression and storage +- And more... + +## Usage + +### Low-level API + +The meshoptimizer Python bindings provide a low-level API that directly maps to the C++ functions. All functions accept numpy arrays and have optional parameters that are automatically derived when not provided. + +```python +import numpy as np +from meshoptimizer import ( + optimize_vertex_cache, + optimize_overdraw, + optimize_vertex_fetch, + simplify, + encode_vertex_buffer, + decode_vertex_buffer, + encode_index_buffer, + decode_index_buffer +) + +# Create a simple mesh (a cube) +vertices = np.array([ + # positions + [-0.5, -0.5, -0.5], + [0.5, -0.5, -0.5], + [0.5, 0.5, -0.5], + [-0.5, 0.5, -0.5], + [-0.5, -0.5, 0.5], + [0.5, -0.5, 0.5], + [0.5, 0.5, 0.5], + [-0.5, 0.5, 0.5] +], dtype=np.float32) + +indices = np.array([ + 0, 1, 2, 2, 3, 0, # front + 1, 5, 6, 6, 2, 1, # right + 5, 4, 7, 7, 6, 5, # back + 4, 0, 3, 3, 7, 4, # left + 3, 2, 6, 6, 7, 3, # top + 4, 5, 1, 1, 0, 4 # bottom +], dtype=np.uint32) + +# Optimize vertex cache +optimized_indices = np.zeros_like(indices) +optimize_vertex_cache(optimized_indices, indices) # vertex_count is automatically derived + +# Optimize overdraw +optimized_indices2 = np.zeros_like(indices) +optimize_overdraw( + optimized_indices2, + optimized_indices, + vertices +) # index_count, vertex_count, and vertex_positions_stride are automatically derived + +# Optimize vertex fetch +optimized_vertices = np.zeros_like(vertices) +unique_vertex_count = optimize_vertex_fetch( + optimized_vertices, + optimized_indices2, + vertices +) # index_count, vertex_count, and vertex_size are automatically derived + +print(f"Optimized mesh has {unique_vertex_count} unique vertices") + +# Simplify the mesh +simplified_indices = np.zeros(len(indices), dtype=np.uint32) +target_index_count = len(indices) // 2 # Keep 50% of triangles + +simplified_index_count = simplify( + simplified_indices, + optimized_indices2, + vertices, + target_index_count=target_index_count +) # index_count, vertex_count, and vertex_positions_stride are automatically derived + +print(f"Simplified mesh has {simplified_index_count} indices") + +# Encode the mesh for efficient transmission +encoded_vertices = encode_vertex_buffer(optimized_vertices[:unique_vertex_count]) +encoded_indices = encode_index_buffer(simplified_indices[:simplified_index_count]) + +print(f"Encoded vertex buffer size: {len(encoded_vertices)} bytes") +print(f"Encoded index buffer size: {len(encoded_indices)} bytes") + +# Decode the mesh +decoded_vertices = decode_vertex_buffer( + unique_vertex_count, + vertices.itemsize * vertices.shape[1], + encoded_vertices +) + +decoded_indices = decode_index_buffer( + simplified_index_count, + 4, # 4 bytes per index (uint32) + encoded_indices +) + +# Verify the decoded data +print(f"Decoded vertices shape: {decoded_vertices.shape}") +print(f"Decoded indices shape: {decoded_indices.shape}") +``` + +## Notes on Index Encoding/Decoding + +When using the index buffer encoding and decoding functions, note that the decoded indices may not exactly match the original indices, even though the mesh geometry remains the same. This is due to how the meshoptimizer library internally encodes and decodes the indices. 
The triangles may be in a different order, but the resulting mesh is still valid and represents the same geometry. + +## API Reference + +### Vertex Remapping + +- `generate_vertex_remap(destination, indices=None, index_count=None, vertices=None, vertex_count=None, vertex_size=None)`: Generate vertex remap table. +- `remap_vertex_buffer(destination, vertices, vertex_count=None, vertex_size=None, remap=None)`: Remap vertex buffer. +- `remap_index_buffer(destination, indices, index_count=None, remap=None)`: Remap index buffer. + +### Optimization + +- `optimize_vertex_cache(destination, indices, index_count=None, vertex_count=None)`: Optimize vertex cache. +- `optimize_vertex_cache_strip(destination, indices, index_count=None, vertex_count=None)`: Optimize vertex cache for strip-like caches. +- `optimize_vertex_cache_fifo(destination, indices, index_count=None, vertex_count=None, cache_size=16)`: Optimize vertex cache for FIFO caches. +- `optimize_overdraw(destination, indices, vertex_positions, index_count=None, vertex_count=None, vertex_positions_stride=None, threshold=1.05)`: Optimize overdraw. +- `optimize_vertex_fetch(destination_vertices, indices, source_vertices, index_count=None, vertex_count=None, vertex_size=None)`: Optimize vertex fetch. +- `optimize_vertex_fetch_remap(destination, indices, index_count=None, vertex_count=None)`: Generate vertex remap to optimize vertex fetch. + +### Simplification + +- `simplify(destination, indices, vertex_positions, index_count=None, vertex_count=None, vertex_positions_stride=None, target_index_count=None, target_error=0.01, options=0, result_error=None)`: Simplify mesh. +- `simplify_with_attributes(destination, indices, vertex_positions, vertex_attributes, attribute_weights, index_count=None, vertex_count=None, vertex_positions_stride=None, vertex_attributes_stride=None, attribute_count=None, vertex_lock=None, target_index_count=None, target_error=0.01, options=0, result_error=None)`: Simplify mesh with attribute metric. +- `simplify_sloppy(destination, indices, vertex_positions, index_count=None, vertex_count=None, vertex_positions_stride=None, target_index_count=None, target_error=0.01, result_error=None)`: Simplify mesh (sloppy). +- `simplify_points(destination, vertex_positions, vertex_colors=None, vertex_count=None, vertex_positions_stride=None, vertex_colors_stride=None, color_weight=1.0, target_vertex_count=None)`: Simplify point cloud. +- `simplify_scale(vertex_positions, vertex_count=None, vertex_positions_stride=None)`: Get the scale factor for simplification error. + +### Encoding/Decoding + +- `encode_vertex_buffer(vertices, vertex_count=None, vertex_size=None)`: Encode vertex buffer. +- `encode_index_buffer(indices, index_count=None, vertex_count=None)`: Encode index buffer. +- `encode_vertex_version(version)`: Set vertex encoder format version. +- `encode_index_version(version)`: Set index encoder format version. +- `decode_vertex_buffer(vertex_count, vertex_size, buffer)`: Decode vertex buffer. +- `decode_index_buffer(index_count, index_size, buffer)`: Decode index buffer. +- `decode_vertex_version(buffer)`: Get encoded vertex format version. +- `decode_index_version(buffer)`: Get encoded index format version. +- `decode_filter_oct(buffer, count, stride)`: Apply octahedral filter to decoded data. +- `decode_filter_quat(buffer, count, stride)`: Apply quaternion filter to decoded data. +- `decode_filter_exp(buffer, count, stride)`: Apply exponential filter to decoded data. 
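+
+The following snippet is a sketch of the round-trip behavior described in the notes above: the triangle order, and the starting vertex within each triangle, may change, while the set of triangles is preserved. The `canonical` helper is illustrative and not part of the API:
+
+```python
+import numpy as np
+from meshoptimizer import encode_index_buffer, decode_index_buffer
+
+indices = np.array([0, 1, 2, 2, 3, 0], dtype=np.uint32)
+
+encoded = encode_index_buffer(indices)
+decoded = decode_index_buffer(len(indices), 4, encoded)
+
+def canonical(tri):
+    # Rotate so the smallest index comes first; winding is preserved
+    i = tri.index(min(tri))
+    return tri[i:] + tri[:i]
+
+original = {canonical(tuple(t)) for t in indices.reshape(-1, 3)}
+roundtrip = {canonical(tuple(t)) for t in decoded.reshape(-1, 3)}
+assert original == roundtrip
+```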
+ +## License + +MIT License \ No newline at end of file diff --git a/python/meshoptimizer/__init__.py b/python/meshoptimizer/__init__.py new file mode 100644 index 000000000..bc706fb68 --- /dev/null +++ b/python/meshoptimizer/__init__.py @@ -0,0 +1,55 @@ +""" +Python wrapper for the meshoptimizer library. + +This package provides Python bindings for the meshoptimizer C++ library, +which offers various algorithms for optimizing 3D meshes for GPU rendering. +It also provides utilities for compressing and storing numpy arrays. + +High-level functionality is available in the 'export' submodule. +""" + +from .encoder import ( + encode_vertex_buffer, + encode_index_buffer, + encode_index_sequence, + encode_vertex_version, + encode_index_version, +) + +from .decoder import ( + decode_vertex_buffer, + decode_index_buffer, + decode_index_sequence, + decode_vertex_version, + decode_index_version, + decode_filter_oct, + decode_filter_quat, + decode_filter_exp, +) + +from .optimizer import ( + optimize_vertex_cache, + optimize_vertex_cache_strip, + optimize_vertex_cache_fifo, + optimize_overdraw, + optimize_vertex_fetch, + optimize_vertex_fetch_remap, +) + +from .simplifier import ( + simplify, + simplify_with_attributes, + simplify_sloppy, + simplify_points, + simplify_scale, + SIMPLIFY_LOCK_BORDER, + SIMPLIFY_SPARSE, + SIMPLIFY_ERROR_ABSOLUTE, + SIMPLIFY_PRUNE, +) + +from .utils import ( + generate_vertex_remap, + remap_vertex_buffer, + remap_index_buffer, +) diff --git a/python/meshoptimizer/_loader.py b/python/meshoptimizer/_loader.py new file mode 100644 index 000000000..90c7242d5 --- /dev/null +++ b/python/meshoptimizer/_loader.py @@ -0,0 +1,250 @@ +""" +Library loader for meshoptimizer. +""" +import ctypes +import os +import sys +import platform +import glob +from typing import Optional, List, Any +import numpy as np + +def find_library() -> str: + """Find the meshoptimizer shared library.""" + # Get the directory of this file + this_dir = os.path.dirname(os.path.abspath(__file__)) + + # Look for any _meshoptimizer*.so or _meshoptimizer*.pyd file + if platform.system() == 'Windows': + pattern = os.path.join(this_dir, '_meshoptimizer*.pyd') + else: + pattern = os.path.join(this_dir, '_meshoptimizer*.so') + + lib_files = glob.glob(pattern) + + if lib_files: + return lib_files[0] + + raise ImportError(f"Could not find meshoptimizer library in {this_dir}") + +# Load the library +try: + lib_path = find_library() + lib = ctypes.CDLL(lib_path) +except ImportError as e: + # If the library is not found, provide a helpful error message + print(f"Error loading meshoptimizer library: {e}") + print("Make sure the library is properly installed.") + raise + +# Define function signatures +def setup_function_signatures() -> None: + """Set up the function signatures for the library.""" + # Vertex remap functions + lib.meshopt_generateVertexRemap.argtypes = [ + ctypes.POINTER(ctypes.c_uint), # destination + ctypes.POINTER(ctypes.c_uint), # indices + ctypes.c_size_t, # index_count + ctypes.c_void_p, # vertices + ctypes.c_size_t, # vertex_count + ctypes.c_size_t # vertex_size + ] + lib.meshopt_generateVertexRemap.restype = ctypes.c_size_t + + lib.meshopt_remapVertexBuffer.argtypes = [ + ctypes.c_void_p, # destination + ctypes.c_void_p, # vertices + ctypes.c_size_t, # vertex_count + ctypes.c_size_t, # vertex_size + ctypes.POINTER(ctypes.c_uint) # remap + ] + lib.meshopt_remapVertexBuffer.restype = None + + lib.meshopt_remapIndexBuffer.argtypes = [ + ctypes.POINTER(ctypes.c_uint), # destination + 
ctypes.POINTER(ctypes.c_uint), # indices + ctypes.c_size_t, # index_count + ctypes.POINTER(ctypes.c_uint) # remap + ] + lib.meshopt_remapIndexBuffer.restype = None + + # Vertex cache optimization + lib.meshopt_optimizeVertexCache.argtypes = [ + ctypes.POINTER(ctypes.c_uint), # destination + ctypes.POINTER(ctypes.c_uint), # indices + ctypes.c_size_t, # index_count + ctypes.c_size_t # vertex_count + ] + lib.meshopt_optimizeVertexCache.restype = None + + # Overdraw optimization + lib.meshopt_optimizeOverdraw.argtypes = [ + ctypes.POINTER(ctypes.c_uint), # destination + ctypes.POINTER(ctypes.c_uint), # indices + ctypes.c_size_t, # index_count + ctypes.POINTER(ctypes.c_float), # vertex_positions + ctypes.c_size_t, # vertex_count + ctypes.c_size_t, # vertex_positions_stride + ctypes.c_float # threshold + ] + lib.meshopt_optimizeOverdraw.restype = None + + # Vertex fetch optimization + lib.meshopt_optimizeVertexFetch.argtypes = [ + ctypes.c_void_p, # destination + ctypes.POINTER(ctypes.c_uint), # indices + ctypes.c_size_t, # index_count + ctypes.c_void_p, # vertices + ctypes.c_size_t, # vertex_count + ctypes.c_size_t # vertex_size + ] + lib.meshopt_optimizeVertexFetch.restype = ctypes.c_size_t + + # Simplification + lib.meshopt_simplify.argtypes = [ + ctypes.POINTER(ctypes.c_uint), # destination + ctypes.POINTER(ctypes.c_uint), # indices + ctypes.c_size_t, # index_count + ctypes.POINTER(ctypes.c_float), # vertex_positions + ctypes.c_size_t, # vertex_count + ctypes.c_size_t, # vertex_positions_stride + ctypes.c_size_t, # target_index_count + ctypes.c_float, # target_error + ctypes.c_uint, # options + ctypes.POINTER(ctypes.c_float) # result_error + ] + lib.meshopt_simplify.restype = ctypes.c_size_t + + # Simplification scale + lib.meshopt_simplifyScale.argtypes = [ + ctypes.POINTER(ctypes.c_float), # vertex_positions + ctypes.c_size_t, # vertex_count + ctypes.c_size_t # vertex_positions_stride + ] + lib.meshopt_simplifyScale.restype = ctypes.c_float # Return type is float + + # Encoding + lib.meshopt_encodeVertexBufferBound.argtypes = [ + ctypes.c_size_t, # vertex_count + ctypes.c_size_t # vertex_size + ] + lib.meshopt_encodeVertexBufferBound.restype = ctypes.c_size_t + + lib.meshopt_encodeVertexBuffer.argtypes = [ + ctypes.POINTER(ctypes.c_ubyte), # buffer + ctypes.c_size_t, # buffer_size + ctypes.c_void_p, # vertices + ctypes.c_size_t, # vertex_count + ctypes.c_size_t # vertex_size + ] + lib.meshopt_encodeVertexBuffer.restype = ctypes.c_size_t + + lib.meshopt_encodeIndexBufferBound.argtypes = [ + ctypes.c_size_t, # index_count + ctypes.c_size_t # vertex_count + ] + lib.meshopt_encodeIndexBufferBound.restype = ctypes.c_size_t + + lib.meshopt_encodeIndexBuffer.argtypes = [ + ctypes.POINTER(ctypes.c_ubyte), # buffer + ctypes.c_size_t, # buffer_size + ctypes.POINTER(ctypes.c_uint), # indices + ctypes.c_size_t # index_count + ] + lib.meshopt_encodeIndexBuffer.restype = ctypes.c_size_t + + lib.meshopt_encodeIndexSequenceBound.argtypes = [ + ctypes.c_size_t, # index_count + ctypes.c_size_t # vertex_count + ] + lib.meshopt_encodeIndexSequenceBound.restype = ctypes.c_size_t + + lib.meshopt_encodeIndexSequence.argtypes = [ + ctypes.POINTER(ctypes.c_ubyte), # buffer + ctypes.c_size_t, # buffer_size + ctypes.POINTER(ctypes.c_uint), # indices + ctypes.c_size_t # index_count + ] + lib.meshopt_encodeIndexSequence.restype = ctypes.c_size_t + + # Decoding + lib.meshopt_decodeVertexBuffer.argtypes = [ + ctypes.c_void_p, # destination + ctypes.c_size_t, # vertex_count + ctypes.c_size_t, # vertex_size + 
ctypes.POINTER(ctypes.c_ubyte), # buffer + ctypes.c_size_t # buffer_size + ] + lib.meshopt_decodeVertexBuffer.restype = ctypes.c_int + + lib.meshopt_decodeIndexBuffer.argtypes = [ + ctypes.c_void_p, # destination + ctypes.c_size_t, # index_count + ctypes.c_size_t, # index_size + ctypes.POINTER(ctypes.c_ubyte), # buffer + ctypes.c_size_t # buffer_size + ] + lib.meshopt_decodeIndexBuffer.restype = ctypes.c_int + + lib.meshopt_decodeIndexSequence.argtypes = [ + ctypes.c_void_p, # destination + ctypes.c_size_t, # index_count + ctypes.c_size_t, # index_size + ctypes.POINTER(ctypes.c_ubyte), # buffer + ctypes.c_size_t # buffer_size + ] + lib.meshopt_decodeIndexSequence.restype = ctypes.c_int + + # Encoding/Decoding versions + lib.meshopt_encodeVertexVersion.argtypes = [ctypes.c_int] + lib.meshopt_encodeVertexVersion.restype = None + + lib.meshopt_encodeIndexVersion.argtypes = [ctypes.c_int] + lib.meshopt_encodeIndexVersion.restype = None + + lib.meshopt_decodeVertexVersion.argtypes = [ + ctypes.POINTER(ctypes.c_ubyte), # buffer + ctypes.c_size_t # buffer_size + ] + lib.meshopt_decodeVertexVersion.restype = ctypes.c_int + + lib.meshopt_decodeIndexVersion.argtypes = [ + ctypes.POINTER(ctypes.c_ubyte), # buffer + ctypes.c_size_t # buffer_size + ] + lib.meshopt_decodeIndexVersion.restype = ctypes.c_int + + # Simplify sloppy + lib.meshopt_simplifySloppy.argtypes = [ + ctypes.POINTER(ctypes.c_uint), # destination + ctypes.POINTER(ctypes.c_uint), # indices + ctypes.c_size_t, # index_count + ctypes.POINTER(ctypes.c_float), # vertex_positions + ctypes.c_size_t, # vertex_count + ctypes.c_size_t, # vertex_positions_stride + ctypes.c_size_t, # target_index_count + ctypes.c_float, # target_error + ctypes.POINTER(ctypes.c_float) # result_error + ] + lib.meshopt_simplifySloppy.restype = ctypes.c_size_t + + # Simplify points + lib.meshopt_simplifyPoints.argtypes = [ + ctypes.POINTER(ctypes.c_uint), # destination + ctypes.POINTER(ctypes.c_float), # vertex_positions + ctypes.c_size_t, # vertex_count + ctypes.c_size_t, # vertex_positions_stride + ctypes.POINTER(ctypes.c_float), # vertex_colors + ctypes.c_size_t, # vertex_colors_stride + ctypes.c_float, # color_weight + ctypes.c_size_t # target_vertex_count + ] + lib.meshopt_simplifyPoints.restype = ctypes.c_size_t + +# Set up function signatures +try: + setup_function_signatures() +except AttributeError as e: + print(f"Error setting up function signatures: {e}") + print("The library might be missing some expected functions.") + raise \ No newline at end of file diff --git a/python/meshoptimizer/decoder.py b/python/meshoptimizer/decoder.py new file mode 100644 index 000000000..7bcfbdf4f --- /dev/null +++ b/python/meshoptimizer/decoder.py @@ -0,0 +1,221 @@ +""" +Decoder functions for meshoptimizer. +""" +import ctypes +from typing import Union +import numpy as np +from ._loader import lib + +def decode_vertex_buffer(vertex_count: int, + vertex_size: int, + buffer: Union[bytes, np.ndarray]) -> np.ndarray: + """ + Decode vertex buffer data. 
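+
+    Example:
+        A minimal round-trip sketch (assumes the buffer was produced by
+        encode_vertex_buffer for 8 vertices of 3 float32 components each):
+
+            import numpy as np
+            verts = np.zeros((8, 3), dtype=np.float32)
+            data = encode_vertex_buffer(verts)
+            decoded = decode_vertex_buffer(8, 12, data)  # shape (8, 3)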
+ + Args: + vertex_count: number of vertices + vertex_size: size of each vertex in bytes + buffer: encoded buffer as bytes + + Returns: + Numpy array containing the decoded vertex data + """ + # Convert buffer to numpy array if it's not already + buffer_array = np.frombuffer(buffer, dtype=np.uint8) + + # Create destination array + # Calculate the number of float32 elements needed + float_count = vertex_count * vertex_size // 4 + destination = np.zeros(float_count, dtype=np.float32) + + # Call C function + result = lib.meshopt_decodeVertexBuffer( + destination.ctypes.data_as(ctypes.c_void_p), + vertex_count, + vertex_size, + buffer_array.ctypes.data_as(ctypes.POINTER(ctypes.c_ubyte)), + len(buffer_array) + ) + + if result != 0: + raise RuntimeError(f"Failed to decode vertex buffer: error code {result}") + + # Reshape the array if vertex_size indicates multiple components per vertex + components_per_vertex = vertex_size // 4 # Assuming float32 (4 bytes) components + if components_per_vertex > 1: + destination = destination.reshape(vertex_count, components_per_vertex) + + return destination + +def decode_index_buffer(index_count: int, + index_size: int, + buffer: Union[bytes, np.ndarray]) -> np.ndarray: + """ + Decode index buffer data. + + Args: + index_count: number of indices + index_size: size of each index in bytes (2 or 4) + buffer: encoded buffer as bytes + + Returns: + Numpy array containing the decoded index data + """ + # Convert buffer to numpy array if it's not already + buffer_array = np.frombuffer(buffer, dtype=np.uint8) + + # Create destination array + destination = np.zeros(index_count, dtype=np.uint32) + + # Call C function + result = lib.meshopt_decodeIndexBuffer( + destination.ctypes.data_as(ctypes.c_void_p), + index_count, + index_size, + buffer_array.ctypes.data_as(ctypes.POINTER(ctypes.c_ubyte)), + len(buffer_array) + ) + + if result != 0: + raise RuntimeError(f"Failed to decode index buffer: error code {result}") + + return destination + +def decode_vertex_version(buffer: Union[bytes, np.ndarray]) -> int: + """ + Get encoded vertex format version. + + Args: + buffer: encoded buffer as bytes + + Returns: + Format version of the encoded vertex buffer, or -1 if the buffer header is invalid + """ + # Convert buffer to numpy array if it's not already + buffer_array = np.frombuffer(buffer, dtype=np.uint8) + + return lib.meshopt_decodeVertexVersion( + buffer_array.ctypes.data_as(ctypes.POINTER(ctypes.c_ubyte)), + len(buffer_array) + ) + +def decode_index_version(buffer: Union[bytes, np.ndarray]) -> int: + """ + Get encoded index format version. + + Args: + buffer: encoded buffer as bytes + + Returns: + Format version of the encoded index buffer, or -1 if the buffer header is invalid + """ + # Convert buffer to numpy array if it's not already + buffer_array = np.frombuffer(buffer, dtype=np.uint8) + + return lib.meshopt_decodeIndexVersion( + buffer_array.ctypes.data_as(ctypes.POINTER(ctypes.c_ubyte)), + len(buffer_array) + ) + +def decode_index_sequence(index_count: int, + index_size: int, + buffer: Union[bytes, np.ndarray]) -> np.ndarray: + """ + Decode index sequence data. 
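+
+    Example:
+        Index sequences preserve order, so a round-trip is expected to return
+        the original data (a minimal sketch):
+
+            import numpy as np
+            seq = np.array([0, 1, 2, 3], dtype=np.uint32)
+            data = encode_index_sequence(seq)
+            decoded = decode_index_sequence(4, 4, data)  # == seq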
+ + Args: + index_count: number of indices + index_size: size of each index in bytes (2 or 4) + buffer: encoded buffer as bytes + + Returns: + Numpy array containing the decoded index data + """ + # Convert buffer to numpy array if it's not already + buffer_array = np.frombuffer(buffer, dtype=np.uint8) + + # Create destination array + destination = np.zeros(index_count, dtype=np.uint32) + + # Call C function + result = lib.meshopt_decodeIndexSequence( + destination.ctypes.data_as(ctypes.c_void_p), + index_count, + index_size, + buffer_array.ctypes.data_as(ctypes.POINTER(ctypes.c_ubyte)), + len(buffer_array) + ) + + if result != 0: + raise RuntimeError(f"Failed to decode index sequence: error code {result}") + + return destination + +def decode_filter_oct(buffer: np.ndarray, count: int, stride: int) -> np.ndarray: + """ + Apply octahedral filter to decoded data. + + Args: + buffer: numpy array of decoded data + count: number of elements + stride: stride between elements in bytes + + Returns: + Numpy array with the filter applied (a copy of the input buffer) + """ + # Create a copy of the buffer to avoid modifying the original + result_buffer = buffer.copy() + + lib.meshopt_decodeFilterOct( + result_buffer.ctypes.data_as(ctypes.c_void_p), + count, + stride + ) + + return result_buffer + +def decode_filter_quat(buffer: np.ndarray, count: int, stride: int) -> np.ndarray: + """ + Apply quaternion filter to decoded data. + + Args: + buffer: numpy array of decoded data + count: number of elements + stride: stride between elements in bytes + + Returns: + Numpy array with the filter applied (a copy of the input buffer) + """ + # Create a copy of the buffer to avoid modifying the original + result_buffer = buffer.copy() + + lib.meshopt_decodeFilterQuat( + result_buffer.ctypes.data_as(ctypes.c_void_p), + count, + stride + ) + + return result_buffer + +def decode_filter_exp(buffer: np.ndarray, count: int, stride: int) -> np.ndarray: + """ + Apply exponential filter to decoded data. + + Args: + buffer: numpy array of decoded data + count: number of elements + stride: stride between elements in bytes + + Returns: + Numpy array with the filter applied (a copy of the input buffer) + """ + # Create a copy of the buffer to avoid modifying the original + result_buffer = buffer.copy() + + lib.meshopt_decodeFilterExp( + result_buffer.ctypes.data_as(ctypes.c_void_p), + count, + stride + ) + + return result_buffer \ No newline at end of file diff --git a/python/meshoptimizer/encoder.py b/python/meshoptimizer/encoder.py new file mode 100644 index 000000000..cafc5b238 --- /dev/null +++ b/python/meshoptimizer/encoder.py @@ -0,0 +1,166 @@ +""" +Encoder functions for meshoptimizer. +""" +import ctypes +from typing import Optional +import numpy as np +from ._loader import lib + +def encode_vertex_buffer(vertices: np.ndarray, + vertex_count: Optional[int] = None, + vertex_size: Optional[int] = None) -> bytes: + """ + Encode vertex buffer data. 
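+
+    Example:
+        A minimal sketch; vertex_count (2) and vertex_size (12 bytes, i.e.
+        3 float32 components) are derived from the array shape:
+
+            import numpy as np
+            verts = np.array([[0, 0, 0], [1, 0, 0]], dtype=np.float32)
+            data = encode_vertex_buffer(verts)  # returns bytes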
+ + Args: + vertices: numpy array of vertex data + vertex_count: number of vertices (optional, derived from vertices if not provided) + vertex_size: size of each vertex in bytes (optional, derived from vertices if not provided) + + Returns: + Encoded buffer as bytes + """ + # Convert vertices to numpy array if it's not already + vertices = np.asarray(vertices, dtype=np.float32) + + # Derive vertex_count and vertex_size if not provided + if vertex_count is None: + vertex_count = len(vertices) + + if vertex_size is None: + vertex_size = vertices.itemsize * vertices.shape[1] if len(vertices.shape) > 1 else vertices.itemsize + + # Calculate buffer size + bound = lib.meshopt_encodeVertexBufferBound(vertex_count, vertex_size) + + # Allocate buffer + buffer = np.zeros(bound, dtype=np.uint8) + + # Call C function + result_size = lib.meshopt_encodeVertexBuffer( + buffer.ctypes.data_as(ctypes.POINTER(ctypes.c_ubyte)), + bound, + vertices.ctypes.data_as(ctypes.c_void_p), + vertex_count, + vertex_size + ) + + if result_size == 0: + raise RuntimeError("Failed to encode vertex buffer") + + # Return only the used portion of the buffer + return bytes(buffer[:result_size]) + +def encode_index_buffer(indices: np.ndarray, + index_count: Optional[int] = None, + vertex_count: Optional[int] = None) -> bytes: + """ + Encode index buffer data. + + Args: + indices: numpy array of index data + index_count: number of indices (optional, derived from indices if not provided) + vertex_count: number of vertices (optional, derived from indices if not provided) + + Returns: + Encoded buffer as bytes + """ + # Convert indices to numpy array if it's not already + indices = np.asarray(indices, dtype=np.uint32) + + # Derive index_count if not provided + if index_count is None: + index_count = len(indices) + + # Derive vertex_count if not provided + if vertex_count is None: + vertex_count = np.max(indices) + 1 + + # Calculate buffer size + bound = lib.meshopt_encodeIndexBufferBound(index_count, vertex_count) + + # Allocate buffer + buffer = np.zeros(bound, dtype=np.uint8) + + # Call C function + result_size = lib.meshopt_encodeIndexBuffer( + buffer.ctypes.data_as(ctypes.POINTER(ctypes.c_ubyte)), + bound, + indices.ctypes.data_as(ctypes.POINTER(ctypes.c_uint)), + index_count + ) + + if result_size == 0: + raise RuntimeError("Failed to encode index buffer") + + # Return only the used portion of the buffer + return bytes(buffer[:result_size]) + +def encode_vertex_version(version: int) -> None: + """ + Set vertex encoder format version. + + Args: + version: version number (0 or 1) + """ + if version not in (0, 1): + raise ValueError("Version must be 0 or 1") + + lib.meshopt_encodeVertexVersion(version) + +def encode_index_version(version: int) -> None: + """ + Set index encoder format version. + + Args: + version: version number (0 or 1) + """ + if version not in (0, 1): + raise ValueError("Version must be 0 or 1") + + lib.meshopt_encodeIndexVersion(version) + +def encode_index_sequence(indices: np.ndarray, + index_count: Optional[int] = None, + vertex_count: Optional[int] = None) -> bytes: + """ + Encode index sequence data. 
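+
+    Example:
+        A minimal sketch; index_count is derived from the array length and
+        vertex_count defaults to max(indices) + 1:
+
+            import numpy as np
+            seq = np.array([0, 1, 2, 2, 1, 3], dtype=np.uint32)
+            data = encode_index_sequence(seq)  # returns bytes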
+ + Args: + indices: numpy array of index data + index_count: number of indices (optional, derived from indices if not provided) + vertex_count: number of vertices (optional, derived from indices if not provided) + + Returns: + Encoded buffer as bytes + """ + # Convert indices to numpy array if it's not already + indices = np.asarray(indices, dtype=np.uint32) + + # Derive index_count if not provided + if index_count is None: + index_count = len(indices) + + # Derive vertex_count if not provided + if vertex_count is None: + vertex_count = np.max(indices) + 1 + + # Calculate buffer size + bound = lib.meshopt_encodeIndexSequenceBound(index_count, vertex_count) + + # Allocate buffer + buffer = np.zeros(bound, dtype=np.uint8) + + # Call C function + result_size = lib.meshopt_encodeIndexSequence( + buffer.ctypes.data_as(ctypes.POINTER(ctypes.c_ubyte)), + bound, + indices.ctypes.data_as(ctypes.POINTER(ctypes.c_uint)), + index_count + ) + + if result_size == 0: + raise RuntimeError("Failed to encode index sequence") + + # Return only the used portion of the buffer + return bytes(buffer[:result_size]) \ No newline at end of file diff --git a/python/meshoptimizer/optimizer.py b/python/meshoptimizer/optimizer.py new file mode 100644 index 000000000..be241c9fb --- /dev/null +++ b/python/meshoptimizer/optimizer.py @@ -0,0 +1,247 @@ +""" +Optimization functions for meshoptimizer. +""" +import ctypes +from typing import Optional, Union, Tuple +import numpy as np +from ._loader import lib + +def optimize_vertex_cache(destination: np.ndarray, indices: np.ndarray, + index_count: Optional[int] = None, + vertex_count: Optional[int] = None) -> None: + """ + Optimize vertex cache. + + Args: + destination: numpy array to store the optimized indices + indices: numpy array of index data + index_count: number of indices (optional, derived from indices if not provided) + vertex_count: number of vertices (optional, derived from indices if not provided) + + Returns: + None (destination is modified in-place) + """ + # Convert indices to numpy array if it's not already + indices = np.asarray(indices, dtype=np.uint32) + + # Derive index_count if not provided + if index_count is None: + index_count = len(indices) + + # Derive vertex_count if not provided + if vertex_count is None: + vertex_count = np.max(indices) + 1 + + # Call C function + lib.meshopt_optimizeVertexCache( + destination.ctypes.data_as(ctypes.POINTER(ctypes.c_uint)), + indices.ctypes.data_as(ctypes.POINTER(ctypes.c_uint)), + index_count, + vertex_count + ) + +def optimize_vertex_cache_strip(destination: np.ndarray, indices: np.ndarray, + index_count: Optional[int] = None, + vertex_count: Optional[int] = None) -> None: + """ + Optimize vertex cache for strip-like caches. 
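+
+    Example:
+        A minimal sketch; the reordered indices are written into the
+        preallocated destination array:
+
+            import numpy as np
+            indices = np.array([0, 1, 2, 2, 1, 3], dtype=np.uint32)
+            out = np.zeros_like(indices)
+            optimize_vertex_cache_strip(out, indices)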
+ + Args: + destination: numpy array to store the optimized indices + indices: numpy array of index data + index_count: number of indices (optional, derived from indices if not provided) + vertex_count: number of vertices (optional, derived from indices if not provided) + + Returns: + None (destination is modified in-place) + """ + # Convert indices to numpy array if it's not already + indices = np.asarray(indices, dtype=np.uint32) + + # Derive index_count if not provided + if index_count is None: + index_count = len(indices) + + # Derive vertex_count if not provided + if vertex_count is None: + vertex_count = np.max(indices) + 1 + + # Call C function + lib.meshopt_optimizeVertexCacheStrip( + destination.ctypes.data_as(ctypes.POINTER(ctypes.c_uint)), + indices.ctypes.data_as(ctypes.POINTER(ctypes.c_uint)), + index_count, + vertex_count + ) + +def optimize_vertex_cache_fifo(destination: np.ndarray, indices: np.ndarray, + index_count: Optional[int] = None, + vertex_count: Optional[int] = None, + cache_size: int = 16) -> None: + """ + Optimize vertex cache for FIFO caches. + + Args: + destination: numpy array to store the optimized indices + indices: numpy array of index data + index_count: number of indices (optional, derived from indices if not provided) + vertex_count: number of vertices (optional, derived from indices if not provided) + cache_size: size of the cache (default: 16) + + Returns: + None (destination is modified in-place) + """ + # Convert indices to numpy array if it's not already + indices = np.asarray(indices, dtype=np.uint32) + + # Derive index_count if not provided + if index_count is None: + index_count = len(indices) + + # Derive vertex_count if not provided + if vertex_count is None: + vertex_count = np.max(indices) + 1 + + # Call C function + lib.meshopt_optimizeVertexCacheFifo( + destination.ctypes.data_as(ctypes.POINTER(ctypes.c_uint)), + indices.ctypes.data_as(ctypes.POINTER(ctypes.c_uint)), + index_count, + vertex_count, + cache_size + ) + +def optimize_overdraw(destination: np.ndarray, indices: np.ndarray, + vertex_positions: np.ndarray, + index_count: Optional[int] = None, + vertex_count: Optional[int] = None, + vertex_positions_stride: Optional[int] = None, + threshold: float = 1.05) -> None: + """ + Optimize overdraw. 
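+
+    Example:
+        A minimal sketch; for best results the input indices would typically
+        already be vertex-cache optimized:
+
+            import numpy as np
+            positions = np.array([[0, 0, 0], [1, 0, 0], [0, 1, 0], [1, 1, 0]],
+                                 dtype=np.float32)
+            indices = np.array([0, 1, 2, 2, 1, 3], dtype=np.uint32)
+            out = np.zeros_like(indices)
+            optimize_overdraw(out, indices, positions, threshold=1.05)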
+ + Args: + destination: numpy array to store the optimized indices + indices: numpy array of index data + vertex_positions: numpy array of vertex position data + index_count: number of indices (optional, derived from indices if not provided) + vertex_count: number of vertices (optional, derived from vertex_positions if not provided) + vertex_positions_stride: stride of vertex positions in bytes (optional, derived from vertex_positions if not provided) + threshold: threshold for optimization (default: 1.05) + + Returns: + None (destination is modified in-place) + """ + # Convert indices to numpy array if it's not already + indices = np.asarray(indices, dtype=np.uint32) + + # Convert vertex_positions to numpy array if it's not already + vertex_positions = np.asarray(vertex_positions, dtype=np.float32) + + # Derive index_count if not provided + if index_count is None: + index_count = len(indices) + + # Derive vertex_count if not provided + if vertex_count is None: + vertex_count = len(vertex_positions) + + # Derive vertex_positions_stride if not provided + if vertex_positions_stride is None: + vertex_positions_stride = vertex_positions.itemsize * vertex_positions.shape[1] if len(vertex_positions.shape) > 1 else vertex_positions.itemsize + + # Call C function + lib.meshopt_optimizeOverdraw( + destination.ctypes.data_as(ctypes.POINTER(ctypes.c_uint)), + indices.ctypes.data_as(ctypes.POINTER(ctypes.c_uint)), + index_count, + vertex_positions.ctypes.data_as(ctypes.POINTER(ctypes.c_float)), + vertex_count, + vertex_positions_stride, + threshold + ) + +def optimize_vertex_fetch(destination_vertices: np.ndarray, indices: np.ndarray, + source_vertices: np.ndarray, + index_count: Optional[int] = None, + vertex_count: Optional[int] = None, + vertex_size: Optional[int] = None) -> int: + """ + Optimize vertex fetch. + + Args: + destination_vertices: numpy array to store the optimized vertices + indices: numpy array of index data + source_vertices: numpy array of vertex data + index_count: number of indices (optional, derived from indices if not provided) + vertex_count: number of vertices (optional, derived from source_vertices if not provided) + vertex_size: size of each vertex in bytes (optional, derived from source_vertices if not provided) + + Returns: + Number of unique vertices + """ + # Convert indices to numpy array if it's not already + indices = np.asarray(indices, dtype=np.uint32) + + # Convert source_vertices to numpy array if it's not already + source_vertices = np.asarray(source_vertices) + + # Derive index_count if not provided + if index_count is None: + index_count = len(indices) + + # Derive vertex_count if not provided + if vertex_count is None: + vertex_count = len(source_vertices) + + # Derive vertex_size if not provided + if vertex_size is None: + vertex_size = source_vertices.itemsize * source_vertices.shape[1] if len(source_vertices.shape) > 1 else source_vertices.itemsize + + # Call C function + result = lib.meshopt_optimizeVertexFetch( + destination_vertices.ctypes.data_as(ctypes.c_void_p), + indices.ctypes.data_as(ctypes.POINTER(ctypes.c_uint)), + index_count, + source_vertices.ctypes.data_as(ctypes.c_void_p), + vertex_count, + vertex_size + ) + + return result + +def optimize_vertex_fetch_remap(destination: np.ndarray, indices: np.ndarray, + index_count: Optional[int] = None, + vertex_count: Optional[int] = None) -> int: + """ + Generate vertex remap to optimize vertex fetch. 
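+
+    Example:
+        A minimal sketch; the remap table holds one entry per vertex and the
+        return value is the number of unique vertices:
+
+            import numpy as np
+            indices = np.array([2, 1, 0], dtype=np.uint32)
+            remap = np.zeros(3, dtype=np.uint32)
+            unique = optimize_vertex_fetch_remap(remap, indices)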
+ + Args: + destination: numpy array to store the remap table + indices: numpy array of index data + index_count: number of indices (optional, derived from indices if not provided) + vertex_count: number of vertices (optional, derived from indices if not provided) + + Returns: + Number of unique vertices + """ + # Convert indices to numpy array if it's not already + indices = np.asarray(indices, dtype=np.uint32) + + # Derive index_count if not provided + if index_count is None: + index_count = len(indices) + + # Derive vertex_count if not provided + if vertex_count is None: + vertex_count = np.max(indices) + 1 + + # Call C function + result = lib.meshopt_optimizeVertexFetchRemap( + destination.ctypes.data_as(ctypes.POINTER(ctypes.c_uint)), + indices.ctypes.data_as(ctypes.POINTER(ctypes.c_uint)), + index_count, + vertex_count + ) + + return result \ No newline at end of file diff --git a/python/meshoptimizer/simplifier.py b/python/meshoptimizer/simplifier.py new file mode 100644 index 000000000..58d829c0c --- /dev/null +++ b/python/meshoptimizer/simplifier.py @@ -0,0 +1,359 @@ +""" +Simplification functions for meshoptimizer. +""" +import ctypes +from typing import Optional +import numpy as np +from ._loader import lib + +# Simplification options +SIMPLIFY_LOCK_BORDER = 1 << 0 +SIMPLIFY_SPARSE = 1 << 1 +SIMPLIFY_ERROR_ABSOLUTE = 1 << 2 +SIMPLIFY_PRUNE = 1 << 3 + +def simplify(destination: np.ndarray, indices: np.ndarray, vertex_positions: np.ndarray, + index_count: Optional[int] = None, vertex_count: Optional[int] = None, + vertex_positions_stride: Optional[int] = None, target_index_count: Optional[int] = None, + target_error: float = 0.01, options: int = 0, + result_error: Optional[np.ndarray] = None) -> int: + """ + Simplify mesh. + + Args: + destination: numpy array to store the simplified indices + indices: numpy array of index data + vertex_positions: numpy array of vertex position data + index_count: number of indices (optional, derived from indices if not provided) + vertex_count: number of vertices (optional, derived from vertex_positions if not provided) + vertex_positions_stride: stride of vertex positions in bytes (optional, derived from vertex_positions if not provided) + target_index_count: target number of indices (optional, defaults to 25% of original) + target_error: target error (default: 0.01) + options: simplification options (default: 0) + result_error: optional float to store the resulting error + + Returns: + Number of indices in the simplified mesh + """ + # Convert indices to numpy array if it's not already + indices = np.asarray(indices, dtype=np.uint32) + + # Convert vertex_positions to numpy array if it's not already + vertex_positions = np.asarray(vertex_positions, dtype=np.float32) + + # Derive index_count if not provided + if index_count is None: + index_count = len(indices) + + # Derive vertex_count if not provided + if vertex_count is None: + vertex_count = len(vertex_positions) + + # Derive vertex_positions_stride if not provided + if vertex_positions_stride is None: + vertex_positions_stride = vertex_positions.itemsize * vertex_positions.shape[1] if len(vertex_positions.shape) > 1 else vertex_positions.itemsize + + # Derive target_index_count if not provided + if target_index_count is None: + target_index_count = index_count // 4 # 25% of original + + # Create result_error_ptr if result_error is provided + if result_error is not None: + result_error_ptr = ctypes.pointer(ctypes.c_float(0.0)) + else: + result_error_ptr = ctypes.POINTER(ctypes.c_float)() + + # 
Call C function + result = lib.meshopt_simplify( + destination.ctypes.data_as(ctypes.POINTER(ctypes.c_uint)), + indices.ctypes.data_as(ctypes.POINTER(ctypes.c_uint)), + index_count, + vertex_positions.ctypes.data_as(ctypes.POINTER(ctypes.c_float)), + vertex_count, + vertex_positions_stride, + target_index_count, + target_error, + options, + result_error_ptr + ) + + # Update result_error if provided + if result_error is not None: + result_error[0] = result_error_ptr.contents.value + + return result + +def simplify_with_attributes(destination: np.ndarray, indices: np.ndarray, vertex_positions: np.ndarray, + vertex_attributes: np.ndarray, attribute_weights: np.ndarray, + index_count: Optional[int] = None, vertex_count: Optional[int] = None, + vertex_positions_stride: Optional[int] = None, + vertex_attributes_stride: Optional[int] = None, + attribute_count: Optional[int] = None, + vertex_lock: Optional[np.ndarray] = None, + target_index_count: Optional[int] = None, + target_error: float = 0.01, options: int = 0, + result_error: Optional[np.ndarray] = None) -> int: + """ + Simplify mesh with attribute metric. + + Args: + destination: numpy array to store the simplified indices + indices: numpy array of index data + vertex_positions: numpy array of vertex position data + vertex_attributes: numpy array of vertex attribute data + attribute_weights: numpy array of attribute weights + index_count: number of indices (optional, derived from indices if not provided) + vertex_count: number of vertices (optional, derived from vertex_positions if not provided) + vertex_positions_stride: stride of vertex positions in bytes (optional, derived from vertex_positions if not provided) + vertex_attributes_stride: stride of vertex attributes in bytes (optional, derived from vertex_attributes if not provided) + attribute_count: number of attributes (optional, derived from attribute_weights if not provided) + vertex_lock: optional numpy array of vertex lock flags + target_index_count: target number of indices (optional, defaults to 25% of original) + target_error: target error (default: 0.01) + options: simplification options (default: 0) + result_error: optional float to store the resulting error + + Returns: + Number of indices in the simplified mesh + """ + # Convert indices to numpy array if it's not already + indices = np.asarray(indices, dtype=np.uint32) + + # Convert vertex_positions to numpy array if it's not already + vertex_positions = np.asarray(vertex_positions, dtype=np.float32) + + # Convert vertex_attributes to numpy array if it's not already + vertex_attributes = np.asarray(vertex_attributes, dtype=np.float32) + + # Convert attribute_weights to numpy array if it's not already + attribute_weights = np.asarray(attribute_weights, dtype=np.float32) + + # Derive index_count if not provided + if index_count is None: + index_count = len(indices) + + # Derive vertex_count if not provided + if vertex_count is None: + vertex_count = len(vertex_positions) + + # Derive vertex_positions_stride if not provided + if vertex_positions_stride is None: + vertex_positions_stride = vertex_positions.itemsize * vertex_positions.shape[1] if len(vertex_positions.shape) > 1 else vertex_positions.itemsize + + # Derive vertex_attributes_stride if not provided + if vertex_attributes_stride is None: + vertex_attributes_stride = vertex_attributes.itemsize * vertex_attributes.shape[1] if len(vertex_attributes.shape) > 1 else vertex_attributes.itemsize + + # Derive attribute_count if not provided + if attribute_count is None: + 
attribute_count = len(attribute_weights) + + # Derive target_index_count if not provided + if target_index_count is None: + target_index_count = index_count // 4 # 25% of original + + # Create result_error_ptr if result_error is provided + if result_error is not None: + result_error_ptr = ctypes.pointer(ctypes.c_float(0.0)) + else: + result_error_ptr = ctypes.POINTER(ctypes.c_float)() + + # Create vertex_lock_ptr if vertex_lock is provided + if vertex_lock is not None: + vertex_lock = np.asarray(vertex_lock, dtype=np.uint8) + vertex_lock_ptr = vertex_lock.ctypes.data_as(ctypes.POINTER(ctypes.c_ubyte)) + else: + vertex_lock_ptr = ctypes.POINTER(ctypes.c_ubyte)() + + # Call C function + result = lib.meshopt_simplifyWithAttributes( + destination.ctypes.data_as(ctypes.POINTER(ctypes.c_uint)), + indices.ctypes.data_as(ctypes.POINTER(ctypes.c_uint)), + index_count, + vertex_positions.ctypes.data_as(ctypes.POINTER(ctypes.c_float)), + vertex_count, + vertex_positions_stride, + vertex_attributes.ctypes.data_as(ctypes.POINTER(ctypes.c_float)), + vertex_attributes_stride, + attribute_weights.ctypes.data_as(ctypes.POINTER(ctypes.c_float)), + attribute_count, + vertex_lock_ptr, + target_index_count, + target_error, + options, + result_error_ptr + ) + + # Update result_error if provided + if result_error is not None: + result_error[0] = result_error_ptr.contents.value + + return result + +def simplify_sloppy(destination: np.ndarray, indices: np.ndarray, vertex_positions: np.ndarray, + index_count: Optional[int] = None, vertex_count: Optional[int] = None, + vertex_positions_stride: Optional[int] = None, + target_index_count: Optional[int] = None, target_error: float = 0.01, + result_error: Optional[np.ndarray] = None) -> int: + """ + Simplify mesh (sloppy). + + Args: + destination: numpy array to store the simplified indices + indices: numpy array of index data + vertex_positions: numpy array of vertex position data + index_count: number of indices (optional, derived from indices if not provided) + vertex_count: number of vertices (optional, derived from vertex_positions if not provided) + vertex_positions_stride: stride of vertex positions in bytes (optional, derived from vertex_positions if not provided) + target_index_count: target number of indices (optional, defaults to 25% of original) + target_error: target error (default: 0.01) + result_error: optional float to store the resulting error + + Returns: + Number of indices in the simplified mesh + """ + # Convert indices to numpy array if it's not already + indices = np.asarray(indices, dtype=np.uint32) + + # Convert vertex_positions to numpy array if it's not already + vertex_positions = np.asarray(vertex_positions, dtype=np.float32) + + # Derive index_count if not provided + if index_count is None: + index_count = len(indices) + + # Derive vertex_count if not provided + if vertex_count is None: + vertex_count = len(vertex_positions) + + # Derive vertex_positions_stride if not provided + if vertex_positions_stride is None: + vertex_positions_stride = vertex_positions.itemsize * vertex_positions.shape[1] if len(vertex_positions.shape) > 1 else vertex_positions.itemsize + + # Derive target_index_count if not provided + if target_index_count is None: + target_index_count = index_count // 4 # 25% of original + + # Create result_error_ptr if result_error is provided + if result_error is not None: + result_error_ptr = ctypes.pointer(ctypes.c_float(0.0)) + else: + result_error_ptr = ctypes.POINTER(ctypes.c_float)() + + # Call C function + result = 
lib.meshopt_simplifySloppy( + destination.ctypes.data_as(ctypes.POINTER(ctypes.c_uint)), + indices.ctypes.data_as(ctypes.POINTER(ctypes.c_uint)), + index_count, + vertex_positions.ctypes.data_as(ctypes.POINTER(ctypes.c_float)), + vertex_count, + vertex_positions_stride, + target_index_count, + ctypes.c_float(target_error), # Explicitly convert to c_float + result_error_ptr + ) + + # Update result_error if provided + if result_error is not None: + result_error[0] = result_error_ptr.contents.value + + return result + +def simplify_points(destination: np.ndarray, vertex_positions: np.ndarray, + vertex_colors: Optional[np.ndarray] = None, + vertex_count: Optional[int] = None, + vertex_positions_stride: Optional[int] = None, + vertex_colors_stride: Optional[int] = None, + color_weight: float = 1.0, + target_vertex_count: Optional[int] = None) -> int: + """ + Simplify point cloud. + + Args: + destination: numpy array to store the simplified point indices + vertex_positions: numpy array of vertex position data + vertex_colors: numpy array of vertex color data (optional) + vertex_count: number of vertices (optional, derived from vertex_positions if not provided) + vertex_positions_stride: stride of vertex positions in bytes (optional, derived from vertex_positions if not provided) + vertex_colors_stride: stride of vertex colors in bytes (optional, derived from vertex_colors if not provided) + color_weight: weight of color in simplification (default: 1.0) + target_vertex_count: target number of vertices (optional, defaults to 25% of original) + + Returns: + Number of vertices in the simplified point cloud + """ + # Convert vertex_positions to numpy array if it's not already + vertex_positions = np.asarray(vertex_positions, dtype=np.float32) + + # Derive vertex_count if not provided + if vertex_count is None: + vertex_count = len(vertex_positions) + + # Derive vertex_positions_stride if not provided + if vertex_positions_stride is None: + vertex_positions_stride = vertex_positions.itemsize * vertex_positions.shape[1] if len(vertex_positions.shape) > 1 else vertex_positions.itemsize + + # Derive target_vertex_count if not provided + if target_vertex_count is None: + target_vertex_count = vertex_count // 4 # 25% of original + + # Handle vertex_colors + if vertex_colors is not None: + vertex_colors = np.asarray(vertex_colors, dtype=np.float32) + + # Derive vertex_colors_stride if not provided + if vertex_colors_stride is None: + vertex_colors_stride = vertex_colors.itemsize * vertex_colors.shape[1] if len(vertex_colors.shape) > 1 else vertex_colors.itemsize + + vertex_colors_ptr = vertex_colors.ctypes.data_as(ctypes.POINTER(ctypes.c_float)) + else: + vertex_colors_ptr = ctypes.POINTER(ctypes.c_float)() + vertex_colors_stride = 0 + + # Call C function + result = lib.meshopt_simplifyPoints( + destination.ctypes.data_as(ctypes.POINTER(ctypes.c_uint)), + vertex_positions.ctypes.data_as(ctypes.POINTER(ctypes.c_float)), + vertex_count, + vertex_positions_stride, + vertex_colors_ptr, + vertex_colors_stride, + ctypes.c_float(color_weight), # Explicitly convert to c_float + target_vertex_count + ) + + return result + +def simplify_scale(vertex_positions: np.ndarray, + vertex_count: Optional[int] = None, + vertex_positions_stride: Optional[int] = None) -> float: + """ + Get the scale factor for simplification error. 
+ + Args: + vertex_positions: numpy array of vertex position data + vertex_count: number of vertices (optional, derived from vertex_positions if not provided) + vertex_positions_stride: stride of vertex positions in bytes (optional, derived from vertex_positions if not provided) + + Returns: + Scale factor for simplification error + """ + # Convert vertex_positions to numpy array if it's not already + vertex_positions = np.asarray(vertex_positions, dtype=np.float32) + + # Derive vertex_count if not provided + if vertex_count is None: + vertex_count = len(vertex_positions) + + # Derive vertex_positions_stride if not provided + if vertex_positions_stride is None: + vertex_positions_stride = vertex_positions.itemsize * vertex_positions.shape[1] if len(vertex_positions.shape) > 1 else vertex_positions.itemsize + + # Call C function + result = lib.meshopt_simplifyScale( + vertex_positions.ctypes.data_as(ctypes.POINTER(ctypes.c_float)), + vertex_count, + vertex_positions_stride + ) + + return result \ No newline at end of file diff --git a/python/meshoptimizer/utils.py b/python/meshoptimizer/utils.py new file mode 100644 index 000000000..491129d2f --- /dev/null +++ b/python/meshoptimizer/utils.py @@ -0,0 +1,139 @@ +""" +Utility functions for meshoptimizer. +""" +import ctypes +from typing import Optional +import numpy as np +from ._loader import lib + +def generate_vertex_remap(destination: np.ndarray, + indices: Optional[np.ndarray] = None, + index_count: Optional[int] = None, + vertices: Optional[np.ndarray] = None, + vertex_count: Optional[int] = None, + vertex_size: Optional[int] = None) -> int: + """ + Generate vertex remap table. + + Args: + destination: numpy array to store the remap table + indices: numpy array of index data (can be None for unindexed geometry) + index_count: number of indices (optional, derived from indices if not provided) + vertices: numpy array of vertex data + vertex_count: number of vertices (optional, derived from vertices if not provided) + vertex_size: size of each vertex in bytes (optional, derived from vertices if not provided) + + Returns: + Number of unique vertices + """ + # Convert indices to numpy array if it's not already and not None + if indices is not None: + indices = np.asarray(indices, dtype=np.uint32) + + # Derive index_count if not provided + if index_count is None: + index_count = len(indices) + + # Convert vertices to numpy array if it's not already + if vertices is not None: + vertices = np.asarray(vertices) + + # Derive vertex_count if not provided + if vertex_count is None: + vertex_count = len(vertices) + + # Derive vertex_size if not provided + if vertex_size is None: + vertex_size = vertices.itemsize * vertices.shape[1] if len(vertices.shape) > 1 else vertices.itemsize + + # For unindexed geometry (indices is None), meshopt_generateVertexRemap expects index_count == vertex_count + if index_count is None: + index_count = vertex_count + + # Call C function + result = lib.meshopt_generateVertexRemap( + destination.ctypes.data_as(ctypes.POINTER(ctypes.c_uint)), + indices.ctypes.data_as(ctypes.POINTER(ctypes.c_uint)) if indices is not None else None, + index_count, + vertices.ctypes.data_as(ctypes.c_void_p) if vertices is not None else None, + vertex_count, + vertex_size + ) + + return result + +def remap_vertex_buffer(destination: np.ndarray, + vertices: np.ndarray, + vertex_count: Optional[int] = None, + vertex_size: Optional[int] = None, + remap: Optional[np.ndarray] = None) -> None: + """ + Remap vertex buffer.
+ + Args: + destination: numpy array to store the remapped vertices + vertices: numpy array of vertex data + vertex_count: number of vertices (optional, derived from vertices if not provided) + vertex_size: size of each vertex in bytes (optional, derived from vertices if not provided) + remap: numpy array of remap data + + Returns: + None (destination is modified in-place) + """ + # Convert vertices to numpy array if it's not already + vertices = np.asarray(vertices) + + # Derive vertex_count if not provided + if vertex_count is None: + vertex_count = len(vertices) + + # Derive vertex_size if not provided + if vertex_size is None: + vertex_size = vertices.itemsize * vertices.shape[1] if len(vertices.shape) > 1 else vertices.itemsize + + # Convert remap to numpy array if it's not already and not None + if remap is not None: + remap = np.asarray(remap, dtype=np.uint32) + + # Call C function + lib.meshopt_remapVertexBuffer( + destination.ctypes.data_as(ctypes.c_void_p), + vertices.ctypes.data_as(ctypes.c_void_p), + vertex_count, + vertex_size, + remap.ctypes.data_as(ctypes.POINTER(ctypes.c_uint)) if remap is not None else None + ) + +def remap_index_buffer(destination: np.ndarray, + indices: np.ndarray, + index_count: Optional[int] = None, + remap: Optional[np.ndarray] = None) -> None: + """ + Remap index buffer. + + Args: + destination: numpy array to store the remapped indices + indices: numpy array of index data + index_count: number of indices (optional, derived from indices if not provided) + remap: numpy array of remap data + + Returns: + None (destination is modified in-place) + """ + # Convert indices to numpy array if it's not already + indices = np.asarray(indices, dtype=np.uint32) + + # Derive index_count if not provided + if index_count is None: + index_count = len(indices) + + # Convert remap to numpy array if it's not already and not None + if remap is not None: + remap = np.asarray(remap, dtype=np.uint32) + + # Call C function + lib.meshopt_remapIndexBuffer( + destination.ctypes.data_as(ctypes.POINTER(ctypes.c_uint)), + indices.ctypes.data_as(ctypes.POINTER(ctypes.c_uint)), + index_count, + remap.ctypes.data_as(ctypes.POINTER(ctypes.c_uint)) if remap is not None else None + ) \ No newline at end of file diff --git a/python/module.template.cpp b/python/module.template.cpp new file mode 100644 index 000000000..c4d045e38 --- /dev/null +++ b/python/module.template.cpp @@ -0,0 +1,58 @@ +#include <Python.h> +#include <numpy/arrayobject.h> +#include <cstddef> // for size_t + +// Define implementation before including the header +#define MESHOPTIMIZER_NO_RESET_OVERRIDE +#define MESHOPTIMIZER_IMPLEMENTATION +#include "meshoptimizer.h" + +// Include all implementation files directly +{{SOURCE_IMPORTS}} + +// Prevent namespace pollution +namespace { + +void* fallback_allocate(size_t size) { + return PyMem_Malloc(size); +} + +void fallback_deallocate(void* ptr) { + PyMem_Free(ptr); +} + +void* (*allocate_fun)(size_t) = fallback_allocate; +void (*deallocate_fun)(void*) = fallback_deallocate; + +PyObject* meshopt_set_allocator(PyObject* self, PyObject* args) { + meshopt_setAllocator(allocate_fun, deallocate_fun); + Py_RETURN_NONE; +} + +PyMethodDef MeshoptMethods[] = { + {"set_allocator", meshopt_set_allocator, METH_NOARGS, + "Set the default memory allocator"}, + {NULL, NULL, 0, NULL} +}; + +struct PyModuleDef meshopt_module = { + PyModuleDef_HEAD_INIT, + "_meshoptimizer", + "Python binding for meshoptimizer library", + -1, + MeshoptMethods +}; + +} // anonymous namespace + +PyMODINIT_FUNC PyInit__meshoptimizer(void) { + import_array(); + +
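+    // import_array() (above) initializes the NumPy C API; on failure the macro sets a Python exception and returns NULL from this init function.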
PyObject* m = PyModule_Create(&meshopt_module); + if (m == NULL) + return NULL; + + meshopt_setAllocator(allocate_fun, deallocate_fun); + + return m; +} diff --git a/python/pyproject.toml b/python/pyproject.toml new file mode 100644 index 000000000..84513fe46 --- /dev/null +++ b/python/pyproject.toml @@ -0,0 +1,6 @@ +[build-system] +requires = ["setuptools>=42", "wheel", "setuptools_scm>=6.0", "numpy>=1.19.0"] +build-backend = "setuptools.build_meta" + +[tool.setuptools_scm] +root = ".." \ No newline at end of file diff --git a/python/setup.py b/python/setup.py new file mode 100644 index 000000000..83463c72c --- /dev/null +++ b/python/setup.py @@ -0,0 +1,232 @@ +from setuptools import setup, Extension, find_packages +import os +import platform +import sys + +# Get the directory containing this file (setup.py) +SETUP_DIR = os.path.dirname(os.path.abspath(__file__)) +SRC_DIR = os.path.join(SETUP_DIR, 'src') + +# Create source directory if it doesn't exist +if not os.path.exists(SRC_DIR): + os.makedirs(SRC_DIR) + +# Copy meshoptimizer.h header if it doesn't exist in src directory +def ensure_header_file(): + header_dest = os.path.join(SRC_DIR, 'meshoptimizer.h') + if not os.path.exists(header_dest): + # Try to find the header file + header_src = os.path.join('..', 'src', 'meshoptimizer.h') + if os.path.exists(header_src): + # Copy from parent directory + with open(header_src, 'r') as f: + content = f.read() + with open(header_dest, 'w') as f: + f.write(content) + print(f"Copied meshoptimizer.h from {header_src} to {header_dest}") + else: + # Check if it's in the current directory + header_src = os.path.join('src', 'meshoptimizer.h') + if os.path.exists(header_src): + with open(header_src, 'r') as f: + content = f.read() + with open(header_dest, 'w') as f: + f.write(content) + print(f"Copied meshoptimizer.h from {header_src} to {header_dest}") + else: + print("Warning: Could not find meshoptimizer.h header file") + +# Get long description from README +def get_long_description(): + try: + readme_path = os.path.join(SETUP_DIR, 'README.md') + if os.path.exists(readme_path): + with open(readme_path, 'r', encoding='utf-8') as f: + return f.read() + except Exception as e: + print(f"Warning: Could not read README.md: {e}") + return 'Python wrapper for meshoptimizer library' + +# Define source files explicitly to ensure they're included in the build +def get_source_files(): + # These are the source files needed for the Python extension + source_files = [ + 'src/allocator.cpp', + 'src/clusterizer.cpp', + 'src/indexcodec.cpp', + 'src/indexgenerator.cpp', + 'src/overdrawanalyzer.cpp', + 'src/overdrawoptimizer.cpp', + 'src/partition.cpp', + 'src/quantization.cpp', + 'src/simplifier.cpp', + 'src/spatialorder.cpp', + 'src/stripifier.cpp', + 'src/vcacheanalyzer.cpp', + 'src/vcacheoptimizer.cpp', + 'src/vertexcodec.cpp', + 'src/vertexfilter.cpp', + 'src/vfetchanalyzer.cpp', + 'src/vfetchoptimizer.cpp' + ] + + # Check if we're building from an sdist package + if not os.path.exists(os.path.join('..', 'src')): + # We're in an sdist package, source files should be in the package + return source_files + + # We're building from the repository, verify files exist + for i, src_file in enumerate(source_files): + # Check if file exists in parent directory + if os.path.exists(os.path.join('..', src_file)): + continue + # If not, check if it exists in the current directory + elif os.path.exists(src_file): + source_files[i] = src_file + else: + print(f"Warning: Source file {src_file} not found") + + return source_files + 
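+# Note: generate_module_file() below splices one '#include "<name>.cpp"' per file returned by +# get_source_files() into module.template.cpp, so the whole meshoptimizer library is compiled +# as a single translation unit together with the Python module glue. +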
+# Determine source files and generate module file +def generate_module_file(): + # Get source files + source_files = get_source_files() + # Create the module.cpp file from template + module_template_path = os.path.join(SETUP_DIR, 'module.template.cpp') + if not os.path.exists(module_template_path): + return [] + + output_module_path = os.path.join(SRC_DIR, 'module.cpp') + + # Read template and insert source imports + with open(module_template_path, 'r') as template_file: + template_content = template_file.read() + + # Copy source files to src directory if needed + for src_file in source_files: + src_basename = os.path.basename(src_file) + dest_path = os.path.join(SRC_DIR, src_basename) + + # If we're building from the repository, copy the files + if os.path.exists(os.path.join('..', src_file)): + with open(os.path.join('..', src_file), 'r') as f: + content = f.read() + with open(dest_path, 'w') as f: + f.write(content) + # If we're in an sdist package, the files might be in the current directory + elif os.path.exists(src_file): + with open(src_file, 'r') as f: + content = f.read() + with open(dest_path, 'w') as f: + f.write(content) + + # Generate includes for the module.cpp file + source_imports = '\n'.join([f'#include "{os.path.basename(src)}"' for src in source_files]) + module_content = template_content.replace('{{SOURCE_IMPORTS}}', source_imports) + + # Write the resulting module file + with open(output_module_path, 'w') as module_file: + # Add a comment indicating this file is generated + module_file.write("// This file is automatically generated by setup.py\n") + module_file.write(module_content) + + return source_files + +# Platform-specific compile and link arguments +def get_build_args(): + is_windows = platform.system() == 'Windows' + is_macos = platform.system() == 'Darwin' + + extra_compile_args = [] + extra_link_args = [] + + # Define macros for all platforms + define_macros = [ + ('MESHOPTIMIZER_IMPLEMENTATION', '1') # Include implementation in the build + ] + + if is_windows: + # Windows-specific flags (MSVC) + extra_compile_args = ['/std:c++14', '/O2', '/EHsc'] + # Export functions for DLL + define_macros.extend([ + ('MESHOPTIMIZER_API', '__declspec(dllexport)'), + ('MESHOPTIMIZER_EXPERIMENTAL', '__declspec(dllexport)') + ]) + extra_link_args = ['/DLL'] + else: + # Unix-like systems (Linux/Mac) + extra_compile_args = ['-std=c++11', '-O3', '-fPIC'] + if is_macos: + extra_compile_args.extend(['-stdlib=libc++', '-mmacosx-version-min=10.9']) + + return extra_compile_args, extra_link_args, define_macros + +# Import numpy for include directory +import numpy as np + +# Ensure header file is available +ensure_header_file() + +# Generate the module file with source files +source_files = generate_module_file() + +# Get the source files and build arguments +include_dirs = [SRC_DIR, np.get_include()] +# Also include parent src directory if it exists +if os.path.exists(os.path.join('..', 'src')): + include_dirs.append(os.path.join('..', 'src')) + +extra_compile_args, extra_link_args, define_macros = get_build_args() + +# Define the extension module +meshoptimizer_module = Extension( + 'meshoptimizer._meshoptimizer', + sources=["src/module.cpp"], + include_dirs=include_dirs, + extra_compile_args=extra_compile_args, + extra_link_args=extra_link_args, + define_macros=define_macros, + language='c++', +) + +setup( + name='meshoptimizer', + version="0.2.20a5", + description='Python wrapper for meshoptimizer library', +
long_description=get_long_description(), + long_description_content_type='text/markdown', + url='https://github.com/zeux/meshoptimizer', + packages=find_packages(), + ext_modules=[meshoptimizer_module], + install_requires=[ + 'numpy>=1.19.0', + ], + setup_requires=[ + 'setuptools>=42', + 'wheel', + 'numpy>=1.19.0', + ], + python_requires='>=3.6', + package_data={ + '': ['src/*.cpp', 'src/*.h'], + }, + include_package_data=True, + classifiers=[ + 'Development Status :: 4 - Beta', + 'Intended Audience :: Developers', + 'Topic :: Multimedia :: Graphics :: 3D Modeling', + 'License :: OSI Approved :: MIT License', + 'Programming Language :: Python :: 3', + 'Programming Language :: Python :: 3.6', + 'Programming Language :: Python :: 3.7', + 'Programming Language :: Python :: 3.8', + 'Programming Language :: Python :: 3.9', + 'Programming Language :: Python :: 3.10', + 'Programming Language :: Python :: 3.11', + ], + keywords='mesh optimization graphics 3d', +) \ No newline at end of file diff --git a/python/tests/__init__.py b/python/tests/__init__.py new file mode 100644 index 000000000..3c4693b71 --- /dev/null +++ b/python/tests/__init__.py @@ -0,0 +1,3 @@ +""" +Tests package for the meshoptimizer Python wrapper. +""" \ No newline at end of file diff --git a/python/tests/test_encoding.py b/python/tests/test_encoding.py new file mode 100644 index 000000000..2e2830166 --- /dev/null +++ b/python/tests/test_encoding.py @@ -0,0 +1,125 @@ +""" +Tests for the meshoptimizer Python wrapper. + +This file contains tests to verify that the encoding/decoding process +preserves the mesh geometry correctly. +""" +import numpy as np +import unittest +from meshoptimizer import ( + encode_vertex_buffer, decode_vertex_buffer, + encode_index_buffer, decode_index_buffer, + encode_index_sequence, decode_index_sequence +) + +class TestEncoding(unittest.TestCase): + """Test encoding and decoding functionality.""" + + def setUp(self): + """Set up test data.""" + # Create a simple mesh (a cube) + self.vertices = np.array([ + # positions + [-0.5, -0.5, -0.5], + [0.5, -0.5, -0.5], + [0.5, 0.5, -0.5], + [-0.5, 0.5, -0.5], + [-0.5, -0.5, 0.5], + [0.5, -0.5, 0.5], + [0.5, 0.5, 0.5], + [-0.5, 0.5, 0.5] + ], dtype=np.float32) + + self.indices = np.array([ + 0, 1, 2, 2, 3, 0, # front + 1, 5, 6, 6, 2, 1, # right + 5, 4, 7, 7, 6, 5, # back + 4, 0, 3, 3, 7, 4, # left + 3, 2, 6, 6, 7, 3, # top + 4, 5, 1, 1, 0, 4 # bottom + ], dtype=np.uint32) + + + def get_triangles_set(self, vertices, indices): + """ + Get a set of triangles from vertices and indices. + Each triangle is represented as a frozenset of tuples of vertex coordinates. + This makes the comparison invariant to vertex order within triangles. 
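+ For example, the index triples (0, 1, 2) and (2, 0, 1) refer to the same + triangle and map to the same frozenset of coordinate tuples.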
+ """ + triangles = set() + for i in range(0, len(indices), 3): + # Get the three vertices of the triangle + v1 = tuple(vertices[indices[i]]) + v2 = tuple(vertices[indices[i+1]]) + v3 = tuple(vertices[indices[i+2]]) + # Create a frozenset of the vertices (order-invariant) + triangle = frozenset([v1, v2, v3]) + triangles.add(triangle) + return triangles + + def test_encode_decode_vertices(self): + """Test that encoding and decoding vertices preserves the data.""" + # Encode vertices + encoded_vertices = encode_vertex_buffer( + self.vertices, + len(self.vertices), + self.vertices.itemsize * self.vertices.shape[1] + ) + + # Decode vertices using the new function that returns a numpy array + decoded_vertices = decode_vertex_buffer( + len(self.vertices), + self.vertices.itemsize * self.vertices.shape[1], + encoded_vertices + ) + + # Check that the decoded vertices match the original + np.testing.assert_array_almost_equal(self.vertices, decoded_vertices) + + def test_encode_decode_index_buffer(self): + """Test that encoding and decoding indices preserves the data.""" + # Encode indices + encoded_indices = encode_index_buffer( + self.indices, + len(self.indices), + len(self.vertices) + ) + + # Decode indices + decoded_indices = decode_index_buffer( + len(self.indices), + 4, # 4 bytes for uint32 + encoded_indices + ) + + # The encoding/decoding process may reorder indices for optimization + # So we don't check that the indices match exactly, but that they represent the same triangles + original_triangles = self.get_triangles_set(self.vertices, self.indices) + decoded_triangles = self.get_triangles_set(self.vertices, decoded_indices) + self.assertEqual(original_triangles, decoded_triangles) + + def test_encode_decode_index_sequence(self): + """Test that encoding and decoding index sequence preserves the data.""" + # Encode index sequence + encoded_sequence = encode_index_sequence( + self.indices, + len(self.indices), + len(self.vertices) + ) + + # Decode index sequence + decoded_sequence = decode_index_sequence( + len(self.indices), + 4, # 4 bytes for uint32 + encoded_sequence + ) + + # The encoding/decoding process may reorder indices for optimization + # So we don't check that the indices match exactly, but that they represent the same triangles + original_triangles = self.get_triangles_set(self.vertices, self.indices) + decoded_triangles = self.get_triangles_set(self.vertices, decoded_sequence) + self.assertEqual(original_triangles, decoded_triangles) + + +if __name__ == '__main__': + unittest.main() diff --git a/python/tests/test_optimization.py b/python/tests/test_optimization.py new file mode 100644 index 000000000..526b605bc --- /dev/null +++ b/python/tests/test_optimization.py @@ -0,0 +1,187 @@ +""" +Tests for the meshoptimizer Python wrapper. + +This file contains tests to verify that the optimization functions +work correctly and preserve the mesh geometry. 
+""" +import numpy as np +import unittest +from meshoptimizer import ( + optimize_vertex_cache, + optimize_overdraw, + optimize_vertex_fetch, + generate_vertex_remap, + remap_vertex_buffer, + remap_index_buffer +) + +class TestOptimization(unittest.TestCase): + """Test optimization functionality.""" + + def setUp(self): + """Set up test data.""" + # Create a simple mesh (a cube) + self.vertices = np.array([ + # positions + [-0.5, -0.5, -0.5], + [0.5, -0.5, -0.5], + [0.5, 0.5, -0.5], + [-0.5, 0.5, -0.5], + [-0.5, -0.5, 0.5], + [0.5, -0.5, 0.5], + [0.5, 0.5, 0.5], + [-0.5, 0.5, 0.5] + ], dtype=np.float32) + + self.indices = np.array([ + 0, 1, 2, 2, 3, 0, # front + 1, 5, 6, 6, 2, 1, # right + 5, 4, 7, 7, 6, 5, # back + 4, 0, 3, 3, 7, 4, # left + 3, 2, 6, 6, 7, 3, # top + 4, 5, 1, 1, 0, 4 # bottom + ], dtype=np.uint32) + + + def get_triangles_set(self, vertices, indices): + """ + Get a set of triangles from vertices and indices. + Each triangle is represented as a frozenset of tuples of vertex coordinates. + This makes the comparison invariant to vertex order within triangles. + """ + triangles = set() + for i in range(0, len(indices), 3): + # Get the three vertices of the triangle + v1 = tuple(vertices[indices[i]]) + v2 = tuple(vertices[indices[i+1]]) + v3 = tuple(vertices[indices[i+2]]) + # Create a frozenset of the vertices (order-invariant) + triangle = frozenset([v1, v2, v3]) + triangles.add(triangle) + return triangles + + def test_vertex_cache_optimization(self): + """Test vertex cache optimization.""" + # Optimize vertex cache + optimized_indices = np.zeros_like(self.indices) + optimize_vertex_cache( + optimized_indices, + self.indices, + len(self.indices), + len(self.vertices) + ) + + # Check that the number of indices is the same + self.assertEqual(len(self.indices), len(optimized_indices)) + + # Get the triangles from the original and optimized meshes + original_triangles = self.get_triangles_set(self.vertices, self.indices) + optimized_triangles = self.get_triangles_set(self.vertices, optimized_indices) + + # Check that the triangles match + self.assertEqual(original_triangles, optimized_triangles) + + def test_overdraw_optimization(self): + """Test overdraw optimization.""" + # Optimize overdraw + optimized_indices = np.zeros_like(self.indices) + optimize_overdraw( + optimized_indices, + self.indices, + self.vertices, + len(self.indices), + len(self.vertices), + self.vertices.itemsize * self.vertices.shape[1], + 1.05 + ) + + # Check that the number of indices is the same + self.assertEqual(len(self.indices), len(optimized_indices)) + + # Get the triangles from the original and optimized meshes + original_triangles = self.get_triangles_set(self.vertices, self.indices) + optimized_triangles = self.get_triangles_set(self.vertices, optimized_indices) + + # Check that the triangles match + self.assertEqual(original_triangles, optimized_triangles) + + def test_vertex_fetch_optimization(self): + """Test vertex fetch optimization.""" + # Optimize vertex fetch + optimized_vertices = np.zeros_like(self.vertices) + unique_vertex_count = optimize_vertex_fetch( + optimized_vertices, + self.indices, + self.vertices, + len(self.indices), + len(self.vertices), + self.vertices.itemsize * self.vertices.shape[1] + ) + + # Check that the number of unique vertices is less than or equal to the original + self.assertLessEqual(unique_vertex_count, len(self.vertices)) + + # For vertex fetch optimization, we can't directly compare triangles because + # the optimization reorders vertices for better cache 
locality. + # Instead, we'll check that the number of triangles is the same and + # that each vertex in the optimized mesh is present in the original mesh. + + # Check that all optimized vertices are present in the original vertices + for i in range(unique_vertex_count): + vertex = tuple(optimized_vertices[i]) + # Check if this vertex exists in the original vertices + found = False + for j in range(len(self.vertices)): + if np.allclose(self.vertices[j], optimized_vertices[i]): + found = True + break + self.assertTrue(found, f"Vertex {vertex} not found in original vertices") + + # Check that the triangle count is unchanged: the index buffer is remapped in place, + # so its length is preserved (the cube has 12 triangles) + self.assertEqual(len(self.indices) // 3, 12) + + def test_vertex_remap(self): + """Test vertex remapping.""" + # Generate vertex remap + remap = np.zeros(len(self.vertices), dtype=np.uint32) + unique_vertex_count = generate_vertex_remap( + remap, + self.indices, + len(self.indices), + self.vertices, + len(self.vertices), + self.vertices.itemsize * self.vertices.shape[1] + ) + + # Check that the number of unique vertices is less than or equal to the original + self.assertLessEqual(unique_vertex_count, len(self.vertices)) + + # Remap vertices + remapped_vertices = np.zeros_like(self.vertices) + remap_vertex_buffer( + remapped_vertices, + self.vertices, + len(self.vertices), + self.vertices.itemsize * self.vertices.shape[1], + remap + ) + + # Remap indices + remapped_indices = np.zeros_like(self.indices) + remap_index_buffer( + remapped_indices, + self.indices, + len(self.indices), + remap + ) + + # Get the triangles from the original and remapped meshes + original_triangles = self.get_triangles_set(self.vertices, self.indices) + remapped_triangles = self.get_triangles_set(remapped_vertices, remapped_indices) + + # Check that the triangles match + self.assertEqual(original_triangles, remapped_triangles) + + +if __name__ == '__main__': + unittest.main() \ No newline at end of file diff --git a/python/tests/test_simplification.py b/python/tests/test_simplification.py new file mode 100644 index 000000000..06a2f518c --- /dev/null +++ b/python/tests/test_simplification.py @@ -0,0 +1,228 @@ +""" +Tests for the meshoptimizer Python wrapper. + +This file contains tests to verify that the simplification functions +work correctly and preserve the mesh geometry as much as possible.
+""" +import numpy as np +import unittest +from meshoptimizer import ( + simplify, + simplify_sloppy, + simplify_points, + simplify_scale, + SIMPLIFY_LOCK_BORDER, + SIMPLIFY_SPARSE, +) + +class TestSimplification(unittest.TestCase): + """Test simplification functionality.""" + + def setUp(self): + """Set up test data.""" + # Create a simple mesh (a cube) + self.vertices = np.array([ + # positions + [-0.5, -0.5, -0.5], + [0.5, -0.5, -0.5], + [0.5, 0.5, -0.5], + [-0.5, 0.5, -0.5], + [-0.5, -0.5, 0.5], + [0.5, -0.5, 0.5], + [0.5, 0.5, 0.5], + [-0.5, 0.5, 0.5] + ], dtype=np.float32) + + self.indices = np.array([ + 0, 1, 2, 2, 3, 0, # front + 1, 5, 6, 6, 2, 1, # right + 5, 4, 7, 7, 6, 5, # back + 4, 0, 3, 3, 7, 4, # left + 3, 2, 6, 6, 7, 3, # top + 4, 5, 1, 1, 0, 4 # bottom + ], dtype=np.uint32) + + + # Create a more complex mesh (a sphere) + # Generate a sphere with 8 segments and 8 rings + segments = 8 + rings = 8 + vertices = [] + indices = [] + + # Generate vertices + for i in range(rings + 1): + v = i / rings + phi = v * np.pi + + for j in range(segments): + u = j / segments + theta = u * 2 * np.pi + + x = np.sin(phi) * np.cos(theta) + y = np.sin(phi) * np.sin(theta) + z = np.cos(phi) + + vertices.append([x, y, z]) + + # Generate indices + for i in range(rings): + for j in range(segments): + a = i * segments + j + b = i * segments + (j + 1) % segments + c = (i + 1) * segments + (j + 1) % segments + d = (i + 1) * segments + j + + # Two triangles per quad + indices.extend([a, b, c]) + indices.extend([a, c, d]) + + self.sphere_vertices = np.array(vertices, dtype=np.float32) + self.sphere_indices = np.array(indices, dtype=np.uint32) + + def test_simplify_basic(self): + """Test basic simplification.""" + # Simplify the mesh + simplified_indices = np.zeros_like(self.indices) + result_error = np.array([0.0], dtype=np.float32) + + new_index_count = simplify( + simplified_indices, + self.indices, + self.vertices, + len(self.indices), + len(self.vertices), + self.vertices.itemsize * self.vertices.shape[1], + len(self.indices) // 2, # Target 50% reduction + 0.01, # Target error + 0, # No options + result_error + ) + + # Check that the number of indices is reduced + self.assertLessEqual(new_index_count, len(self.indices)) + + # Check that the error is reasonable + self.assertGreaterEqual(result_error[0], 0.0) + + def test_simplify_options(self): + """Test simplification with different options.""" + # Test with SIMPLIFY_LOCK_BORDER option + simplified_indices = np.zeros_like(self.indices) + result_error = np.array([0.0], dtype=np.float32) + + new_index_count = simplify( + simplified_indices, + self.indices, + self.vertices, + len(self.indices), + len(self.vertices), + self.vertices.itemsize * self.vertices.shape[1], + len(self.indices) // 2, # Target 50% reduction + 0.01, # Target error + SIMPLIFY_LOCK_BORDER, # Lock border vertices + result_error + ) + + # Check that the number of indices is reduced + self.assertLessEqual(new_index_count, len(self.indices)) + + # Test with SIMPLIFY_SPARSE option + simplified_indices = np.zeros_like(self.indices) + result_error = np.array([0.0], dtype=np.float32) + + new_index_count = simplify( + simplified_indices, + self.indices, + self.vertices, + len(self.indices), + len(self.vertices), + self.vertices.itemsize * self.vertices.shape[1], + len(self.indices) // 2, # Target 50% reduction + 0.01, # Target error + SIMPLIFY_SPARSE, # Sparse simplification + result_error + ) + + # Check that the number of indices is reduced + self.assertLessEqual(new_index_count, 
len(self.indices)) + + def test_simplify_sloppy(self): + """Test sloppy simplification.""" + # Simplify the mesh (sloppy) + simplified_indices = np.zeros_like(self.sphere_indices) + result_error = np.array([0.0], dtype=np.float32) + + new_index_count = simplify_sloppy( + simplified_indices, + self.sphere_indices, + self.sphere_vertices, + len(self.sphere_indices), + len(self.sphere_vertices), + self.sphere_vertices.itemsize * self.sphere_vertices.shape[1], + len(self.sphere_indices) // 4, # Target 75% reduction + 0.01, # Target error + result_error + ) + + # Check that the number of indices is reduced + self.assertLessEqual(new_index_count, len(self.sphere_indices)) + + # Check that the error is reasonable + self.assertGreaterEqual(result_error[0], 0.0) + + def test_simplify_points(self): + """Test point cloud simplification.""" + # Create a point cloud + points = np.random.rand(100, 3).astype(np.float32) + + # Simplify the point cloud + simplified_points = np.zeros(50, dtype=np.uint32) + + new_point_count = simplify_points( + simplified_points, + points, + None, # No colors + len(points), + points.itemsize * points.shape[1], + 0, # No colors stride + 0.0, # No color weight + 50 # Target 50% reduction + ) + + # Check that the number of points is reduced + self.assertLessEqual(new_point_count, 50) + + # Test with colors + colors = np.random.rand(100, 3).astype(np.float32) + + simplified_points = np.zeros(50, dtype=np.uint32) + + new_point_count = simplify_points( + simplified_points, + points, + colors, + len(points), + points.itemsize * points.shape[1], + colors.itemsize * colors.shape[1], + 1.0, # Equal weight for colors + 50 # Target 50% reduction + ) + + # Check that the number of points is reduced + self.assertLessEqual(new_point_count, 50) + + def test_simplify_scale(self): + """Test simplification scale calculation.""" + # Calculate the scale + scale = simplify_scale( + self.vertices, + len(self.vertices), + self.vertices.itemsize * self.vertices.shape[1] + ) + + # Check that the scale is positive + self.assertGreater(scale, 0.0) + +if __name__ == '__main__': + unittest.main() \ No newline at end of file
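
A quick usage sketch for reviewers: the snippet below exercises the simplify API added in this PR. The grid-building code and the parameter values are illustrative assumptions of this note; the simplify/simplify_scale calls follow the wrapper signatures and the tests above, and the reading of result_error as a relative error that simplify_scale converts to mesh units follows the upstream meshoptimizer documentation.

    import numpy as np
    from meshoptimizer import simplify, simplify_scale

    # Build a small triangulated grid so there is something worth simplifying
    n = 16
    xs, ys = np.meshgrid(np.linspace(0, 1, n), np.linspace(0, 1, n))
    vertices = np.column_stack([xs.ravel(), ys.ravel(), np.zeros(n * n)]).astype(np.float32)
    # Two triangles per grid quad
    indices = np.array([k for i in range(n - 1) for j in range(n - 1)
                        for k in (i * n + j, i * n + j + 1, (i + 1) * n + j + 1,
                                  i * n + j, (i + 1) * n + j + 1, (i + 1) * n + j)],
                       dtype=np.uint32)

    destination = np.zeros_like(indices)
    error = np.array([0.0], dtype=np.float32)

    # Counts and strides are derived from the arrays when omitted
    new_count = simplify(destination, indices, vertices,
                         target_index_count=len(indices) // 4,
                         target_error=0.05, result_error=error)

    simplified = destination[:new_count]
    print(new_count, error[0] * simplify_scale(vertices))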