custom levels: add draco lib to support compressed glb files (#3723)
Some checks are pending
Build / 🖥️ Windows (push) Waiting to run
Build / 🐧 Linux (push) Waiting to run
Build / 🍎 MacOS (push) Waiting to run
Lint / 📝 Formatting (push) Waiting to run
Lint / 📝 Required Checks (push) Waiting to run
Lint / 📝 Optional Checks (push) Waiting to run

By adding the `draco` library as a dependency, `tinygltf` can support
GLB files compressed with the Draco compression algorithm which allows
for drastically reduced file sizes for custom levels (TFL's Crescent Top
GLB for example went from 135 MB to 37 MB).
This commit is contained in:
Hat Kid 2024-10-28 21:11:19 +01:00 committed by GitHub
parent dff9ac163a
commit 348bf83b89
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
748 changed files with 365465 additions and 1 deletions

View File

@ -283,6 +283,10 @@ include(test/CMakeLists.txt)
build_third_party_lib(lzokay lzokay)
build_third_party_lib(stb_image stb_image)
# build draco library for tinygltf compression support
include_directories(third-party/draco/src)
add_subdirectory(third-party/draco)
add_compile_definitions(TINYGLTF_ENABLE_DRACO)
build_third_party_lib(tiny_gltf tiny_gltf)
build_third_party_lib(xdelta3 xdelta3)

View File

@ -19,4 +19,5 @@ void extract_collide_frags(const level_tools::CollideHash& chash,
const decompiler::DecompilerTypeSystem& dts,
tfrag3::Level& out);
void set_vertices_for_tri(tfrag3::CollisionMesh::Vertex* out, const math::Vector4f* in);
} // namespace decompiler

View File

@ -4,6 +4,7 @@
#include "decompiler/extractor/extractor_util.h"
#include "decompiler/level_extractor/BspHeader.h"
#include "decompiler/level_extractor/extract_collide_frags.h"
#include "decompiler/level_extractor/extract_level.h"
#include "decompiler/level_extractor/extract_merc.h"
#include "goalc/build_level/collide/jak1/collide_bvh.h"
@ -104,6 +105,22 @@ bool run_build_level(const std::string& input_file,
auto& collide_drawable_tree = file.drawable_trees.collides.emplace_back();
collide_drawable_tree.bvh = collide::construct_collide_bvh(mesh_extract_out.collide.faces);
collide_drawable_tree.packed_frags = pack_collide_frags(collide_drawable_tree.bvh.frags.frags);
// for collision renderer
for (auto& face : mesh_extract_out.collide.faces) {
math::Vector4f verts[3];
for (int i = 0; i < 3; i++) {
verts[i].x() = face.v[i].x();
verts[i].y() = face.v[i].y();
verts[i].z() = face.v[i].z();
verts[i].w() = 1.f;
}
tfrag3::CollisionMesh::Vertex out_verts[3];
decompiler::set_vertices_for_tri(out_verts, verts);
for (auto& out : out_verts) {
out.pat = face.pat.val;
pc_level.collision.vertices.push_back(out);
}
}
}
auto sky_name = level_json.value("sky", "none");

5
third-party/draco/.clang-format generated vendored Normal file
View File

@ -0,0 +1,5 @@
---
Language: Cpp
BasedOnStyle: Google
PointerAlignment: Right
...

137
third-party/draco/.cmake-format.py generated vendored Normal file
View File

@ -0,0 +1,137 @@
with section('parse'):
# Specify structure for custom cmake functions
additional_commands = {
'draco_add_emscripten_executable': {
'kwargs': {
'NAME': '*',
'SOURCES': '*',
'OUTPUT_NAME': '*',
'DEFINES': '*',
'INCLUDES': '*',
'COMPILE_FLAGS': '*',
'LINK_FLAGS': '*',
'OBJLIB_DEPS': '*',
'LIB_DEPS': '*',
'GLUE_PATH': '*',
'PRE_LINK_JS_SOURCES': '*',
'POST_LINK_JS_SOURCES': '*',
'FEATURES': '*',
},
'pargs': 0,
},
'draco_add_executable': {
'kwargs': {
'NAME': '*',
'SOURCES': '*',
'OUTPUT_NAME': '*',
'TEST': 0,
'DEFINES': '*',
'INCLUDES': '*',
'COMPILE_FLAGS': '*',
'LINK_FLAGS': '*',
'OBJLIB_DEPS': '*',
'LIB_DEPS': '*',
},
'pargs': 0,
},
'draco_add_library': {
'kwargs': {
'NAME': '*',
'TYPE': '*',
'SOURCES': '*',
'TEST': 0,
'OUTPUT_NAME': '*',
'DEFINES': '*',
'INCLUDES': '*',
'COMPILE_FLAGS': '*',
'LINK_FLAGS': '*',
'OBJLIB_DEPS': '*',
'LIB_DEPS': '*',
'PUBLIC_INCLUDES': '*',
},
'pargs': 0,
},
'draco_generate_emscripten_glue': {
'kwargs': {
'INPUT_IDL': '*',
'OUTPUT_PATH': '*',
},
'pargs': 0,
},
'draco_get_required_emscripten_flags': {
'kwargs': {
'FLAG_LIST_VAR_COMPILER': '*',
'FLAG_LIST_VAR_LINKER': '*',
},
'pargs': 0,
},
'draco_option': {
'kwargs': {
'NAME': '*',
'HELPSTRING': '*',
'VALUE': '*',
},
'pargs': 0,
},
# Rules for built in CMake commands and those from dependencies.
'list': {
'kwargs': {
'APPEND': '*',
'FILTER': '*',
'FIND': '*',
'GET': '*',
'INSERT': '*',
'JOIN': '*',
'LENGTH': '*',
'POP_BACK': '*',
'POP_FRONT': '*',
'PREPEND': '*',
'REMOVE_DUPLICATES': '*',
'REMOVE_ITEM': '*',
'REVERSE': '*',
'SORT': '*',
'SUBLIST': '*',
'TRANSFORM': '*',
},
},
'protobuf_generate': {
'kwargs': {
'IMPORT_DIRS': '*',
'LANGUAGE': '*',
'OUT_VAR': '*',
'PROTOC_OUT_DIR': '*',
'PROTOS': '*',
},
},
}
with section('format'):
# Formatting options.
# How wide to allow formatted cmake files
line_width = 80
# How many spaces to tab for indent
tab_size = 2
# If true, separate flow control names from their parentheses with a space
separate_ctrl_name_with_space = False
# If true, separate function names from parentheses with a space
separate_fn_name_with_space = False
# If a statement is wrapped to more than one line, then dangle the closing
# parenthesis on its own line.
dangle_parens = False
# Do not sort argument lists.
enable_sort = False
# What style line endings to use in the output.
line_ending = 'unix'
# Format command names consistently as 'lower' or 'upper' case
command_case = 'canonical'
# Format keywords consistently as 'lower' or 'upper' case
keyword_case = 'upper'

1
third-party/draco/.gitattributes generated vendored Normal file
View File

@ -0,0 +1 @@
*.obj eol=lf

13
third-party/draco/.github/dependabot.yml generated vendored Normal file
View File

@ -0,0 +1,13 @@
# https://docs.github.com/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file
version: 2
updates:
- package-ecosystem: "bundler"
directory: "/docs"
schedule:
interval: "monthly"
groups:
doc-gems-security:
applies-to: "security-updates"
patterns:
- "*"

318
third-party/draco/.github/workflows/ci.yml generated vendored Normal file
View File

@ -0,0 +1,318 @@
on:
pull_request:
# Run on all pull requests.
push:
# Run on merges/pushes to main.
branches:
- main
schedule:
# Run nightly, at midnight.
- cron: '8 0 * * *'
name: draco-ci
jobs:
# Main build and test job.
draco-tests:
strategy:
matrix:
include:
- test_name: macos-make-release-shared
os: macos-latest
cmake_configure_command: |-
cmake .. -G "Unix Makefiles" \
-DBUILD_SHARED_LIBS=ON \
-DCMAKE_BUILD_TYPE=Release \
-DDRACO_TESTS=ON
cmake_build_command: cmake --build . -- -j2
draco_test_command: ./draco_tests
- test_name: macos-make-release-shared-with-transcoder
os: macos-latest
cmake_configure_command: |-
cmake .. -G "Unix Makefiles" \
-DBUILD_SHARED_LIBS=ON \
-DCMAKE_BUILD_TYPE=Release \
-DDRACO_TESTS=ON \
-DDRACO_TRANSCODER_SUPPORTED=ON
cmake_build_command: cmake --build . -- -j2
draco_test_command: ./draco_tests
- test_name: macos-make-release-static
os: macos-latest
cmake_configure_command: |-
cmake .. -G "Unix Makefiles" \
-DBUILD_SHARED_LIBS=OFF \
-DCMAKE_BUILD_TYPE=Release \
-DDRACO_TESTS=ON
cmake_build_command: cmake --build . -- -j2
draco_test_command: ./draco_tests
- test_name: macos-make-release-static-with-transcoder
os: macos-latest
cmake_configure_command: |-
cmake .. -G "Unix Makefiles" \
-DBUILD_SHARED_LIBS=OFF \
-DCMAKE_BUILD_TYPE=Release \
-DDRACO_TESTS=ON \
-DDRACO_TRANSCODER_SUPPORTED=ON
cmake_build_command: cmake --build . -- -j2
draco_test_command: ./draco_tests
- test_name: macos-xcode-release-shared
os: macos-latest
cmake_configure_command: |-
cmake .. -G Xcode \
-DBUILD_SHARED_LIBS=ON \
-DCMAKE_CONFIGURATION_TYPES=Release \
-DDRACO_TESTS=ON
cmake_build_command: cmake --build . --config Release
draco_test_command: Release/draco_tests
- test_name: macos-xcode-release-shared-with-transcoder
os: macos-latest
cmake_configure_command: |-
cmake .. -G Xcode \
-DBUILD_SHARED_LIBS=ON \
-DCMAKE_CONFIGURATION_TYPES=Release \
-DDRACO_TESTS=ON \
-DDRACO_TRANSCODER_SUPPORTED=ON
cmake_build_command: cmake --build . --config Release
draco_test_command: Release/draco_tests
- test_name: macos-xcode-release-static
os: macos-latest
cmake_configure_command: |-
cmake .. -G Xcode \
-DBUILD_SHARED_LIBS=OFF \
-DCMAKE_CONFIGURATION_TYPES=Release \
-DDRACO_TESTS=ON
cmake_build_command: cmake --build . --config Release
draco_test_command: Release/draco_tests
- test_name: macos-xcode-release-static-with-transcoder
os: macos-latest
cmake_configure_command: |-
cmake .. -G Xcode \
-DBUILD_SHARED_LIBS=OFF \
-DCMAKE_CONFIGURATION_TYPES=Release \
-DDRACO_TESTS=ON \
-DDRACO_TRANSCODER_SUPPORTED=ON
cmake_build_command: cmake --build . --config Release
draco_test_command: Release/draco_tests
- test_name: ubuntu-make-release-shared
os: ubuntu-latest
cmake_configure_command: |-
cmake .. -G "Unix Makefiles" \
-DBUILD_SHARED_LIBS=ON \
-DCMAKE_BUILD_TYPE=Release \
-DCMAKE_C_COMPILER=gcc-10 \
-DCMAKE_CXX_COMPILER=g++-10 \
-DDRACO_TESTS=ON
cmake_build_command: cmake --build . -- -j2
draco_test_command: ./draco_tests
- test_name: ubuntu-make-release-shared-with-transcoder
os: ubuntu-latest
cmake_configure_command: |-
cmake .. -G "Unix Makefiles" \
-DBUILD_SHARED_LIBS=ON \
-DCMAKE_BUILD_TYPE=Release \
-DCMAKE_C_COMPILER=gcc-10 \
-DCMAKE_CXX_COMPILER=g++-10 \
-DDRACO_TESTS=ON \
-DDRACO_TRANSCODER_SUPPORTED=ON
cmake_build_command: cmake --build . -- -j2
draco_test_command: ./draco_tests
- test_name: ubuntu-make-release-static
os: ubuntu-latest
cmake_configure_command: |-
cmake .. -G "Unix Makefiles" \
-DBUILD_SHARED_LIBS=OFF \
-DCMAKE_BUILD_TYPE=Release \
-DCMAKE_C_COMPILER=gcc-10 \
-DCMAKE_CXX_COMPILER=g++-10 \
-DDRACO_TESTS=ON
cmake_build_command: cmake --build . -- -j2
draco_test_command: ./draco_tests
- test_name: ubuntu-make-release-static-with-transcoder
os: ubuntu-latest
cmake_configure_command: |-
cmake .. -G "Unix Makefiles" \
-DBUILD_SHARED_LIBS=OFF \
-DCMAKE_BUILD_TYPE=Release \
-DCMAKE_C_COMPILER=gcc-10 \
-DCMAKE_CXX_COMPILER=g++-10 \
-DDRACO_TESTS=ON \
-DDRACO_TRANSCODER_SUPPORTED=ON
cmake_build_command: cmake --build . -- -j2
draco_test_command: ./draco_tests
- test_name: windows-msvc-release-shared
os: windows-2019
cmake_configure_command: |-
cmake .. -G "Visual Studio 16 2019" \
-DBUILD_SHARED_LIBS=ON \
-DCMAKE_CONFIGURATION_TYPES=Release \
-DDRACO_TESTS=ON
cmake_build_command: cmake --build . --config Release -- -m:2
draco_test_command: Release/draco_tests
- test_name: windows-msvc-release-shared-with-transcoder
os: windows-2019
cmake_configure_command: |-
cmake .. -G "Visual Studio 16 2019" \
-DBUILD_SHARED_LIBS=ON \
-DCMAKE_CONFIGURATION_TYPES=Release \
-DDRACO_TESTS=ON \
-DDRACO_TRANSCODER_SUPPORTED=ON
cmake_build_command: cmake --build . --config Release -- -m:2
draco_test_command: Release/draco_tests
- test_name: windows-msvc-release-static
os: windows-2019
cmake_configure_command: |-
cmake .. -G "Visual Studio 16 2019" \
-DBUILD_SHARED_LIBS=OFF \
-DCMAKE_CONFIGURATION_TYPES=Release \
-DDRACO_TESTS=ON
cmake_build_command: cmake --build . --config Release -- -m:2
draco_test_command: Release/draco_tests
- test_name: windows-msvc-release-static-with-transcoder
os: windows-2019
cmake_configure_command: |-
cmake .. -G "Visual Studio 16 2019" \
-DBUILD_SHARED_LIBS=OFF \
-DCMAKE_CONFIGURATION_TYPES=Release \
-DDRACO_TESTS=ON \
-DDRACO_TRANSCODER_SUPPORTED=ON
cmake_build_command: cmake --build . --config Release -- -m:2
draco_test_command: Release/draco_tests
- test_name: windows-make-release-shared
os: windows-2019
cmake_configure_command: |-
cmake .. -G "MinGW Makefiles" \
-DBUILD_SHARED_LIBS=ON \
-DCMAKE_BUILD_TYPE=Release \
-DCMAKE_C_COMPILER=x86_64-w64-mingw32-gcc \
-DCMAKE_CXX_COMPILER=x86_64-w64-mingw32-g++ \
-DDRACO_TESTS=ON
cmake_build_command: cmake --build . -- -j2
draco_test_command: ./draco_tests
- test_name: windows-make-release-shared-with-transcoder
os: windows-2019
cmake_configure_command: |-
cmake .. -G "MinGW Makefiles" \
-DBUILD_SHARED_LIBS=ON \
-DCMAKE_BUILD_TYPE=Release \
-DCMAKE_C_COMPILER=x86_64-w64-mingw32-gcc \
-DCMAKE_CXX_COMPILER=x86_64-w64-mingw32-g++ \
-DDRACO_TESTS=ON \
-DDRACO_TRANSCODER_SUPPORTED=ON
cmake_build_command: cmake --build . -- -j2
draco_test_command: ./draco_tests
- test_name: windows-make-release-static
os: windows-2019
cmake_configure_command: |-
cmake .. -G "MinGW Makefiles" \
-DBUILD_SHARED_LIBS=OFF \
-DCMAKE_BUILD_TYPE=Release \
-DCMAKE_C_COMPILER=x86_64-w64-mingw32-gcc \
-DCMAKE_CXX_COMPILER=x86_64-w64-mingw32-g++ \
-DDRACO_TESTS=ON
cmake_build_command: cmake --build . -- -j2
draco_test_command: ./draco_tests
- test_name: windows-make-release-static-with-transcoder
os: windows-2019
cmake_configure_command: |-
cmake .. -G "MinGW Makefiles" \
-DBUILD_SHARED_LIBS=OFF \
-DCMAKE_BUILD_TYPE=Release \
-DCMAKE_C_COMPILER=x86_64-w64-mingw32-gcc \
-DCMAKE_CXX_COMPILER=x86_64-w64-mingw32-g++ \
-DDRACO_TESTS=ON \
-DDRACO_TRANSCODER_SUPPORTED=ON
cmake_build_command: cmake --build . -- -j2
draco_test_command: ./draco_tests
name: test-${{ matrix.test_name }}
runs-on: ${{ matrix.os }}
steps:
- name: Clone Draco with Submodules.
uses: actions/checkout@v2
with:
submodules: true
- name: Create build directory
shell: bash
run: mkdir _gh_build
- name: Configure CMake build
shell: bash
run: ${{ matrix.cmake_configure_command }}
working-directory: ./_gh_build
- name: Build with CMake
shell: bash
run: ${{ matrix.cmake_build_command }}
working-directory: ./_gh_build
- name: Run tests
shell: bash
run: ${{ matrix.draco_test_command }}
working-directory: ./_gh_build
# Runs src/draco/tools/install_test.
draco-install-tests:
strategy:
matrix:
include:
- test_name: ubuntu-make
os: ubuntu-latest
test_command: python3 test.py -v -G "Unix Makefiles"
- test_name: ubuntu-make-with-transcoder
os: ubuntu-latest
test_command: python3 test.py -v -t -G "Unix Makefiles"
- test_name: macos-make
os: macos-latest
test_command: python3 test.py -v -G "Unix Makefiles"
- test_name: macos-make-with-transcoder
os: macos-latest
test_command: python3 test.py -v -t -G "Unix Makefiles"
- test_name: macos-xcode
os: macos-latest
test_command: python3 test.py -v -G Xcode
- test_name: macos-xcode-with-transcoder
os: macos-latest
test_command: python3 test.py -v -t -G Xcode
- test_name: windows-make
os: windows-2019
test_command: python3 test.py -v -G "MinGW Makefiles"
- test_name: windows-make-with-transcoder
os: windows-2019
test_command: python3 test.py -v -t -G "MinGW Makefiles"
- test_name: windows-msvc
os: windows-2019
test_command: python3 test.py -v -G "Visual Studio 16 2019"
- test_name: windows-msvc-with-transcoder
os: windows-2019
test_command: python3 test.py -v -t -G "Visual Studio 16 2019"
name: install-test-${{ matrix.test_name }}
runs-on: ${{ matrix.os }}
steps:
- name: Clone Draco with Submodules
uses: actions/checkout@v2
with:
submodules: true
- name: Run src/draco/tools/install_test/test.py
shell: bash
run: ${{ matrix.test_command }}
working-directory: ./src/draco/tools/install_test

2
third-party/draco/.gitignore generated vendored Normal file
View File

@ -0,0 +1,2 @@
docs/_site
src/draco/draco_features.h

12
third-party/draco/.gitmodules generated vendored Normal file
View File

@ -0,0 +1,12 @@
[submodule "third_party/googletest"]
path = third_party/googletest
url = https://github.com/google/googletest.git
[submodule "third_party/eigen"]
path = third_party/eigen
url = https://gitlab.com/libeigen/eigen.git
[submodule "third_party/tinygltf"]
path = third_party/tinygltf
url = https://github.com/syoyo/tinygltf.git
[submodule "third_party/filesystem"]
path = third_party/filesystem
url = https://github.com/gulrak/filesystem

7
third-party/draco/AUTHORS generated vendored Normal file
View File

@ -0,0 +1,7 @@
# This is the list of Draco authors for copyright purposes.
#
# This does not necessarily list everyone who has contributed code, since in
# some cases, their employer may be the copyright holder. To see the full list
# of contributors, see the revision history in source control.
Google Inc.
and other contributors

375
third-party/draco/BUILDING.md generated vendored Normal file
View File

@ -0,0 +1,375 @@
_**Contents**_
* [CMake Basics](#cmake-basics)
* [Mac OS X](#mac-os-x)
* [Windows](#windows)
* [CMake Build Configuration](#cmake-build-configuration)
* [Transcoder](#transcoder)
* [Debugging and Optimization](#debugging-and-optimization)
* [Googletest Integration](#googletest-integration)
* [Third Party Libraries](#third-party-libraries)
* [Javascript Encoder/Decoder](#javascript-encoderdecoder)
* [WebAssembly Decoder](#webassembly-decoder)
* [WebAssembly Mesh Only Decoder](#webassembly-mesh-only-decoder)
* [WebAssembly Point Cloud Only Decoder](#webassembly-point-cloud-only-decoder)
* [iOS Builds](#ios-builds)
* [Android Studio Project Integration](#android-studio-project-integration)
* [Native Android Builds](#native-android-builds)
* [vcpkg](#vcpkg)
Building
========
For all platforms, you must first generate the project/make files and then
compile the examples.
CMake Basics
------------
To generate project/make files for the default toolchain on your system, run
`cmake` from a directory where you would like to generate build files, and pass
it the path to your Draco repository.
E.g. Starting from Draco root.
~~~~~ bash
$ mkdir build_dir && cd build_dir
$ cmake ../
~~~~~
On Windows, the above command will produce Visual Studio project files for the
newest Visual Studio detected on the system. On Mac OS X and Linux systems,
the above command will produce a `makefile`.
To control what types of projects are generated, add the `-G` parameter to the
`cmake` command. This argument must be followed by the name of a generator.
Running `cmake` with the `--help` argument will list the available
generators for your system.
Mac OS X
---------
On Mac OS X, run the following command to generate Xcode projects:
~~~~~ bash
$ cmake ../ -G Xcode
~~~~~
Windows
-------
On a Windows box you would run the following command to generate Visual Studio
2019 projects:
~~~~~ bash
C:\Users\nobody> cmake ../ -G "Visual Studio 16 2019" -A Win32
~~~~~
To generate 64-bit Windows Visual Studio 2019 projects:
~~~~~ bash
C:\Users\nobody> cmake ../ -G "Visual Studio 16 2019" -A x64
~~~~~
CMake Build Configuration
-------------------------
Transcoder
----------
Before attempting to build Draco with transcoding support you must run an
additional Git command to obtain the submodules:
~~~~~ bash
# Run this command from within your Draco clone.
$ git submodule update --init
# See below if you prefer to use existing versions of Draco dependencies.
~~~~~
In order to build the `draco_transcoder` target, the transcoding support needs
to be explicitly enabled when you run `cmake`, for example:
~~~~~ bash
$ cmake ../ -DDRACO_TRANSCODER_SUPPORTED=ON
~~~~~
The above option is currently not compatible with our Javascript or WebAssembly
builds but all other use cases are supported. Note that binaries and libraries
built with the transcoder support may result in increased binary sizes of the
produced libraries and executables compared to the default CMake settings.
The following CMake variables can be used to configure Draco to use local
copies of third party dependencies instead of git submodules.
- `DRACO_EIGEN_PATH`: this path must contain an Eigen directory that includes
the Eigen sources.
- `DRACO_FILESYSTEM_PATH`: this path must contain the ghc directory where the
filesystem includes are located.
- `DRACO_TINYGLTF_PATH`: this path must contain tiny_gltf.h and its
dependencies.
When not specified the Draco build requires the presence of the submodules that
are stored within `draco/third_party`.
Debugging and Optimization
--------------------------
Unlike Visual Studio and Xcode projects, the build configuration for make
builds is controlled when you run `cmake`. The following examples demonstrate
various build configurations.
Omitting the build type produces makefiles that use release build flags
by default:
~~~~~ bash
$ cmake ../
~~~~~
A makefile using release (optimized) flags is produced like this:
~~~~~ bash
$ cmake ../ -DCMAKE_BUILD_TYPE=Release
~~~~~
A release build with debug info can be produced as well:
~~~~~ bash
$ cmake ../ -DCMAKE_BUILD_TYPE=RelWithDebInfo
~~~~~
And your standard debug build will be produced using:
~~~~~ bash
$ cmake ../ -DCMAKE_BUILD_TYPE=Debug
~~~~~
To enable the use of sanitizers when the compiler in use supports them, set the
sanitizer type when running CMake:
~~~~~ bash
$ cmake ../ -DDRACO_SANITIZE=address
~~~~~
Googletest Integration
----------------------
Draco includes testing support built using Googletest. The Googletest repository
is included as a submodule of the Draco git repository. Run the following
command to clone the Googletest repository:
~~~~~ bash
$ git submodule update --init
~~~~~
To enable Googletest unit test support the DRACO_TESTS cmake variable must be
turned on at cmake generation time:
~~~~~ bash
$ cmake ../ -DDRACO_TESTS=ON
~~~~~
To run the tests execute `draco_tests` from your build output directory:
~~~~~ bash
$ ./draco_tests
~~~~~
Draco can be configured to use a local Googletest installation. The
`DRACO_GOOGLETEST_PATH` variable overrides the behavior described above and
configures Draco to use the Googletest at the specified path.
Third Party Libraries
---------------------
When Draco is built with transcoding and/or testing support enabled the project
has dependencies on third party libraries:
- [Eigen](https://eigen.tuxfamily.org/)
- Provides various math utilities.
- [Googletest](https://github.com/google/googletest)
- Provides testing support.
- [Gulrak/filesystem](https://github.com/gulrak/filesystem)
- Provides C++17 std::filesystem emulation for pre-C++17 environments.
- [TinyGLTF](https://github.com/syoyo/tinygltf)
- Provides GLTF I/O support.
These dependencies are managed as Git submodules. To obtain the dependencies
run the following command in your Draco repository:
~~~~~ bash
$ git submodule update --init
~~~~~
WebAssembly Decoder
-------------------
The WebAssembly decoder can be built using the existing cmake build file by
passing the path to Emscripten's cmake toolchain file at cmake generation time
in the CMAKE_TOOLCHAIN_FILE variable and enabling the WASM build option.
In addition, the EMSCRIPTEN environment variable must be set to the local path
of the parent directory of the Emscripten tools directory.
~~~~~ bash
# Make the path to emscripten available to cmake.
$ export EMSCRIPTEN=/path/to/emscripten/tools/parent
# Emscripten.cmake can be found within your Emscripten installation directory,
# it should be the subdir: cmake/Modules/Platform/Emscripten.cmake
$ cmake ../ -DCMAKE_TOOLCHAIN_FILE=/path/to/Emscripten.cmake -DDRACO_WASM=ON
# Build the WebAssembly decoder.
$ make
# Run the Javascript wrapper through Closure.
$ java -jar closure.jar --compilation_level SIMPLE --js draco_decoder.js --js_output_file draco_wasm_wrapper.js
~~~~~
WebAssembly Mesh Only Decoder
-----------------------------
~~~~~ bash
# cmake command line for mesh only WebAssembly decoder.
$ cmake ../ -DCMAKE_TOOLCHAIN_FILE=/path/to/Emscripten.cmake -DDRACO_WASM=ON -DDRACO_POINT_CLOUD_COMPRESSION=OFF
~~~~~
WebAssembly Point Cloud Only Decoder
-----------------------------
~~~~~ bash
# cmake command line for point cloud only WebAssembly decoder.
$ cmake ../ -DCMAKE_TOOLCHAIN_FILE=/path/to/Emscripten.cmake -DDRACO_WASM=ON -DDRACO_MESH_COMPRESSION=OFF
~~~~~
Javascript Encoder/Decoder
------------------
The javascript encoder and decoder can be built using the existing cmake build
file by passing the path to Emscripten's cmake toolchain file at cmake
generation time in the CMAKE_TOOLCHAIN_FILE variable.
In addition, the EMSCRIPTEN environment variable must be set to the local path
of the parent directory of the Emscripten tools directory.
*Note* The WebAssembly decoder should be favored over the JavaScript decoder.
~~~~~ bash
# Make the path to emscripten available to cmake.
$ export EMSCRIPTEN=/path/to/emscripten/tools/parent
# Emscripten.cmake can be found within your Emscripten installation directory,
# it should be the subdir: cmake/Modules/Platform/Emscripten.cmake
$ cmake ../ -DCMAKE_TOOLCHAIN_FILE=/path/to/Emscripten.cmake
# Build the Javascript encoder and decoder.
$ make
~~~~~
iOS Builds
---------------------
These are the basic commands needed to build Draco for iOS targets.
~~~~~ bash
#arm64
$ cmake ../ -DCMAKE_TOOLCHAIN_FILE=../cmake/toolchains/arm64-ios.cmake
$ make
#x86_64
$ cmake ../ -DCMAKE_TOOLCHAIN_FILE=../cmake/toolchains/x86_64-ios.cmake
$ make
#armv7
$ cmake ../ -DCMAKE_TOOLCHAIN_FILE=../cmake/toolchains/armv7-ios.cmake
$ make
#i386
$ cmake ../ -DCMAKE_TOOLCHAIN_FILE=../cmake/toolchains/i386-ios.cmake
$ make
~~~~~
After building for each target the libraries can be merged into a single
universal/fat library using lipo, and then used in iOS applications.
Native Android Builds
---------------------
It's sometimes useful to build Draco command line tools and run them directly on
Android devices via adb.
~~~~~ bash
# This example is for armeabi-v7a.
$ cmake ../ -DCMAKE_TOOLCHAIN_FILE=../cmake/toolchains/android.cmake \
-DDRACO_ANDROID_NDK_PATH=path/to/ndk -DANDROID_ABI=armeabi-v7a
$ make
# See the android.cmake toolchain file for additional ANDROID_ABI options and
# other configurable Android variables.
~~~~~
After building the tools they can be moved to an android device via the use of
`adb push`, and then run within an `adb shell` instance.
Android Studio Project Integration
----------------------------------
Tested on Android Studio 3.5.3.
Draco - Static Library
----------------------
To include Draco in an existing or new Android Studio project, reference it
from the `cmake` file of an existing native project that has a minimum SDK
version of 18 or higher. The project must support C++11.
To add Draco to your project:
1. Create a new "Native C++" project.
2. Add the following somewhere within the `CMakeLists.txt` for your project
before the `add_library()` for your project's native-lib:
~~~~~ cmake
# Note "/path/to/draco" must be changed to the path where you have cloned
# the Draco sources.
add_subdirectory(/path/to/draco
${CMAKE_BINARY_DIR}/draco_build)
include_directories("${CMAKE_BINARY_DIR}" /path/to/draco)
~~~~~
3. Add the library target "draco" to the `target_link_libraries()` call for
your project's native-lib. The `target_link_libraries()` call for an
empty activity native project looks like this after the addition of
Draco:
~~~~~ cmake
target_link_libraries( # Specifies the target library.
native-lib
# Tells cmake this build depends on libdraco.
draco
# Links the target library to the log library
# included in the NDK.
${log-lib} )
vcpkg
---------------------
You can download and install Draco using the
[vcpkg](https://github.com/Microsoft/vcpkg/) dependency manager:
git clone https://github.com/Microsoft/vcpkg.git
cd vcpkg
./bootstrap-vcpkg.sh
./vcpkg integrate install
vcpkg install draco
The Draco port in vcpkg is kept up to date by Microsoft team members and
community contributors. If the version is out of date, please
[create an issue or pull request](https://github.com/Microsoft/vcpkg) on the
vcpkg repository.

106
third-party/draco/CMAKE.md generated vendored Normal file
View File

@ -0,0 +1,106 @@
# CMake Build System Overview
[TOC]
This document provides a general layout of the Draco CMake build system.
## Core Build System Files
These files are listed in order of interest to maintainers of the build system.
- `CMakeLists.txt` is the main driver of the build system. It's responsible
for defining targets and source lists, surfacing build system options, and
tying the components of the build system together.
- `cmake/draco_build_definitions.cmake` defines the macro
`draco_set_build_definitions()`, which is called from `CMakeLists.txt` to
configure include paths, compiler and linker flags, library settings,
platform specific configuration, and other build system settings that
depend on optional build configurations.
- `cmake/draco_targets.cmake` defines the macros `draco_add_library()` and
`draco_add_executable()` which are used to create all targets in the CMake
build. These macros attempt to behave in a manner that loosely mirrors the
blaze `cc_library()` and `cc_binary()` commands. Note that
`draco_add_executable()` is also used for tests.
- `cmake/draco_emscripten.cmake` handles Emscripten SDK integration. It
defines several Emscripten specific macros that are required to build the
Emscripten specific targets defined in `CMakeLists.txt`.
- `cmake/draco_flags.cmake` defines macros related to compiler and linker
flags. Testing macros, macros for isolating flags to specific source files,
and the main flag configuration function for the library are defined here.
- `cmake/draco_options.cmake` defines macros that control optional features
of draco, and help track draco library and build system options.
- `cmake/draco_install.cmake` defines the draco install target.
- `cmake/draco_cpu_detection.cmake` determines the optimization types to
enable based on target system processor as reported by CMake.
- `cmake/draco_intrinsics.cmake` manages flags for source files that use
intrinsics. It handles detection of whether flags are necessary, and the
application of the flags to the sources that need them when they are
required.
## Helper and Utility Files
- `.cmake-format.py` Defines coding style for cmake-format.
- `cmake/draco_helpers.cmake` defines utility macros.
- `cmake/draco_sanitizer.cmake` defines the `draco_configure_sanitizer()`
macro, which implements support for `DRACO_SANITIZE`. It handles the
compiler and linker flags necessary for using sanitizers like asan and msan.
- `cmake/draco_variables.cmake` defines macros for tracking and control of
draco build system variables.
## Toolchain Files
These files help facilitate cross compiling of draco for various targets.
- `cmake/toolchains/aarch64-linux-gnu.cmake` provides cross compilation
support for arm64 targets.
- `cmake/toolchains/android.cmake` provides cross compilation support for
Android targets.
- `cmake/toolchains/arm-linux-gnueabihf.cmake` provides cross compilation
support for armv7 targets.
- `cmake/toolchains/arm64-ios.cmake`, `cmake/toolchains/armv7-ios.cmake`,
and `cmake/toolchains/armv7s-ios.cmake` provide support for iOS.
- `cmake/toolchains/arm64-linux-gcc.cmake` and
`cmake/toolchains/armv7-linux-gcc.cmake` are deprecated, but remain for
compatibility. `cmake/toolchains/android.cmake` should be used instead.
- `cmake/toolchains/arm64-android-ndk-libcpp.cmake`,
`cmake/toolchains/armv7-android-ndk-libcpp.cmake`,
`cmake/toolchains/x86-android-ndk-libcpp.cmake`, and
`cmake/toolchains/x86_64-android-ndk-libcpp.cmake` are deprecated, but
remain for compatibility. `cmake/toolchains/android.cmake` should be used
instead.
- `cmake/toolchains/i386-ios.cmake` and `cmake/toolchains/x86_64-ios.cmake`
provide support for the iOS simulator.
- `cmake/toolchains/android-ndk-common.cmake` and
`cmake/toolchains/arm-ios-common.cmake` are support files used by other
toolchain files.
## Template Files
These files are inputs to the CMake build and are used to generate inputs to the
build system output by CMake.
- `cmake/draco-config.cmake.template` is used to produce
draco-config.cmake. draco-config.cmake can be used by CMake to find draco
when another CMake project depends on draco.
- `cmake/draco.pc.template` is used to produce draco's pkg-config file.
Some build systems use pkg-config to configure include and library paths
when they depend upon third party libraries like draco.

1156
third-party/draco/CMakeLists.txt generated vendored Normal file

File diff suppressed because it is too large Load Diff

27
third-party/draco/CONTRIBUTING.md generated vendored Normal file
View File

@ -0,0 +1,27 @@
Want to contribute? Great! First, read this page (including the small print at the end).
### Before you contribute
Before we can use your code, you must sign the
[Google Individual Contributor License Agreement](https://cla.developers.google.com/about/google-individual)
(CLA), which you can do online. The CLA is necessary mainly because you own the
copyright to your changes, even after your contribution becomes part of our
codebase, so we need your permission to use and distribute your code. We also
need to be sure of various other things—for instance that you'll tell us if you
know that your code infringes on other people's patents. You don't have to sign
the CLA until after you've submitted your code for review and a member has
approved it, but you must do it before we can put your code into our codebase.
Before you start working on a larger contribution, you should get in touch with
us first through the issue tracker with your idea so that we can help out and
possibly guide you. Coordinating up front makes it much easier to avoid
frustration later on.
### Code reviews
All submissions, including submissions by project members, require review. We
use GitHub pull requests for this purpose.
Please make sure that your code conforms with our
[coding style guidelines](https://google.github.io/styleguide/cppguide.html).
### The small print
Contributions made by corporations are covered by a different agreement than
the one above, the
[Software Grant and Corporate Contributor License Agreement](https://cla.developers.google.com/about/google-corporate).

252
third-party/draco/LICENSE generated vendored Normal file
View File

@ -0,0 +1,252 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
--------------------------------------------------------------------------------
Files: docs/assets/js/ASCIIMathML.js
Copyright (c) 2014 Peter Jipsen and other ASCIIMathML.js contributors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
--------------------------------------------------------------------------------
Files: docs/assets/css/pygments/*
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <http://unlicense.org>

616
third-party/draco/README.md generated vendored Normal file
View File

@ -0,0 +1,616 @@
<p align="center">
<img width="350px" src="docs/artwork/draco3d-vert.svg" />
</p>
[![draco-ci](https://github.com/google/draco/workflows/draco-ci/badge.svg?branch=main)](https://github.com/google/draco/actions/workflows/ci.yml)
News
=======
Attention GStatic users: the Draco team strongly recommends using the versioned
URLs for accessing Draco GStatic content. If you are using the URLs that include
the `v1/decoders` substring within the URL, edge caching and GStatic propagation
delays can result in transient errors that can be difficult to diagnose when
new Draco releases are launched. To avoid the issue pin your sites to a
versioned release.
### Version 1.5.7 release:
* Using the versioned www.gstatic.com WASM and Javascript decoders continues
to be recommended. To use v1.5.7, use this URL:
* https://www.gstatic.com/draco/versioned/decoders/1.5.7/*
* Added support for normalized attributes to Emscripten encoder API.
* Bug fixes.
* Security fixes.
### Version 1.5.6 release:
* Using the versioned www.gstatic.com WASM and Javascript decoders continues
to be recommended. To use v1.5.6, use this URL:
* https://www.gstatic.com/draco/versioned/decoders/1.5.6/*
* The CMake flag DRACO_DEBUG_MSVC_WARNINGS has been replaced with
DRACO_DEBUG_COMPILER_WARNINGS, and the behavior has changed. It is now a
boolean flag defined in draco_options.cmake.
* Bug fixes.
* Security fixes.
### Version 1.5.5 release:
* Using the versioned www.gstatic.com WASM and Javascript decoders continues
to be recommended. To use v1.5.5, use this URL:
* https://www.gstatic.com/draco/versioned/decoders/1.5.5/*
* Bug fix: https://github.com/google/draco/issues/935
### Version 1.5.4 release:
* Using the versioned www.gstatic.com WASM and Javascript decoders continues
to be recommended. To use v1.5.4, use this URL:
* https://www.gstatic.com/draco/versioned/decoders/1.5.4/*
* Added partial support for glTF extensions EXT_mesh_features and
EXT_structural_metadata.
* Bug fixes.
* Security fixes.
### Version 1.5.3 release:
* Using the versioned www.gstatic.com WASM and Javascript decoders continues
to be recommended. To use v1.5.3, use this URL:
* https://www.gstatic.com/draco/versioned/decoders/1.5.3/*
* Bug fixes.
### Version 1.5.2 release
* This is the same as v1.5.1 with the following two bug fixes:
* Fixes DRACO_TRANSCODER_SUPPORTED enabled builds.
* ABI version updated.
### Version 1.5.1 release
* Adds assertion enabled Emscripten builds to the release, and a subset of the
assertion enabled builds to GStatic. See the file listing below.
* Custom paths to third party dependencies are now supported. See BUILDING.md
for more information.
* The CMake configuration file draco-config.cmake is now tested and known to
work for using Draco in Linux, MacOS, and Windows CMake projects. See the
`install_test` subdirectory of `src/draco/tools` for more information.
* Bug fixes.
### Version 1.5.0 release
* Adds the draco_transcoder tool. See the section below on the glTF transcoding
tool, and BUILDING.md for build and dependency information.
* Some changes to configuration variables have been made for this release:
- The DRACO_GLTF flag has been renamed to DRACO_GLTF_BITSTREAM to help
increase understanding of its purpose, which is to limit Draco features to
those included in the Draco glTF specification.
- Variables exported in CMake via draco-config.cmake and find-draco.cmake
(formerly FindDraco.cmake) have been renamed. It's unlikely that this
impacts any existing projects as the aforementioned files were not formed
correctly. See [PR775](https://github.com/google/draco/pull/775) for full
details of the changes.
* A CMake version file has been added.
* The CMake install target now uses absolute paths direct from CMake instead
of building them using CMAKE_INSTALL_PREFIX. This was done to make Draco
easier to use for downstream packagers and should have little to no impact on
users picking up Draco from source.
* Certain MSVC warnings have had their levels changed via compiler flag to
reduce the amount of noise output by the MSVC compilers. Set MSVC warning
level to 4, or define DRACO_DEBUG_MSVC_WARNINGS at CMake configuration time
to restore previous behavior.
* Bug fixes.
### Version 1.4.3 release
* Using the versioned www.gstatic.com WASM and Javascript decoders continues
to be recommended. To use v1.4.3, use this URL:
* https://www.gstatic.com/draco/versioned/decoders/1.4.3/*
* Bug fixes
### Version 1.4.1 release
* Using the versioned www.gstatic.com WASM and Javascript decoders is now
recommended. To use v1.4.1, use this URL:
* https://www.gstatic.com/draco/versioned/decoders/1.4.1/*
* Replace the * with the files to load. E.g.
* https://www.gstatic.com/draco/versioned/decoders/1.4.1/draco_decoder.js
* This works with the v1.3.6 and v1.4.0 releases, and will work with future
Draco releases.
* Bug fixes
### Version 1.4.0 release
* WASM and JavaScript decoders are hosted from a static URL.
* It is recommended to always pull your Draco WASM and JavaScript decoders from this URL:
* https://www.gstatic.com/draco/v1/decoders/*
* Replace * with the files to load. E.g.
* https://www.gstatic.com/draco/v1/decoders/draco_decoder_gltf.wasm
* Users will benefit from having the Draco decoder in cache as more sites start using the static URL
* Changed npm modules to use WASM, which increased performance by ~200%.
* Updated Emscripten to 2.0.
* This causes the Draco codec modules to return a promise instead of the module directly.
* Please see the example code on how to handle the promise.
* Changed NORMAL quantization default to 8.
* Added new array API to decoder and deprecated DecoderBuffer.
* See PR https://github.com/google/draco/issues/513 for more information.
* Changed WASM/JavaScript behavior of catching exceptions.
* See issue https://github.com/google/draco/issues/629 for more information.
* Code cleanup.
* Emscripten builds now disable NODEJS_CATCH_EXIT and NODEJS_CATCH_REJECTION.
* Authors of a CLI tool might want to add their own error handlers.
* Added Maya plugin builds.
* Unity plugin builds updated.
* Builds are now stored as archives.
* Added iOS build.
* Unity users may want to look into https://github.com/atteneder/DracoUnity.
* Bug fixes.
### Version 1.3.6 release
* WASM and JavaScript decoders are now hosted from a static URL
* It is recommended to always pull your Draco WASM and JavaScript decoders from this URL:
* https://www.gstatic.com/draco/v1/decoders/*
* Replace * with the files to load. E.g.
* https://www.gstatic.com/draco/v1/decoders/draco_decoder_gltf.wasm
* Users will benefit from having the Draco decoder in cache as more sites start using the static URL
* Changed web examples to pull Draco decoders from static URL
* Added new API to Draco WASM decoder, which increased performance by ~15%
* Decreased Draco WASM decoder size by ~20%
* Added support for generic and multiple attributes to Draco Unity plug-ins
* Added new API to Draco Unity, which increased decoder performance by ~15%
* Changed quantization defaults:
* POSITION: 11
* NORMAL: 7
* TEX_COORD: 10
* COLOR: 8
* GENERIC: 8
* Code cleanup
* Bug fixes
### Version 1.3.5 release
* Added option to build Draco for Universal Scene Description
* Code cleanup
* Bug fixes
### Version 1.3.4 release
* Released Draco Animation code
* Fixes for Unity
* Various file location and name changes
### Version 1.3.3 release
* Added ExpertEncoder to the Javascript API
* Allows developers to set quantization options per attribute id
* Bug fixes
### Version 1.3.2 release
* Bug fixes
### Version 1.3.1 release
* Fix issue with multiple attributes when skipping an attribute transform
### Version 1.3.0 release
* Improved kD-tree based point cloud encoding
* Now applicable to point clouds with any number of attributes
* Support for all integer attribute types and quantized floating point types
* Improved mesh compression up to 10% (on average ~2%)
* For meshes, the 1.3.0 bitstream is fully compatible with 1.2.x decoders
* Improved Javascript API
* Added support for all signed and unsigned integer types
* Added support for point clouds to our Javascript encoder API
* Added support for integer properties to the PLY decoder
* Bug fixes
### Previous releases
https://github.com/google/draco/releases
Description
===========
Draco is a library for compressing and decompressing 3D geometric [meshes] and
[point clouds]. It is intended to improve the storage and transmission of 3D
graphics.
Draco was designed and built for compression efficiency and speed. The code
supports compressing points, connectivity information, texture coordinates,
color information, normals, and any other generic attributes associated with
geometry. With Draco, applications using 3D graphics can be significantly
smaller without compromising visual fidelity. For users, this means apps can
now be downloaded faster, 3D graphics in the browser can load quicker, and VR
and AR scenes can now be transmitted with a fraction of the bandwidth and
rendered quickly.
Draco is released as C++ source code that can be used to compress 3D graphics
as well as C++ and Javascript decoders for the encoded data.
_**Contents**_
* [Building](#building)
* [Usage](#usage)
* [Unity](#unity)
* [WASM and JavaScript Decoders](#WASM-and-JavaScript-Decoders)
* [Command Line Applications](#command-line-applications)
* [Encoding Tool](#encoding-tool)
* [Encoding Point Clouds](#encoding-point-clouds)
* [Decoding Tool](#decoding-tool)
* [glTF Transcoding Tool](#gltf-transcoding-tool)
* [C++ Decoder API](#c-decoder-api)
* [Javascript Encoder API](#javascript-encoder-api)
* [Javascript Decoder API](#javascript-decoder-api)
* [Javascript Decoder Performance](#javascript-decoder-performance)
* [Metadata API](#metadata-api)
* [NPM Package](#npm-package)
* [three.js Renderer Example](#threejs-renderer-example)
* [GStatic Javascript Builds](#gstatic-javascript-builds)
* [Support](#support)
* [License](#license)
* [References](#references)
Building
========
See [BUILDING](BUILDING.md) for building instructions.
Usage
======
Unity
-----
For the best information about using Unity with Draco please visit https://github.com/atteneder/DracoUnity
For a simple example of using Unity with Draco see [README](unity/README.md) in the unity folder.
WASM and JavaScript Decoders
----------------------------
It is recommended to always pull your Draco WASM and JavaScript decoders from:
~~~~~ bash
https://www.gstatic.com/draco/v1/decoders/
~~~~~
Users will benefit from having the Draco decoder in cache as more sites start using the static URL.
Command Line Applications
-------------------------
The default target created from the build files will be the `draco_encoder`
and `draco_decoder` command line applications. Additionally, `draco_transcoder`
is generated when CMake is run with the DRACO_TRANSCODER_SUPPORTED variable set
to ON (see [BUILDING](BUILDING.md#transcoder) for more details). For all
applications, if you run them without any arguments or `-h`, the applications
will output usage and options.
Encoding Tool
-------------
`draco_encoder` will read OBJ, STL or PLY files as input, and output
Draco-encoded files. We have included Stanford's [Bunny] mesh for testing. The
basic command line looks like this:
~~~~~ bash
./draco_encoder -i testdata/bun_zipper.ply -o out.drc
~~~~~
A value of `0` for the quantization parameter will not perform any quantization
on the specified attribute. Any value other than `0` will quantize the input
values for the specified attribute to that number of bits. For example:
~~~~~ bash
./draco_encoder -i testdata/bun_zipper.ply -o out.drc -qp 14
~~~~~
will quantize the positions to 14 bits (default is 11 for the position
coordinates).
In general, the more you quantize your attributes the better compression rate
you will get. It is up to your project to decide how much deviation it will
tolerate. In general, most projects can set quantization values of about `11`
without any noticeable difference in quality.
The compression level (`-cl`) parameter turns on/off different compression
features.
~~~~~ bash
./draco_encoder -i testdata/bun_zipper.ply -o out.drc -cl 8
~~~~~
In general, the highest setting, `10`, will have the most compression but
worst decompression speed. `0` will have the least compression, but best
decompression speed. The default setting is `7`.
Encoding Point Clouds
---------------------
You can encode point cloud data with `draco_encoder` by specifying the
`-point_cloud` parameter. If you specify the `-point_cloud` parameter with a
mesh input file, `draco_encoder` will ignore the connectivity data and encode
the positions from the mesh file.
~~~~~ bash
./draco_encoder -point_cloud -i testdata/bun_zipper.ply -o out.drc
~~~~~
This command line will encode the mesh input as a point cloud, even though the
input might not produce compression that is representative of other point
clouds. Specifically, one can expect much better compression rates for larger
and denser point clouds.
Decoding Tool
-------------
`draco_decoder` will read Draco files as input, and output OBJ, STL or PLY
files. The basic command line looks like this:
~~~~~ bash
./draco_decoder -i in.drc -o out.obj
~~~~~
glTF Transcoding Tool
---------------------
`draco_transcoder` can be used to add Draco compression to glTF assets. The
basic command line looks like this:
~~~~~ bash
./draco_transcoder -i in.glb -o out.glb
~~~~~
This command line will add geometry compression to all meshes in the `in.glb`
file. Quantization values for different glTF attributes can be specified
similarly to the `draco_encoder` tool. For example `-qp` can be used to define
quantization of the position attribute:
~~~~~ bash
./draco_transcoder -i in.glb -o out.glb -qp 12
~~~~~
C++ Decoder API
---------------
If you'd like to add decoding to your applications you will need to include
the `draco_dec` library. In order to use the Draco decoder you need to
initialize a `DecoderBuffer` with the compressed data. Then call
`DecodeMeshFromBuffer()` to return a decoded mesh object or call
`DecodePointCloudFromBuffer()` to return a decoded `PointCloud` object. For
example:
~~~~~ cpp
draco::DecoderBuffer buffer;
buffer.Init(data.data(), data.size());
const draco::EncodedGeometryType geom_type =
draco::GetEncodedGeometryType(&buffer);
if (geom_type == draco::TRIANGULAR_MESH) {
unique_ptr<draco::Mesh> mesh = draco::DecodeMeshFromBuffer(&buffer);
} else if (geom_type == draco::POINT_CLOUD) {
unique_ptr<draco::PointCloud> pc = draco::DecodePointCloudFromBuffer(&buffer);
}
~~~~~
Please see [src/draco/mesh/mesh.h](src/draco/mesh/mesh.h) for the full `Mesh` class interface and
[src/draco/point_cloud/point_cloud.h](src/draco/point_cloud/point_cloud.h) for the full `PointCloud` class interface.
Javascript Encoder API
----------------------
The Javascript encoder is located in `javascript/draco_encoder.js`. The encoder
API can be used to compress mesh and point cloud. In order to use the encoder,
you need to first create an instance of `DracoEncoderModule`. Then use this
instance to create `MeshBuilder` and `Encoder` objects. `MeshBuilder` is used
to construct a mesh from geometry data that could be later compressed by
`Encoder`. First create a mesh object using `new encoderModule.Mesh()` . Then,
use `AddFacesToMesh()` to add indices to the mesh and use
`AddFloatAttributeToMesh()` to add attribute data to the mesh, e.g. position,
normal, color and texture coordinates. After a mesh is constructed, you could
then use `EncodeMeshToDracoBuffer()` to compress the mesh. For example:
~~~~~ js
const mesh = {
indices : new Uint32Array(indices),
vertices : new Float32Array(vertices),
normals : new Float32Array(normals)
};
const encoderModule = DracoEncoderModule();
const encoder = new encoderModule.Encoder();
const meshBuilder = new encoderModule.MeshBuilder();
const dracoMesh = new encoderModule.Mesh();
const numFaces = mesh.indices.length / 3;
const numPoints = mesh.vertices.length;
meshBuilder.AddFacesToMesh(dracoMesh, numFaces, mesh.indices);
meshBuilder.AddFloatAttributeToMesh(dracoMesh, encoderModule.POSITION,
numPoints, 3, mesh.vertices);
if (mesh.hasOwnProperty('normals')) {
meshBuilder.AddFloatAttributeToMesh(
dracoMesh, encoderModule.NORMAL, numPoints, 3, mesh.normals);
}
if (mesh.hasOwnProperty('colors')) {
meshBuilder.AddFloatAttributeToMesh(
dracoMesh, encoderModule.COLOR, numPoints, 3, mesh.colors);
}
if (mesh.hasOwnProperty('texcoords')) {
meshBuilder.AddFloatAttributeToMesh(
dracoMesh, encoderModule.TEX_COORD, numPoints, 3, mesh.texcoords);
}
if (method === "edgebreaker") {
encoder.SetEncodingMethod(encoderModule.MESH_EDGEBREAKER_ENCODING);
} else if (method === "sequential") {
encoder.SetEncodingMethod(encoderModule.MESH_SEQUENTIAL_ENCODING);
}
const encodedData = new encoderModule.DracoInt8Array();
// Use default encoding setting.
const encodedLen = encoder.EncodeMeshToDracoBuffer(dracoMesh,
encodedData);
encoderModule.destroy(dracoMesh);
encoderModule.destroy(encoder);
encoderModule.destroy(meshBuilder);
~~~~~
Please see [src/draco/javascript/emscripten/draco_web_encoder.idl](src/draco/javascript/emscripten/draco_web_encoder.idl) for the full API.
Javascript Decoder API
----------------------
The Javascript decoder is located in [javascript/draco_decoder.js](javascript/draco_decoder.js). The
Javascript decoder can decode mesh and point cloud. In order to use the
decoder, you must first create an instance of `DracoDecoderModule`. The
instance is then used to create `DecoderBuffer` and `Decoder` objects. Set
the encoded data in the `DecoderBuffer`. Then call `GetEncodedGeometryType()`
to identify the type of geometry, e.g. mesh or point cloud. Then call either
`DecodeBufferToMesh()` or `DecodeBufferToPointCloud()`, which will return
a Mesh object or a point cloud. For example:
~~~~~ js
// Create the Draco decoder.
const decoderModule = DracoDecoderModule();
const buffer = new decoderModule.DecoderBuffer();
buffer.Init(byteArray, byteArray.length);
// Create a buffer to hold the encoded data.
const decoder = new decoderModule.Decoder();
const geometryType = decoder.GetEncodedGeometryType(buffer);
// Decode the encoded geometry.
let outputGeometry;
let status;
if (geometryType == decoderModule.TRIANGULAR_MESH) {
outputGeometry = new decoderModule.Mesh();
status = decoder.DecodeBufferToMesh(buffer, outputGeometry);
} else {
outputGeometry = new decoderModule.PointCloud();
status = decoder.DecodeBufferToPointCloud(buffer, outputGeometry);
}
// You must explicitly delete objects created from the DracoDecoderModule
// or Decoder.
decoderModule.destroy(outputGeometry);
decoderModule.destroy(decoder);
decoderModule.destroy(buffer);
~~~~~
Please see [src/draco/javascript/emscripten/draco_web_decoder.idl](src/draco/javascript/emscripten/draco_web_decoder.idl) for the full API.
Javascript Decoder Performance
------------------------------
The Javascript decoder is built with dynamic memory. This will let the decoder
work with all of the compressed data. But this option is not the fastest.
Pre-allocating the memory sees about a 2x decoder speed improvement. If you
know all of your project's memory requirements, you can turn on static memory
by changing `CMakeLists.txt` accordingly.
Metadata API
------------
Starting from v1.0, Draco provides metadata functionality for encoding data
other than geometry. It could be used to encode any custom data along with the
geometry. For example, we can enable metadata functionality to encode the name
of attributes, name of sub-objects and customized information.
Each mesh or point cloud can have one top-level geometry metadata class.
The top-level metadata then can have hierarchical metadata. Other than that,
the top-level metadata can have metadata for each attribute which is called
attribute metadata. The attribute metadata should be initialized with the
corresponding attribute id within the mesh. The metadata API is provided both
in C++ and Javascript.
For example, to add metadata in C++:
~~~~~ cpp
draco::PointCloud pc;
// Add metadata for the geometry.
std::unique_ptr<draco::GeometryMetadata> metadata =
std::unique_ptr<draco::GeometryMetadata>(new draco::GeometryMetadata());
metadata->AddEntryString("description", "This is an example.");
pc.AddMetadata(std::move(metadata));
// Add metadata for attributes.
draco::GeometryAttribute pos_att;
pos_att.Init(draco::GeometryAttribute::POSITION, nullptr, 3,
draco::DT_FLOAT32, false, 12, 0);
const uint32_t pos_att_id = pc.AddAttribute(pos_att, false, 0);
std::unique_ptr<draco::AttributeMetadata> pos_metadata =
std::unique_ptr<draco::AttributeMetadata>(
new draco::AttributeMetadata(pos_att_id));
pos_metadata->AddEntryString("name", "position");
// Directly add attribute metadata to geometry.
// You can do this without explicitly adding a |GeometryMetadata| to the mesh.
pc.AddAttributeMetadata(pos_att_id, std::move(pos_metadata));
~~~~~
To read metadata from a geometry in C++:
~~~~~ cpp
// Get metadata for the geometry.
const draco::GeometryMetadata *pc_metadata = pc.GetMetadata();
// Request metadata for a specific attribute.
const draco::AttributeMetadata *requested_pos_metadata =
pc.GetAttributeMetadataByStringEntry("name", "position");
~~~~~
Please see [src/draco/metadata](src/draco/metadata) and [src/draco/point_cloud](src/draco/point_cloud) for the full API.
NPM Package
-----------
Draco NPM NodeJS package is located in [javascript/npm/draco3d](javascript/npm/draco3d). Please see the
doc in the folder for detailed usage.
three.js Renderer Example
-------------------------
Here's an [example] of a geometry compressed with Draco and loaded via a
Javascript decoder using the `three.js` renderer.
Please see the [javascript/example/README.md](javascript/example/README.md) file for more information.
GStatic Javascript Builds
=========================
Prebuilt versions of the Emscripten-built Draco javascript decoders are hosted
on www.gstatic.com in version labeled directories:
https://www.gstatic.com/draco/versioned/decoders/VERSION/*
As of the v1.4.3 release the files available are:
- [draco_decoder.js](https://www.gstatic.com/draco/versioned/decoders/1.4.3/draco_decoder.js)
- [draco_decoder.wasm](https://www.gstatic.com/draco/versioned/decoders/1.4.3/draco_decoder.wasm)
- [draco_decoder_gltf.js](https://www.gstatic.com/draco/versioned/decoders/1.4.3/draco_decoder_gltf.js)
- [draco_decoder_gltf.wasm](https://www.gstatic.com/draco/versioned/decoders/1.4.3/draco_decoder_gltf.wasm)
- [draco_wasm_wrapper.js](https://www.gstatic.com/draco/versioned/decoders/1.4.3/draco_wasm_wrapper.js)
- [draco_wasm_wrapper_gltf.js](https://www.gstatic.com/draco/versioned/decoders/1.4.3/draco_wasm_wrapper_gltf.js)
Beginning with the v1.5.1 release assertion enabled builds of the following
files are available:
- [draco_decoder.js](https://www.gstatic.com/draco/versioned/decoders/1.5.1/with_asserts/draco_decoder.js)
- [draco_decoder.wasm](https://www.gstatic.com/draco/versioned/decoders/1.5.1/with_asserts/draco_decoder.wasm)
- [draco_wasm_wrapper.js](https://www.gstatic.com/draco/versioned/decoders/1.5.1/with_asserts/draco_wasm_wrapper.js)
Support
=======
For questions/comments please email <draco-3d-discuss@googlegroups.com>
If you have found an error in this library, please file an issue at
<https://github.com/google/draco/issues>
Patches are encouraged, and may be submitted by forking this project and
submitting a pull request through GitHub. See [CONTRIBUTING] for more detail.
License
=======
Licensed under the Apache License, Version 2.0 (the "License"); you may not
use this file except in compliance with the License. You may obtain a copy of
the License at
<http://www.apache.org/licenses/LICENSE-2.0>
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations under
the License.
References
==========
[example]:https://storage.googleapis.com/demos.webmproject.org/draco/draco_loader_throw.html
[meshes]: https://en.wikipedia.org/wiki/Polygon_mesh
[point clouds]: https://en.wikipedia.org/wiki/Point_cloud
[Bunny]: https://graphics.stanford.edu/data/3Dscanrep/
[CONTRIBUTING]: https://raw.githubusercontent.com/google/draco/main/CONTRIBUTING.md
Bunny model from Stanford's graphic department <https://graphics.stanford.edu/data/3Dscanrep/>

3
third-party/draco/cmake/draco-config.cmake.template generated vendored Normal file
View File

@ -0,0 +1,3 @@
@PACKAGE_INIT@
include("${CMAKE_CURRENT_LIST_DIR}/draco-targets.cmake")

6
third-party/draco/cmake/draco.pc.template generated vendored Normal file
View File

@ -0,0 +1,6 @@
Name: @PROJECT_NAME@
Description: Draco geometry de(com)pression library.
Version: @DRACO_VERSION@
Cflags: -I@includes_path@
Libs: -L@libs_path@ -ldraco
Libs.private: @CMAKE_THREAD_LIBS_INIT@

157
third-party/draco/cmake/draco_build_definitions.cmake generated vendored Normal file
View File

@ -0,0 +1,157 @@
# Copyright 2021 The Draco Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# Include guard: skip re-processing if this file was already included.
if(DRACO_CMAKE_DRACO_BUILD_DEFINITIONS_CMAKE_)
  return()
endif() # DRACO_CMAKE_DRACO_BUILD_DEFINITIONS_CMAKE_
set(DRACO_CMAKE_DRACO_BUILD_DEFINITIONS_CMAKE_ 1)
# Utility for controlling the main draco library dependency. This changes in
# shared builds, and when an optional target requires a shared library build.
# Chooses the draco library target name for the current platform and stores it
# in `draco_dependency` (and `draco_plugin_dependency`). NOTE: cmake macros
# expand in the caller's scope, so both variables are visible to the caller.
macro(set_draco_target)
  if(WIN32)
    # On Windows a single `draco` target serves both purposes.
    set(draco_dependency draco)
    set(draco_plugin_dependency ${draco_dependency})
  else()
    if(BUILD_SHARED_LIBS)
      set(draco_dependency draco_shared)
    else()
      set(draco_dependency draco_static)
    endif()
    # Plugins always link against the static library on non-Windows platforms.
    set(draco_plugin_dependency draco_static)
  endif()
endmacro()
# Configures flags and sets build system globals.
# Populates the draco_* flag/define/include-path lists used by all draco
# targets, derives the shared-object version, and selects the main library
# target. Expands in the caller's scope and mutates draco_base_cxx_flags,
# draco_clang_cxx_flags, draco_msvc_cxx_flags, draco_defines and
# draco_include_paths.
macro(draco_set_build_definitions)
  string(TOLOWER "${CMAKE_BUILD_TYPE}" build_type_lowercase)
  # Extra optimization flags only for release-ish builds with DRACO_FAST.
  if(build_type_lowercase MATCHES "rel" AND DRACO_FAST)
    if(MSVC)
      list(APPEND draco_msvc_cxx_flags "/Ox")
    else()
      list(APPEND draco_base_cxx_flags "-O3")
    endif()
  endif()
  draco_load_version_info()
  # Library version info. See the libtool docs for updating the values:
  # https://www.gnu.org/software/libtool/manual/libtool.html#Updating-version-info
  #
  # c=<current>, r=<revision>, a=<age>
  #
  # libtool generates a .so file as .so.[c-a].a.r, while -version-info c:r:a is
  # passed to libtool.
  #
  # We set DRACO_SOVERSION = [c-a].a.r
  set(LT_CURRENT 9)
  set(LT_REVISION 0)
  set(LT_AGE 0)
  math(EXPR DRACO_SOVERSION_MAJOR "${LT_CURRENT} - ${LT_AGE}")
  set(DRACO_SOVERSION "${DRACO_SOVERSION_MAJOR}.${LT_AGE}.${LT_REVISION}")
  unset(LT_CURRENT)
  unset(LT_REVISION)
  unset(LT_AGE)
  list(APPEND draco_include_paths "${draco_root}" "${draco_root}/src"
              "${draco_build}")
  # Transcoder-only dependencies.
  if(DRACO_TRANSCODER_SUPPORTED)
    draco_setup_eigen()
    draco_setup_filesystem()
    draco_setup_tinygltf()
  endif()
  list(APPEND draco_defines "DRACO_CMAKE=1"
              "DRACO_FLAGS_SRCDIR=\"${draco_root}\""
              "DRACO_FLAGS_TMPDIR=\"/tmp\"")
  if(MSVC OR WIN32)
    list(APPEND draco_defines "_CRT_SECURE_NO_DEPRECATE=1" "NOMINMAX=1")
    if(BUILD_SHARED_LIBS)
      set(CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS TRUE)
    endif()
  endif()
  if(NOT MSVC)
    if(${CMAKE_SIZEOF_VOID_P} EQUAL 8)
      # Ensure 64-bit platforms can support large files.
      list(APPEND draco_defines "_LARGEFILE_SOURCE" "_FILE_OFFSET_BITS=64")
    endif()
    if(NOT DRACO_DEBUG_COMPILER_WARNINGS)
      if(CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
        list(APPEND draco_clang_cxx_flags
                    "-Wno-implicit-const-int-float-conversion")
      else()
        list(APPEND draco_base_cxx_flags "-Wno-deprecated-declarations")
      endif()
    endif()
  endif()
  if(ANDROID)
    if(CMAKE_ANDROID_ARCH_ABI STREQUAL "armeabi-v7a")
      set(CMAKE_ANDROID_ARM_MODE ON)
    endif()
  endif()
  set_draco_target()
  if(CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
    if(CMAKE_CXX_COMPILER_VERSION VERSION_LESS "6")
      # Quiet warnings in copy-list-initialization where {} elision has always
      # been allowed.
      list(APPEND draco_clang_cxx_flags "-Wno-missing-braces")
    endif()
  endif()
  if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
    if(CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL "7")
      if(CMAKE_SYSTEM_PROCESSOR STREQUAL "armv7")
        # Quiet gcc 6 vs 7 abi warnings:
        # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=77728
        list(APPEND draco_base_cxx_flags "-Wno-psabi")
        list(APPEND ABSL_GCC_FLAGS "-Wno-psabi")
      endif()
    endif()
  endif()
  # Source file names ending in these suffixes will have the appropriate
  # compiler flags added to their compile commands to enable intrinsics.
  set(draco_neon_source_file_suffix "neon.cc")
  set(draco_sse4_source_file_suffix "sse4.cc")
  if((${CMAKE_CXX_COMPILER_ID} STREQUAL "GNU" AND ${CMAKE_CXX_COMPILER_VERSION}
                                                  VERSION_LESS 5)
     OR (${CMAKE_CXX_COMPILER_ID} STREQUAL "Clang"
         AND ${CMAKE_CXX_COMPILER_VERSION} VERSION_LESS 4))
    message(
      WARNING "GNU/GCC < v5 or Clang/LLVM < v4, ENABLING COMPATIBILITY MODE.")
    draco_enable_feature(FEATURE "DRACO_OLD_GCC")
  endif()
  if(EMSCRIPTEN)
    draco_check_emscripten_environment()
    draco_get_required_emscripten_flags(
      FLAG_LIST_VAR_COMPILER draco_base_cxx_flags
      FLAG_LIST_VAR_LINKER draco_base_exe_linker_flags)
  endif()
  draco_configure_sanitizer()
endmacro()

42
third-party/draco/cmake/draco_cpu_detection.cmake generated vendored Normal file
View File

@ -0,0 +1,42 @@
# Copyright 2021 The Draco Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# Include guard: skip re-processing if this file was already included.
if(DRACO_CMAKE_DRACO_CPU_DETECTION_CMAKE_)
  return()
endif() # DRACO_CMAKE_DRACO_CPU_DETECTION_CMAKE_
set(DRACO_CMAKE_DRACO_CPU_DETECTION_CMAKE_ 1)
# Detect optimizations available for the current target CPU.
# Detects NEON/SSE4.1 availability from CMAKE_SYSTEM_PROCESSOR and appends
# DRACO_ENABLE_NEON / DRACO_ENABLE_SSE4_1 (=0 or =1) to `draco_defines`.
# Expands in the caller's scope; a define is always appended for both
# features, even when detection is disabled.
macro(draco_optimization_detect)
  if(DRACO_ENABLE_OPTIMIZATIONS)
    string(TOLOWER "${CMAKE_SYSTEM_PROCESSOR}" cpu_lowercase)
    if(cpu_lowercase MATCHES "^arm|^aarch64")
      set(draco_have_neon ON)
    elseif(cpu_lowercase MATCHES "^x86|amd64")
      set(draco_have_sse4 ON)
    endif()
  endif()
  # Detection alone is not enough: the matching DRACO_ENABLE_* option must
  # also be ON for the feature define to be 1.
  if(draco_have_neon AND DRACO_ENABLE_NEON)
    list(APPEND draco_defines "DRACO_ENABLE_NEON=1")
  else()
    list(APPEND draco_defines "DRACO_ENABLE_NEON=0")
  endif()
  if(draco_have_sse4 AND DRACO_ENABLE_SSE4_1)
    list(APPEND draco_defines "DRACO_ENABLE_SSE4_1=1")
  else()
    list(APPEND draco_defines "DRACO_ENABLE_SSE4_1=0")
  endif()
endmacro()

136
third-party/draco/cmake/draco_dependencies.cmake generated vendored Normal file
View File

@ -0,0 +1,136 @@
# Copyright 2022 The Draco Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# Include guard: skip re-processing if this file was already included.
if(DRACO_CMAKE_DRACO_DEPENDENCIES_CMAKE)
  return()
endif()
set(DRACO_CMAKE_DRACO_DEPENDENCIES_CMAKE 1)
include("${draco_root}/cmake/draco_variables.cmake")
# Each variable holds a user specified custom path to a local copy of the
# sources that belong to each project that Draco depends on. When paths are
# empty the build will be generated pointing to the Draco git submodules.
# Otherwise the paths specified by the user will be used in the build
# configuration.
# Path to Eigen. The path must contain the Eigen directory.
set(DRACO_EIGEN_PATH)
draco_track_configuration_variable(DRACO_EIGEN_PATH)
# Path to the gulrak/filesystem installation. The path specified must contain
# the ghc subdirectory that houses the filesystem includes.
set(DRACO_FILESYSTEM_PATH)
draco_track_configuration_variable(DRACO_FILESYSTEM_PATH)
# Path to the googletest installation. The path must be to the root of the
# Googletest project directory.
set(DRACO_GOOGLETEST_PATH)
draco_track_configuration_variable(DRACO_GOOGLETEST_PATH)
# Path to the syoyo/tinygltf installation. The path must be to the root of the
# project directory.
set(DRACO_TINYGLTF_PATH)
draco_track_configuration_variable(DRACO_TINYGLTF_PATH)
# Utility macro for killing the build due to a missing submodule directory.
# Halts configuration with a hint to initialize git submodules when the
# expected dependency directory `dir` is missing.
macro(draco_die_missing_submodule dir)
  message(FATAL_ERROR "${dir} missing, run git submodule update --init")
endmacro()
# Determines the Eigen location and updates the build configuration accordingly.
# Resolves the Eigen location (user-provided DRACO_EIGEN_PATH or the bundled
# git submodule), validates it, and appends it to `draco_include_paths`.
macro(draco_setup_eigen)
  if(DRACO_EIGEN_PATH)
    set(eigen_path "${DRACO_EIGEN_PATH}")
    if(NOT IS_DIRECTORY "${eigen_path}")
      message(FATAL_ERROR "DRACO_EIGEN_PATH does not exist.")
    endif()
  else()
    # Fall back to the submodule checkout.
    set(eigen_path "${draco_root}/third_party/eigen")
    if(NOT IS_DIRECTORY "${eigen_path}")
      draco_die_missing_submodule("${eigen_path}")
    endif()
  endif()
  # NOTE(review): eigen_include_path is set but not referenced below — confirm
  # whether another file consumes it (macro variables leak to caller scope).
  set(eigen_include_path "${eigen_path}/Eigen")
  if(NOT EXISTS "${eigen_path}/Eigen")
    message(FATAL_ERROR "The eigen path does not contain an Eigen directory.")
  endif()
  list(APPEND draco_include_paths "${eigen_path}")
endmacro()
# Determines the gulrak/filesystem location and updates the build configuration
# accordingly.
# Resolves the gulrak/filesystem include location (user-provided
# DRACO_FILESYSTEM_PATH or the bundled submodule) and appends it to
# `draco_include_paths`.
macro(draco_setup_filesystem)
  if(DRACO_FILESYSTEM_PATH)
    set(fs_path "${DRACO_FILESYSTEM_PATH}")
    if(NOT IS_DIRECTORY "${fs_path}")
      message(FATAL_ERROR "DRACO_FILESYSTEM_PATH does not exist.")
    endif()
  else()
    # Fall back to the submodule checkout.
    set(fs_path "${draco_root}/third_party/filesystem/include")
    if(NOT IS_DIRECTORY "${fs_path}")
      draco_die_missing_submodule("${fs_path}")
    endif()
  endif()
  list(APPEND draco_include_paths "${fs_path}")
endmacro()
# Determines the Googletest location and sets up include and source list vars
# for the draco_tests build.
# Resolves the Googletest location and populates the include-path and source
# lists (draco_test_include_paths, draco_gtest_all, draco_gtest_main) used by
# the draco_tests build.
macro(draco_setup_googletest)
  if(DRACO_GOOGLETEST_PATH)
    set(gtest_path "${DRACO_GOOGLETEST_PATH}")
    if(NOT IS_DIRECTORY "${gtest_path}")
      message(FATAL_ERROR "DRACO_GOOGLETEST_PATH does not exist.")
    endif()
  else()
    # NOTE(review): unlike the other setup macros, the default submodule path
    # is not validated with draco_die_missing_submodule — confirm intentional.
    set(gtest_path "${draco_root}/third_party/googletest")
  endif()
  list(APPEND draco_test_include_paths ${draco_include_paths}
              "${gtest_path}/include" "${gtest_path}/googlemock"
              "${gtest_path}/googletest/include" "${gtest_path}/googletest")
  list(APPEND draco_gtest_all "${gtest_path}/googletest/src/gtest-all.cc")
  list(APPEND draco_gtest_main "${gtest_path}/googletest/src/gtest_main.cc")
endmacro()
# Determines the location of TinyGLTF and updates the build configuration
# accordingly.
# Resolves the TinyGLTF location (user-provided DRACO_TINYGLTF_PATH or the
# bundled submodule) and appends it to `draco_include_paths`.
macro(draco_setup_tinygltf)
  if(DRACO_TINYGLTF_PATH)
    set(tinygltf_path "${DRACO_TINYGLTF_PATH}")
    if(NOT IS_DIRECTORY "${tinygltf_path}")
      message(FATAL_ERROR "DRACO_TINYGLTF_PATH does not exist.")
    endif()
  else()
    # Fall back to the submodule checkout.
    set(tinygltf_path "${draco_root}/third_party/tinygltf")
    if(NOT IS_DIRECTORY "${tinygltf_path}")
      draco_die_missing_submodule("${tinygltf_path}")
    endif()
  endif()
  list(APPEND draco_include_paths "${tinygltf_path}")
endmacro()

232
third-party/draco/cmake/draco_emscripten.cmake generated vendored Normal file
View File

@ -0,0 +1,232 @@
# Copyright 2021 The Draco Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# Include guard: skip re-processing if this file was already included.
if(DRACO_CMAKE_DRACO_EMSCRIPTEN_CMAKE_)
  return()
endif() # DRACO_CMAKE_DRACO_EMSCRIPTEN_CMAKE_
# Fix: the guard variable was never set, so the early-return above could never
# fire and this file was re-processed on every include. Every sibling cmake
# file in this directory sets its guard variable immediately after the check.
set(DRACO_CMAKE_DRACO_EMSCRIPTEN_CMAKE_ 1)
# Checks environment for Emscripten prerequisites.
# Halts configuration unless a Python interpreter was found and the
# EMSCRIPTEN environment variable points at an existing path — both are
# required to generate the JS bindings.
macro(draco_check_emscripten_environment)
  if(NOT PYTHONINTERP_FOUND)
    message(
      FATAL_ERROR
        "Python required for Emscripten builds, but cmake cannot find it.")
  endif()
  if(NOT EXISTS "$ENV{EMSCRIPTEN}")
    message(
      FATAL_ERROR
        "The EMSCRIPTEN environment variable must be set. See README.md.")
  endif()
endmacro()
# Obtains the required Emscripten flags for Draco targets.
# Appends the compiler/linker flags Draco requires for Emscripten builds to
# the caller-named list variables:
#   FLAG_LIST_VAR_COMPILER <var> — list to receive compiler flags (required)
#   FLAG_LIST_VAR_LINKER <var>   — list to receive linker flags (required)
# Expands in the caller's scope and mutates the named lists in place.
macro(draco_get_required_emscripten_flags)
  set(em_FLAG_LIST_VAR_COMPILER)
  set(em_FLAG_LIST_VAR_LINKER)
  set(em_flags)
  set(em_single_arg_opts FLAG_LIST_VAR_COMPILER FLAG_LIST_VAR_LINKER)
  set(em_multi_arg_opts)
  cmake_parse_arguments(em "${em_flags}" "${em_single_arg_opts}"
                        "${em_multi_arg_opts}" ${ARGN})
  if(NOT em_FLAG_LIST_VAR_COMPILER)
    # Fix: "FATAL" is not a valid message() mode, so the keyword was printed
    # as part of the message text and configuration continued with a broken
    # setup. FATAL_ERROR actually halts the build generation.
    message(
      FATAL_ERROR
        "draco_get_required_emscripten_flags: FLAG_LIST_VAR_COMPILER required")
  endif()
  if(NOT em_FLAG_LIST_VAR_LINKER)
    message(
      FATAL_ERROR
        "draco_get_required_emscripten_flags: FLAG_LIST_VAR_LINKER required")
  endif()
  if(DRACO_JS_GLUE)
    unset(required_flags)
    # TODO(tomfinegan): Revisit splitting of compile/link flags for Emscripten,
    # and drop -Wno-unused-command-line-argument. Emscripten complains about
    # what are supposedly link-only flags sent with compile commands, but then
    # proceeds to produce broken code if the warnings are heeded.
    list(APPEND ${em_FLAG_LIST_VAR_COMPILER}
                "-Wno-unused-command-line-argument")
    list(APPEND ${em_FLAG_LIST_VAR_COMPILER} "-Wno-almost-asm")
    list(APPEND ${em_FLAG_LIST_VAR_COMPILER} "--memory-init-file" "0")
    list(APPEND ${em_FLAG_LIST_VAR_COMPILER} "-fno-omit-frame-pointer")
    # According to Emscripten the following flags are linker only, but sending
    # these flags (en masse) to only the linker results in a broken Emscripten
    # build with an empty DracoDecoderModule.
    list(APPEND ${em_FLAG_LIST_VAR_COMPILER} "-sALLOW_MEMORY_GROWTH=1")
    list(APPEND ${em_FLAG_LIST_VAR_COMPILER} "-sMODULARIZE=1")
    list(APPEND ${em_FLAG_LIST_VAR_COMPILER} "-sFILESYSTEM=0")
    list(APPEND ${em_FLAG_LIST_VAR_COMPILER}
                "-sEXPORTED_FUNCTIONS=[\"_free\",\"_malloc\"]")
    list(APPEND ${em_FLAG_LIST_VAR_COMPILER} "-sPRECISE_F32=1")
    list(APPEND ${em_FLAG_LIST_VAR_COMPILER} "-sNODEJS_CATCH_EXIT=0")
    list(APPEND ${em_FLAG_LIST_VAR_COMPILER} "-sNODEJS_CATCH_REJECTION=0")
    if(DRACO_FAST)
      list(APPEND ${em_FLAG_LIST_VAR_COMPILER} "--llvm-lto" "1")
    endif()
    # The WASM flag is reported as linker only.
    if(DRACO_WASM)
      list(APPEND ${em_FLAG_LIST_VAR_COMPILER} "-sWASM=1")
    else()
      list(APPEND ${em_FLAG_LIST_VAR_COMPILER} "-sWASM=0")
    endif()
    # The LEGACY_VM_SUPPORT flag is reported as linker only.
    if(DRACO_IE_COMPATIBLE)
      list(APPEND ${em_FLAG_LIST_VAR_COMPILER} "-sLEGACY_VM_SUPPORT=1")
    endif()
  endif()
endmacro()
# Macro for generating C++ glue code from IDL for Emscripten targets. Executes
# python to generate the C++ binding, and establishes a dependency of
# $OUTPUT_PATH.cpp on $INPUT_IDL.
# Generates C++/JS glue from a WebIDL file using Emscripten's webidl_binder:
#   INPUT_IDL <path>   — IDL source file (required)
#   OUTPUT_PATH <path> — base path; produces $OUTPUT_PATH.cpp/.js (required)
# Runs the binder once at configure time (and fails if nothing was produced),
# then registers a custom command so the glue is rebuilt when the IDL changes.
macro(draco_generate_emscripten_glue)
  set(glue_flags)
  set(glue_single_arg_opts INPUT_IDL OUTPUT_PATH)
  set(glue_multi_arg_opts)
  cmake_parse_arguments(glue "${glue_flags}" "${glue_single_arg_opts}"
                        "${glue_multi_arg_opts}" ${ARGN})
  if(DRACO_VERBOSE GREATER 1)
    message(
      "--------- draco_generate_emscripten_glue -----------\n"
      "glue_INPUT_IDL=${glue_INPUT_IDL}\n"
      "glue_OUTPUT_PATH=${glue_OUTPUT_PATH}\n"
      "----------------------------------------------------\n")
  endif()
  if(NOT glue_INPUT_IDL OR NOT glue_OUTPUT_PATH)
    message(
      FATAL_ERROR
        "draco_generate_emscripten_glue: INPUT_IDL and OUTPUT_PATH required.")
  endif()
  # Generate the glue source once, eagerly, at configure time.
  execute_process(
    COMMAND ${PYTHON_EXECUTABLE} $ENV{EMSCRIPTEN}/tools/webidl_binder.py
            ${glue_INPUT_IDL} ${glue_OUTPUT_PATH})
  if(NOT EXISTS "${glue_OUTPUT_PATH}.cpp")
    message(FATAL_ERROR "JS glue generation failed for ${glue_INPUT_IDL}.")
  endif()
  # Create a dependency so the glue is regenerated when the IDL is edited.
  # NOTE(review): DEPENDS uses ${draco_js_dec_idl} rather than
  # ${glue_INPUT_IDL} — confirm this is intended for encoder IDL inputs.
  add_custom_command(
    OUTPUT "${glue_OUTPUT_PATH}.cpp"
    COMMAND ${PYTHON_EXECUTABLE} $ENV{EMSCRIPTEN}/tools/webidl_binder.py
            ${glue_INPUT_IDL} ${glue_OUTPUT_PATH}
    DEPENDS ${draco_js_dec_idl}
    COMMENT "Generating ${glue_OUTPUT_PATH}.cpp."
    WORKING_DIRECTORY ${draco_build}
    VERBATIM)
endmacro()
# Wrapper for draco_add_executable() that handles the extra work necessary for
# emscripten targets when generating JS glue:
#
# ~~~
# - Set source level dependency on the C++ binding.
# - Pre/Post link emscripten magic.
#
# Required args:
# - GLUE_PATH: Base path for glue file. Used to generate .cpp and .js files.
# - PRE_LINK_JS_SOURCES: em_link_pre_js() source files.
# - POST_LINK_JS_SOURCES: em_link_post_js() source files.
# Optional args:
# - FEATURES:
# ~~~
# Wrapper around draco_add_executable() for Emscripten JS targets. Wires the
# generated glue (${GLUE_PATH}.cpp/.js) into the build and applies the
# em_link_pre_js/em_link_post_js steps. Required args: GLUE_PATH,
# PRE_LINK_JS_SOURCES, POST_LINK_JS_SOURCES; optional: NAME, SOURCES, DEFINES,
# FEATURES, INCLUDES, LINK_FLAGS.
macro(draco_add_emscripten_executable)
  unset(emexe_NAME)
  unset(emexe_FEATURES)
  unset(emexe_SOURCES)
  unset(emexe_DEFINES)
  unset(emexe_INCLUDES)
  unset(emexe_LINK_FLAGS)
  set(optional_args)
  set(single_value_args NAME GLUE_PATH)
  set(multi_value_args
      SOURCES
      DEFINES
      FEATURES
      INCLUDES
      LINK_FLAGS
      PRE_LINK_JS_SOURCES
      POST_LINK_JS_SOURCES)
  cmake_parse_arguments(emexe "${optional_args}" "${single_value_args}"
                        "${multi_value_args}" ${ARGN})
  if(NOT
     (emexe_GLUE_PATH
      AND emexe_POST_LINK_JS_SOURCES
      AND emexe_PRE_LINK_JS_SOURCES))
    # Fix: "FATAL" is not a valid message() mode, so the missing-argument
    # error was printed as plain text and configuration continued.
    message(FATAL_ERROR
            "draco_add_emscripten_executable: GLUE_PATH PRE_LINK_JS_SOURCES "
            "POST_LINK_JS_SOURCES args required.")
  endif()
  if(DRACO_VERBOSE GREATER 1)
    message(
      "--------- draco_add_emscripten_executable ---------\n"
      "emexe_NAME=${emexe_NAME}\n"
      "emexe_SOURCES=${emexe_SOURCES}\n"
      "emexe_DEFINES=${emexe_DEFINES}\n"
      "emexe_INCLUDES=${emexe_INCLUDES}\n"
      "emexe_LINK_FLAGS=${emexe_LINK_FLAGS}\n"
      "emexe_GLUE_PATH=${emexe_GLUE_PATH}\n"
      "emexe_FEATURES=${emexe_FEATURES}\n"
      "emexe_PRE_LINK_JS_SOURCES=${emexe_PRE_LINK_JS_SOURCES}\n"
      "emexe_POST_LINK_JS_SOURCES=${emexe_POST_LINK_JS_SOURCES}\n"
      "----------------------------------------------------\n")
  endif()
  # The Emscripten linker needs the C++ flags in addition to whatever has been
  # passed in with the target.
  list(APPEND emexe_LINK_FLAGS ${DRACO_CXX_FLAGS})
  if(DRACO_GLTF_BITSTREAM)
    # Add "_gltf" suffix to target output name.
    draco_add_executable(
      NAME ${emexe_NAME}
      OUTPUT_NAME ${emexe_NAME}_gltf
      SOURCES ${emexe_SOURCES}
      DEFINES ${emexe_DEFINES}
      INCLUDES ${emexe_INCLUDES}
      LINK_FLAGS ${emexe_LINK_FLAGS})
  else()
    draco_add_executable(
      NAME ${emexe_NAME}
      SOURCES ${emexe_SOURCES}
      DEFINES ${emexe_DEFINES}
      INCLUDES ${emexe_INCLUDES}
      LINK_FLAGS ${emexe_LINK_FLAGS})
  endif()
  foreach(feature ${emexe_FEATURES})
    draco_enable_feature(FEATURE ${feature} TARGETS ${emexe_NAME})
  endforeach()
  # Recompile the sources whenever the generated glue changes.
  set_property(
    SOURCE ${emexe_SOURCES}
    APPEND
    PROPERTY OBJECT_DEPENDS "${emexe_GLUE_PATH}.cpp")
  em_link_pre_js(${emexe_NAME} ${emexe_PRE_LINK_JS_SOURCES})
  em_link_post_js(${emexe_NAME} "${emexe_GLUE_PATH}.js"
                  ${emexe_POST_LINK_JS_SOURCES})
endmacro()

292
third-party/draco/cmake/draco_flags.cmake generated vendored Normal file
View File

@ -0,0 +1,292 @@
# Copyright 2021 The Draco Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# Include guard: skip re-processing if this file was already included.
if(DRACO_CMAKE_DRACO_FLAGS_CMAKE_)
  return()
endif() # DRACO_CMAKE_DRACO_FLAGS_CMAKE_
set(DRACO_CMAKE_DRACO_FLAGS_CMAKE_ 1)
# Standard CMake modules used by the flag-testing macros below.
include(CheckCXXCompilerFlag)
include(CheckCXXSourceCompiles)
# Adds compiler flags specified by FLAGS to the sources specified by SOURCES:
#
# draco_set_compiler_flags_for_sources(SOURCES <sources> FLAGS <flags>)
# Adds compiler flags specified by FLAGS to the sources specified by SOURCES:
#
# draco_set_compiler_flags_for_sources(SOURCES <sources> FLAGS <flags>)
#
# Both keyword arguments are required; configuration halts otherwise.
macro(draco_set_compiler_flags_for_sources)
  unset(compiler_SOURCES)
  unset(compiler_FLAGS)
  unset(optional_args)
  unset(single_value_args)
  set(multi_value_args SOURCES FLAGS)
  cmake_parse_arguments(compiler "${optional_args}" "${single_value_args}"
                        "${multi_value_args}" ${ARGN})
  if(NOT (compiler_SOURCES AND compiler_FLAGS))
    draco_die("draco_set_compiler_flags_for_sources: SOURCES and "
              "FLAGS required.")
  endif()
  set_source_files_properties(${compiler_SOURCES} PROPERTIES COMPILE_FLAGS
                              ${compiler_FLAGS})
  # Verbose tracing of every (source, flag) pair.
  if(DRACO_VERBOSE GREATER 1)
    foreach(source ${compiler_SOURCES})
      foreach(flag ${compiler_FLAGS})
        message("draco_set_compiler_flags_for_sources: source:${source} "
                "flag:${flag}")
      endforeach()
    endforeach()
  endif()
endmacro()
# Tests compiler flags stored in list(s) specified by FLAG_LIST_VAR_NAMES, adds
# flags to $DRACO_CXX_FLAGS when tests pass. Terminates configuration if
# FLAG_REQUIRED is specified and any flag check fails.
#
# ~~~
# draco_test_cxx_flag(<FLAG_LIST_VAR_NAMES <flag list variable(s)>>
# [FLAG_REQUIRED])
# ~~~
# Compile-tests the flags gathered from the list variables named in
# FLAG_LIST_VAR_NAMES and stores the surviving set in DRACO_CXX_FLAGS.
# First tries all flags combined; on failure retests each flag individually
# and drops the ones that fail. With FLAG_REQUIRED, any failure of the
# combined test halts configuration.
macro(draco_test_cxx_flag)
  unset(cxx_test_FLAG_LIST_VAR_NAMES)
  unset(cxx_test_FLAG_REQUIRED)
  unset(single_value_args)
  set(optional_args FLAG_REQUIRED)
  set(multi_value_args FLAG_LIST_VAR_NAMES)
  cmake_parse_arguments(cxx_test "${optional_args}" "${single_value_args}"
                        "${multi_value_args}" ${ARGN})
  if(NOT cxx_test_FLAG_LIST_VAR_NAMES)
    draco_die("draco_test_cxx_flag: FLAG_LIST_VAR_NAMES required")
  endif()
  # Expand each named list variable into one combined candidate flag list.
  unset(cxx_flags)
  foreach(list_var ${cxx_test_FLAG_LIST_VAR_NAMES})
    if(DRACO_VERBOSE)
      message("draco_test_cxx_flag: adding ${list_var} to cxx_flags")
    endif()
    list(APPEND cxx_flags ${${list_var}})
  endforeach()
  if(DRACO_VERBOSE)
    message("CXX test: all flags: ${cxx_flags}")
  endif()
  unset(all_cxx_flags)
  list(APPEND all_cxx_flags ${DRACO_CXX_FLAGS} ${cxx_flags})
  # Turn off output from check_cxx_source_compiles. Print status directly
  # instead since the logging messages from check_cxx_source_compiles can be
  # quite confusing.
  set(CMAKE_REQUIRED_QUIET TRUE)
  # Run the actual compile test.
  unset(draco_all_cxx_flags_pass CACHE)
  message("--- Running combined CXX flags test, flags: ${all_cxx_flags}")
  # check_cxx_compiler_flag() requires that the flags are a string. When flags
  # are passed as a list it will remove the list separators, and attempt to run
  # a compile command using list entries concatenated together as a single
  # argument. Avoid the problem by forcing the argument to be a string.
  draco_set_and_stringify(SOURCE_VARS all_cxx_flags DEST all_cxx_flags_string)
  check_cxx_compiler_flag("${all_cxx_flags_string}" draco_all_cxx_flags_pass)
  if(cxx_test_FLAG_REQUIRED AND NOT draco_all_cxx_flags_pass)
    draco_die("Flag test failed for required flag(s): "
              "${all_cxx_flags} and FLAG_REQUIRED specified.")
  endif()
  if(draco_all_cxx_flags_pass)
    # Test passed: update the global flag list used by the draco target
    # creation wrappers.
    set(DRACO_CXX_FLAGS ${cxx_flags})
    list(REMOVE_DUPLICATES DRACO_CXX_FLAGS)
    if(DRACO_VERBOSE)
      message("DRACO_CXX_FLAGS=${DRACO_CXX_FLAGS}")
    endif()
    message("--- Passed combined CXX flags test")
  else()
    message("--- Failed combined CXX flags test, testing flags individually.")
    if(cxx_flags)
      message("--- Testing flags from $cxx_flags: " "${cxx_flags}")
      foreach(cxx_flag ${cxx_flags})
        # Since 3.17.0 check_cxx_compiler_flag() sets a normal variable at
        # parent scope while check_cxx_source_compiles() continues to set an
        # internal cache variable, so we unset both to avoid the failure /
        # success state persisting between checks. This has been fixed in newer
        # CMake releases, but 3.17 is pretty common: we will need this to avoid
        # weird build breakages while the fix propagates.
        unset(cxx_flag_test_passed)
        unset(cxx_flag_test_passed CACHE)
        message("--- Testing flag: ${cxx_flag}")
        check_cxx_compiler_flag("${cxx_flag}" cxx_flag_test_passed)
        if(cxx_flag_test_passed)
          message("--- Passed test for ${cxx_flag}")
        else()
          # Drop the failing flag and keep going.
          list(REMOVE_ITEM cxx_flags ${cxx_flag})
          message("--- Failed test for ${cxx_flag}, flag removed.")
        endif()
      endforeach()
      set(DRACO_CXX_FLAGS ${cxx_flags})
    endif()
  endif()
  if(DRACO_CXX_FLAGS)
    list(REMOVE_DUPLICATES DRACO_CXX_FLAGS)
  endif()
endmacro()
# Tests executable linker flags stored in list specified by FLAG_LIST_VAR_NAME,
# adds flags to $DRACO_EXE_LINKER_FLAGS when test passes. Terminates
# configuration when flag check fails. draco_set_cxx_flags() must be called
# before calling this macro because it assumes $DRACO_CXX_FLAGS contains only
# valid CXX flags.
#
# draco_test_exe_linker_flag(<FLAG_LIST_VAR_NAME <flag list variable)>)
# Link-tests the flags in the list variable named by FLAG_LIST_VAR_NAME and,
# on success, appends them to DRACO_EXE_LINKER_FLAGS. Halts configuration if
# the test link fails. Assumes DRACO_CXX_FLAGS already holds validated flags
# (draco_set_cxx_flags() must run first).
macro(draco_test_exe_linker_flag)
  unset(link_FLAG_LIST_VAR_NAME)
  unset(optional_args)
  unset(multi_value_args)
  set(single_value_args FLAG_LIST_VAR_NAME)
  cmake_parse_arguments(link "${optional_args}" "${single_value_args}"
                        "${multi_value_args}" ${ARGN})
  if(NOT link_FLAG_LIST_VAR_NAME)
    draco_die("draco_test_link_flag: FLAG_LIST_VAR_NAME required")
  endif()
  draco_set_and_stringify(DEST linker_flags SOURCE_VARS
                          ${link_FLAG_LIST_VAR_NAME})
  if(DRACO_VERBOSE)
    message("EXE LINKER test: all flags: ${linker_flags}")
  endif()
  # Tests of $DRACO_CXX_FLAGS have already passed. Include them with the
  # linker test.
  draco_set_and_stringify(DEST CMAKE_REQUIRED_FLAGS SOURCE_VARS
                          DRACO_CXX_FLAGS)
  # Cache the global exe linker flags.
  if(CMAKE_EXE_LINKER_FLAGS)
    set(cached_CMAKE_EXE_LINKER_FLAGS ${CMAKE_EXE_LINKER_FLAGS})
    draco_set_and_stringify(DEST CMAKE_EXE_LINKER_FLAGS SOURCE ${linker_flags})
  endif()
  # NOTE(review): when CMAKE_EXE_LINKER_FLAGS was non-empty above, it has just
  # been overwritten with ${linker_flags}, so the line below duplicates the
  # candidate flags instead of combining them with the original global flags —
  # confirm this is the intended behavior.
  draco_set_and_stringify(DEST CMAKE_EXE_LINKER_FLAGS SOURCE ${linker_flags}
                          ${CMAKE_EXE_LINKER_FLAGS})
  # Turn off output from check_cxx_source_compiles. Print status directly
  # instead since the logging messages from check_cxx_source_compiles can be
  # quite confusing.
  set(CMAKE_REQUIRED_QUIET TRUE)
  message("--- Running EXE LINKER test for flags: ${linker_flags}")
  unset(linker_flag_test_passed CACHE)
  set(draco_cxx_main "\nint main() { return 0; }")
  check_cxx_source_compiles("${draco_cxx_main}" linker_flag_test_passed)
  if(NOT linker_flag_test_passed)
    draco_die("EXE LINKER test failed.")
  endif()
  message("--- Passed EXE LINKER flag test.")
  # Restore cached global exe linker flags.
  if(cached_CMAKE_EXE_LINKER_FLAGS)
    set(CMAKE_EXE_LINKER_FLAGS ${cached_CMAKE_EXE_LINKER_FLAGS})
  else()
    unset(CMAKE_EXE_LINKER_FLAGS)
  endif()
  list(APPEND DRACO_EXE_LINKER_FLAGS ${${link_FLAG_LIST_VAR_NAME}})
  list(REMOVE_DUPLICATES DRACO_EXE_LINKER_FLAGS)
endmacro()
# Runs the draco compiler tests. This macro builds up the list of list var(s)
# that is passed to draco_test_cxx_flag().
#
# Note: draco_set_build_definitions() must be called before this macro.
# Collects the compiler-specific draco_*_cxx_flags lists (plus any
# user-supplied DRACO_CXX_FLAGS) and runs them through draco_test_cxx_flag(),
# which updates DRACO_CXX_FLAGS with the flags that pass.
macro(draco_set_cxx_flags)
  unset(cxx_flag_lists)
  if(CMAKE_CXX_COMPILER_ID MATCHES "Clang|GNU")
    list(APPEND cxx_flag_lists draco_base_cxx_flags)
  endif()
  # Append clang flags after the base set to allow -Wno* overrides to take
  # effect. Some of the base flags may enable a large set of warnings, e.g.,
  # -Wall.
  if(CMAKE_CXX_COMPILER_ID MATCHES "Clang")
    list(APPEND cxx_flag_lists draco_clang_cxx_flags)
  endif()
  if(MSVC)
    list(APPEND cxx_flag_lists draco_msvc_cxx_flags)
  endif()
  draco_set_and_stringify(DEST cxx_flags SOURCE_VARS ${cxx_flag_lists})
  if(DRACO_VERBOSE)
    message("draco_set_cxx_flags: internal CXX flags: ${cxx_flags}")
  endif()
  # User flags (if any) are appended last so they can override internal ones.
  if(DRACO_CXX_FLAGS)
    list(APPEND cxx_flag_lists DRACO_CXX_FLAGS)
    if(DRACO_VERBOSE)
      message("draco_set_cxx_flags: user CXX flags: ${DRACO_CXX_FLAGS}")
    endif()
  endif()
  draco_set_and_stringify(DEST cxx_flags SOURCE_VARS ${cxx_flag_lists})
  if(cxx_flags)
    draco_test_cxx_flag(FLAG_LIST_VAR_NAMES ${cxx_flag_lists})
  endif()
endmacro()
# Collects Draco built-in and user-specified linker flags and tests them. Halts
# configuration and reports the error when any flags cause the build to fail.
#
# Note: draco_test_exe_linker_flag() does the real work of setting the flags and
# running the test compile commands.
# Collects the built-in executable linker flags and delegates the actual
# testing (and the update of DRACO_EXE_LINKER_FLAGS) to
# draco_test_exe_linker_flag(). No-op when there are no flags to test.
macro(draco_set_exe_linker_flags)
  unset(linker_flag_lists)
  if(DRACO_VERBOSE)
    message("draco_set_exe_linker_flags: "
            "draco_base_exe_linker_flags=${draco_base_exe_linker_flags}")
  endif()
  if(draco_base_exe_linker_flags)
    list(APPEND linker_flag_lists draco_base_exe_linker_flags)
  endif()
  if(linker_flag_lists)
    unset(test_linker_flags)
    if(DRACO_VERBOSE)
      message("draco_set_exe_linker_flags: "
              "linker_flag_lists=${linker_flag_lists}")
    endif()
    draco_set_and_stringify(DEST test_linker_flags SOURCE_VARS
                            ${linker_flag_lists})
    draco_test_exe_linker_flag(FLAG_LIST_VAR_NAME test_linker_flags)
  endif()
endmacro()

124
third-party/draco/cmake/draco_helpers.cmake generated vendored Normal file
View File

@ -0,0 +1,124 @@
# Copyright 2021 The Draco Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
if(DRACO_CMAKE_DRACO_HELPERS_CMAKE_)
return()
endif() # DRACO_CMAKE_DRACO_HELPERS_CMAKE_
set(DRACO_CMAKE_DRACO_HELPERS_CMAKE_ 1)
# Kills build generation using message(FATAL_ERROR) and outputs all data passed
# to the console via use of $ARGN.
macro(draco_die)
# FATAL_ERROR stops CMake generation immediately; all arguments are echoed.
message(FATAL_ERROR ${ARGN})
endmacro()
# Converts semi-colon delimited list variable(s) to string. Output is written to
# variable supplied via the DEST parameter. Input is from an expanded variable
# referenced by SOURCE and/or variable(s) referenced by SOURCE_VARS.
macro(draco_set_and_stringify)
set(optional_args)
# NOTE(review): SOURCE_VAR is declared here but never read below — only
# SOURCE and SOURCE_VARS are handled. Presumably vestigial; confirm upstream.
set(single_value_args DEST SOURCE_VAR)
set(multi_value_args SOURCE SOURCE_VARS)
cmake_parse_arguments(sas "${optional_args}" "${single_value_args}"
"${multi_value_args}" ${ARGN})
if(NOT sas_DEST OR NOT (sas_SOURCE OR sas_SOURCE_VARS))
draco_die("draco_set_and_stringify: DEST and at least one of SOURCE "
"SOURCE_VARS required.")
endif()
unset(${sas_DEST})
if(sas_SOURCE)
# $sas_SOURCE is one or more expanded variables, just copy the values to
# $sas_DEST.
set(${sas_DEST} "${sas_SOURCE}")
endif()
if(sas_SOURCE_VARS)
# $sas_SOURCE_VARS is one or more variable names. Each iteration expands a
# variable and appends it to $sas_DEST.
foreach(source_var ${sas_SOURCE_VARS})
set(${sas_DEST} "${${sas_DEST}} ${${source_var}}")
endforeach()
# Because $sas_DEST can be empty when entering this scope leading whitespace
# can be introduced to $sas_DEST on the first iteration of the above loop.
# Remove it:
string(STRIP "${${sas_DEST}}" ${sas_DEST})
endif()
# Lists in CMake are simply semicolon delimited strings, so stringification is
# just a find and replace of the semicolon.
string(REPLACE ";" " " ${sas_DEST} "${${sas_DEST}}")
if(DRACO_VERBOSE GREATER 1)
message("draco_set_and_stringify: ${sas_DEST}=${${sas_DEST}}")
endif()
endmacro()
# Creates a dummy source file in $DRACO_GENERATED_SOURCES_DIRECTORY and adds it
# to the specified target. Optionally adds its path to a list variable.
#
# draco_create_dummy_source_file(<TARGET <target> BASENAME <basename of file>>
# [LISTVAR <list variable>])
macro(draco_create_dummy_source_file)
set(optional_args)
set(single_value_args TARGET BASENAME LISTVAR)
set(multi_value_args)
cmake_parse_arguments(cdsf "${optional_args}" "${single_value_args}"
"${multi_value_args}" ${ARGN})
if(NOT cdsf_TARGET OR NOT cdsf_BASENAME)
draco_die("draco_create_dummy_source_file: TARGET and BASENAME required.")
endif()
# Default the generated-sources directory into the build tree when the
# including project has not configured one.
if(NOT DRACO_GENERATED_SOURCES_DIRECTORY)
set(DRACO_GENERATED_SOURCES_DIRECTORY "${draco_build}/gen_src")
endif()
set(dummy_source_dir "${DRACO_GENERATED_SOURCES_DIRECTORY}")
set(dummy_source_file
"${dummy_source_dir}/draco_${cdsf_TARGET}_${cdsf_BASENAME}.cc")
# The file contains a declaration + empty definition of a uniquely named
# function so the translation unit is never empty (some toolchains warn or
# error on empty archives/objects).
set(dummy_source_code
"// Generated file. DO NOT EDIT!\n"
"// C++ source file created for target ${cdsf_TARGET}.\n"
"void draco_${cdsf_TARGET}_${cdsf_BASENAME}_dummy_function(void)\;\n"
"void draco_${cdsf_TARGET}_${cdsf_BASENAME}_dummy_function(void) {}\n")
file(WRITE "${dummy_source_file}" ${dummy_source_code})
target_sources(${cdsf_TARGET} PRIVATE ${dummy_source_file})
if(cdsf_LISTVAR)
list(APPEND ${cdsf_LISTVAR} "${dummy_source_file}")
endif()
endmacro()
# Loads the version string from $draco_src_root/core/draco_version.h and sets
# $DRACO_VERSION.
macro(draco_load_version_info)
file(STRINGS "${draco_src_root}/core/draco_version.h" version_file_strings)
foreach(str ${version_file_strings})
# The version lives on the line declaring kDracoVersion, e.g.:
#   static const char kDracoVersion[] = "1.5.6";
if(str MATCHES "char kDracoVersion")
# Extract the text between the opening quote and the closing
# quote-semicolon pair.
string(FIND "${str}" "\"" open_quote_pos)
string(FIND "${str}" ";" semicolon_pos)
math(EXPR open_quote_pos "${open_quote_pos} + 1")
math(EXPR close_quote_pos "${semicolon_pos} - 1")
math(EXPR version_string_length "${close_quote_pos} - ${open_quote_pos}")
string(SUBSTRING "${str}" ${open_quote_pos} ${version_string_length}
DRACO_VERSION)
break()
endif()
endforeach()
endmacro()

135
third-party/draco/cmake/draco_install.cmake generated vendored Normal file
View File

@ -0,0 +1,135 @@
# Copyright 2021 The Draco Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
if(DRACO_CMAKE_DRACO_INSTALL_CMAKE_)
return()
endif() # DRACO_CMAKE_DRACO_INSTALL_CMAKE_
set(DRACO_CMAKE_DRACO_INSTALL_CMAKE_ 1)
include(CMakePackageConfigHelpers)
include(GNUInstallDirs)
# Sets up the draco install targets. Must be called after the static library
# target is created.
macro(draco_setup_install_target)
if(DRACO_INSTALL)
# GNUInstallDirs-provided destinations.
set(bin_path "${CMAKE_INSTALL_BINDIR}")
set(data_path "${CMAKE_INSTALL_DATAROOTDIR}")
set(includes_path "${CMAKE_INSTALL_INCLUDEDIR}")
set(libs_path "${CMAKE_INSTALL_LIBDIR}")
# Collect every header from the tracked source list for installation.
foreach(file ${draco_sources})
if(file MATCHES "h$")
list(APPEND draco_api_includes ${file})
endif()
endforeach()
list(REMOVE_DUPLICATES draco_api_includes)
# Strip $draco_src_root from the file paths: we need to install relative to
# $include_directory.
# list(TRANSFORM draco_api_includes REPLACE "${draco_src_root}/" "")
# list(TRANSFORM) requires CMake 3.12+; this local macro emulates a
# single-element replacement for older CMake versions.
macro(LIST_REPLACE LIST INDEX NEWVALUE)
list(INSERT ${LIST} ${INDEX} ${NEWVALUE})
MATH(EXPR __INDEX "${INDEX} + 1")
list (REMOVE_AT ${LIST} ${__INDEX})
endmacro(LIST_REPLACE)
list(LENGTH draco_api_includes list_count)
math(EXPR list_max_index ${list_count}-1)
foreach(i RANGE ${list_max_index})
list(GET draco_api_includes ${i} x)
string(REPLACE "${draco_src_root}/" "" new ${x})
LIST_REPLACE(draco_api_includes ${i} ${new})
endforeach(i)
# Install each header under <prefix>/include/draco/, preserving its
# directory structure relative to the source root.
foreach(draco_api_include ${draco_api_includes})
get_filename_component(file_directory ${draco_api_include} DIRECTORY)
set(target_directory "${includes_path}/draco/${file_directory}")
install(FILES ${draco_src_root}/${draco_api_include}
DESTINATION "${target_directory}")
endforeach()
# draco_features.h is generated into the build tree, not the source tree.
install(FILES "${draco_build}/draco/draco_features.h"
DESTINATION "${includes_path}/draco/")
install(TARGETS draco_decoder DESTINATION "${bin_path}")
install(TARGETS draco_encoder DESTINATION "${bin_path}")
if(DRACO_TRANSCODER_SUPPORTED)
install(TARGETS draco_transcoder DESTINATION "${bin_path}")
endif()
# On Windows a single "draco" target is installed; elsewhere separate
# static (and optionally shared) library targets exist.
if(WIN32)
install(
TARGETS draco
EXPORT dracoExport
RUNTIME DESTINATION "${bin_path}"
ARCHIVE DESTINATION "${libs_path}"
LIBRARY DESTINATION "${libs_path}")
else()
install(
TARGETS draco_static
EXPORT dracoExport
DESTINATION "${libs_path}")
if(BUILD_SHARED_LIBS)
install(
TARGETS draco_shared
EXPORT dracoExport
RUNTIME DESTINATION "${bin_path}"
ARCHIVE DESTINATION "${libs_path}"
LIBRARY DESTINATION "${libs_path}")
endif()
endif()
if(DRACO_UNITY_PLUGIN)
install(TARGETS dracodec_unity DESTINATION "${libs_path}")
endif()
if(DRACO_MAYA_PLUGIN)
install(TARGETS draco_maya_wrapper DESTINATION "${libs_path}")
endif()
# pkg-config: draco.pc
configure_file("${draco_root}/cmake/draco.pc.template"
"${draco_build}/draco.pc" @ONLY NEWLINE_STYLE UNIX)
install(FILES "${draco_build}/draco.pc" DESTINATION "${libs_path}/pkgconfig")
# CMake config: draco-config.cmake
configure_package_config_file(
"${draco_root}/cmake/draco-config.cmake.template"
"${draco_build}/draco-config.cmake"
INSTALL_DESTINATION "${data_path}/cmake/draco")
write_basic_package_version_file(
"${draco_build}/draco-config-version.cmake"
VERSION ${DRACO_VERSION}
COMPATIBILITY AnyNewerVersion)
# Export the targets both for the build tree (export()) and for the
# install tree (install(EXPORT)) under the draco:: namespace.
export(
EXPORT dracoExport
NAMESPACE draco::
FILE "${draco_build}/draco-targets.cmake")
install(
EXPORT dracoExport
NAMESPACE draco::
FILE draco-targets.cmake
DESTINATION "${data_path}/cmake/draco")
install(FILES "${draco_build}/draco-config.cmake"
"${draco_build}/draco-config-version.cmake"
DESTINATION "${data_path}/cmake/draco")
endif(DRACO_INSTALL)
endmacro()

106
third-party/draco/cmake/draco_intrinsics.cmake generated vendored Normal file
View File

@ -0,0 +1,106 @@
# Copyright 2021 The Draco Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
if(DRACO_CMAKE_DRACO_INTRINSICS_CMAKE_)
return()
endif() # DRACO_CMAKE_DRACO_INTRINSICS_CMAKE_
set(DRACO_CMAKE_DRACO_INTRINSICS_CMAKE_ 1)
# Returns the compiler flag for the SIMD intrinsics suffix specified by the
# SUFFIX argument via the variable specified by the VARIABLE argument:
# draco_get_intrinsics_flag_for_suffix(SUFFIX <suffix> VARIABLE <var name>)
macro(draco_get_intrinsics_flag_for_suffix)
unset(intrinsics_SUFFIX)
unset(intrinsics_VARIABLE)
unset(optional_args)
unset(multi_value_args)
set(single_value_args SUFFIX VARIABLE)
cmake_parse_arguments(intrinsics "${optional_args}" "${single_value_args}"
"${multi_value_args}" ${ARGN})
if(NOT (intrinsics_SUFFIX AND intrinsics_VARIABLE))
message(FATAL_ERROR "draco_get_intrinsics_flag_for_suffix: SUFFIX and "
"VARIABLE required.")
endif()
if(intrinsics_SUFFIX MATCHES "neon")
# No flag on MSVC: NEON support is implied by the ARM target.
if(NOT MSVC)
set(${intrinsics_VARIABLE} "${DRACO_NEON_INTRINSICS_FLAG}")
endif()
elseif(intrinsics_SUFFIX MATCHES "sse4")
if(NOT MSVC)
set(${intrinsics_VARIABLE} "-msse4.1")
endif()
else()
# Fixed typo in user-visible diagnostic: "instrinics" -> "intrinsics".
message(FATAL_ERROR "draco_get_intrinsics_flag_for_suffix: Unknown "
"intrinsics suffix: ${intrinsics_SUFFIX}")
endif()
if(DRACO_VERBOSE GREATER 1)
message("draco_get_intrinsics_flag_for_suffix: "
"suffix:${intrinsics_SUFFIX} flag:${${intrinsics_VARIABLE}}")
endif()
endmacro()
# Processes source files specified by SOURCES and adds intrinsics flags as
# necessary: draco_process_intrinsics_sources(SOURCES <sources>)
#
# Detects requirement for intrinsics flags using source file name suffix.
# Currently supports only SSE4.1.
macro(draco_process_intrinsics_sources)
unset(arg_TARGET)
unset(arg_SOURCES)
unset(optional_args)
set(single_value_args TARGET)
set(multi_value_args SOURCES)
cmake_parse_arguments(arg "${optional_args}" "${single_value_args}"
"${multi_value_args}" ${ARGN})
if(NOT (arg_TARGET AND arg_SOURCES))
message(FATAL_ERROR "draco_process_intrinsics_sources: TARGET and "
"SOURCES required.")
endif()
# SSE4.1: pick out sources named *<sse4 suffix> and attach the -msse4.1
# (or equivalent) flag to just those files.
if(DRACO_ENABLE_SSE4_1 AND draco_have_sse4)
unset(sse4_sources)
list(APPEND sse4_sources ${arg_SOURCES})
list(FILTER sse4_sources INCLUDE REGEX "${draco_sse4_source_file_suffix}$")
if(sse4_sources)
unset(sse4_flags)
draco_get_intrinsics_flag_for_suffix(
SUFFIX ${draco_sse4_source_file_suffix} VARIABLE sse4_flags)
if(sse4_flags)
draco_set_compiler_flags_for_sources(SOURCES ${sse4_sources} FLAGS
${sse4_flags})
endif()
endif()
endif()
# NEON: same per-source flag handling, but only when a NEON flag is
# actually required by the toolchain (DRACO_NEON_INTRINSICS_FLAG set).
if(DRACO_ENABLE_NEON AND draco_have_neon)
unset(neon_sources)
list(APPEND neon_sources ${arg_SOURCES})
list(FILTER neon_sources INCLUDE REGEX "${draco_neon_source_file_suffix}$")
if(neon_sources AND DRACO_NEON_INTRINSICS_FLAG)
unset(neon_flags)
draco_get_intrinsics_flag_for_suffix(
SUFFIX ${draco_neon_source_file_suffix} VARIABLE neon_flags)
if(neon_flags)
draco_set_compiler_flags_for_sources(SOURCES ${neon_sources} FLAGS
${neon_flags})
endif()
endif()
endif()
endmacro()

358
third-party/draco/cmake/draco_options.cmake generated vendored Normal file
View File

@ -0,0 +1,358 @@
# Copyright 2021 The Draco Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# Include guard: bail out when this file has already been processed.
if(DRACO_CMAKE_DRACO_OPTIONS_CMAKE_)
return()
endif() # DRACO_CMAKE_DRACO_OPTIONS_CMAKE_
# Fix: the guard variable must be set to a truthy value (it was set to
# empty, which evaluates false and made the guard a no-op). Every other
# draco cmake module sets its guard to 1.
set(DRACO_CMAKE_DRACO_OPTIONS_CMAKE_ 1)
# NOTE(review): the features header is placed under ${draco_src_root} here,
# while the install step reads "${draco_build}/draco/draco_features.h" —
# verify the two paths agree in this vendored copy.
set(draco_features_file_name "${draco_src_root}/draco_features.h")
set(draco_features_list)
# Simple wrapper for CMake's builtin option command that tracks draco's build
# options in the list variable $draco_options.
macro(draco_option)
unset(option_NAME)
unset(option_HELPSTRING)
unset(option_VALUE)
unset(optional_args)
unset(multi_value_args)
set(single_value_args NAME HELPSTRING VALUE)
cmake_parse_arguments(option "${optional_args}" "${single_value_args}"
"${multi_value_args}" ${ARGN})
# DEFINED is used for VALUE so that an explicit OFF still passes the check.
if(NOT
(option_NAME
AND option_HELPSTRING
AND DEFINED option_VALUE))
message(FATAL_ERROR "draco_option: NAME HELPSTRING and VALUE required.")
endif()
option(${option_NAME} ${option_HELPSTRING} ${option_VALUE})
if(DRACO_VERBOSE GREATER 2)
message(
"--------- draco_option ---------\n"
"option_NAME=${option_NAME}\n"
"option_HELPSTRING=${option_HELPSTRING}\n"
"option_VALUE=${option_VALUE}\n"
"------------------------------------------\n")
endif()
# Track every declared option (deduplicated) for draco_dump_options().
list(APPEND draco_options ${option_NAME})
list(REMOVE_DUPLICATES draco_options)
endmacro()
# Dumps the $draco_options list via CMake message command.
macro(draco_dump_options)
# Print "NAME: value" for every option registered via draco_option().
foreach(option_name ${draco_options})
message("${option_name}: ${${option_name}}")
endforeach()
endmacro()
# Set default options.
# Declares every draco build option with its default value, then remaps any
# deprecated option names supplied by the user.
macro(draco_set_default_options)
draco_option(
NAME DRACO_FAST
HELPSTRING "Try to build faster libs."
VALUE OFF)
draco_option(
NAME DRACO_JS_GLUE
HELPSTRING "Enable JS Glue and JS targets when using Emscripten."
VALUE ON)
draco_option(
NAME DRACO_IE_COMPATIBLE
HELPSTRING "Enable support for older IE builds when using Emscripten."
VALUE OFF)
draco_option(
NAME DRACO_MESH_COMPRESSION
HELPSTRING "Enable mesh compression."
VALUE ON)
draco_option(
NAME DRACO_POINT_CLOUD_COMPRESSION
HELPSTRING "Enable point cloud compression."
VALUE ON)
draco_option(
NAME DRACO_PREDICTIVE_EDGEBREAKER
HELPSTRING "Enable predictive edgebreaker."
VALUE ON)
# Fixed typo in user-visible help text ("stand" -> "standard").
draco_option(
NAME DRACO_STANDARD_EDGEBREAKER
HELPSTRING "Enable standard edgebreaker."
VALUE ON)
draco_option(
NAME DRACO_BACKWARDS_COMPATIBILITY
HELPSTRING "Enable backwards compatibility."
VALUE ON)
draco_option(
NAME DRACO_DECODER_ATTRIBUTE_DEDUPLICATION
HELPSTRING "Enable attribute deduping."
VALUE OFF)
draco_option(
NAME DRACO_TESTS
HELPSTRING "Enables tests."
VALUE OFF)
draco_option(
NAME DRACO_WASM
HELPSTRING "Enables WASM support."
VALUE OFF)
draco_option(
NAME DRACO_UNITY_PLUGIN
HELPSTRING "Build plugin library for Unity."
VALUE OFF)
draco_option(
NAME DRACO_ANIMATION_ENCODING
HELPSTRING "Enable animation."
VALUE OFF)
draco_option(
NAME DRACO_GLTF_BITSTREAM
HELPSTRING "Draco GLTF extension bitstream specified features only."
VALUE OFF)
draco_option(
NAME DRACO_MAYA_PLUGIN
HELPSTRING "Build plugin library for Maya."
VALUE OFF)
draco_option(
NAME DRACO_TRANSCODER_SUPPORTED
HELPSTRING "Enable the Draco transcoder."
VALUE OFF)
draco_option(
NAME DRACO_DEBUG_COMPILER_WARNINGS
HELPSTRING "Turn on more warnings."
VALUE OFF)
draco_option(
NAME DRACO_INSTALL
HELPSTRING "Enable installation."
VALUE ON)
draco_check_deprecated_options()
endmacro()
# Warns when a deprecated option is used and sets the option that replaced it.
macro(draco_handle_deprecated_option)
unset(option_OLDNAME)
unset(option_NEWNAME)
unset(optional_args)
unset(multi_value_args)
set(single_value_args OLDNAME NEWNAME)
cmake_parse_arguments(option "${optional_args}" "${single_value_args}"
"${multi_value_args}" ${ARGN})
# Only warn/forward when the deprecated option was actually set truthy.
if("${${option_OLDNAME}}")
message(WARNING "${option_OLDNAME} is deprecated. Use ${option_NEWNAME}.")
set(${option_NEWNAME} ${${option_OLDNAME}})
endif()
endmacro()
# Checks for use of deprecated options.
macro(draco_check_deprecated_options)
draco_handle_deprecated_option(OLDNAME ENABLE_EXTRA_SPEED NEWNAME DRACO_FAST)
draco_handle_deprecated_option(OLDNAME ENABLE_JS_GLUE NEWNAME DRACO_JS_GLUE)
draco_handle_deprecated_option(OLDNAME ENABLE_MESH_COMPRESSION NEWNAME
DRACO_MESH_COMPRESSION)
draco_handle_deprecated_option(OLDNAME ENABLE_POINT_CLOUD_COMPRESSION NEWNAME
DRACO_POINT_CLOUD_COMPRESSION)
draco_handle_deprecated_option(OLDNAME ENABLE_PREDICTIVE_EDGEBREAKER NEWNAME
DRACO_PREDICTIVE_EDGEBREAKER)
draco_handle_deprecated_option(OLDNAME ENABLE_STANDARD_EDGEBREAKER NEWNAME
DRACO_STANDARD_EDGEBREAKER)
draco_handle_deprecated_option(OLDNAME ENABLE_BACKWARDS_COMPATIBILITY NEWNAME
DRACO_BACKWARDS_COMPATIBILITY)
draco_handle_deprecated_option(OLDNAME ENABLE_DECODER_ATTRIBUTE_DEDUPLICATION
NEWNAME DRACO_DECODER_ATTRIBUTE_DEDUPLICATION)
draco_handle_deprecated_option(OLDNAME ENABLE_TESTS NEWNAME DRACO_TESTS)
draco_handle_deprecated_option(OLDNAME ENABLE_WASM NEWNAME DRACO_WASM)
draco_handle_deprecated_option(OLDNAME BUILD_UNITY_PLUGIN NEWNAME
DRACO_UNITY_PLUGIN)
draco_handle_deprecated_option(OLDNAME BUILD_ANIMATION_ENCODING NEWNAME
DRACO_ANIMATION_ENCODING)
# BUILD_FOR_GLTF maps to DRACO_GLTF, which is itself deprecated and mapped
# to DRACO_GLTF_BITSTREAM by the final entry below, so ordering matters.
draco_handle_deprecated_option(OLDNAME BUILD_FOR_GLTF NEWNAME DRACO_GLTF)
draco_handle_deprecated_option(OLDNAME BUILD_MAYA_PLUGIN NEWNAME
DRACO_MAYA_PLUGIN)
# NOTE(review): BUILD_USD_PLUGIN forwarding to BUILD_SHARED_LIBS looks
# intentional upstream (the USD plugin required a shared lib) — confirm.
draco_handle_deprecated_option(OLDNAME BUILD_USD_PLUGIN NEWNAME
BUILD_SHARED_LIBS)
draco_handle_deprecated_option(OLDNAME DRACO_GLTF NEWNAME
DRACO_GLTF_BITSTREAM)
endmacro()
# Macro for setting Draco features based on user configuration. Features enabled
# by this macro are Draco global.
macro(draco_set_optional_features)
if(DRACO_GLTF_BITSTREAM)
# Enable only the features included in the Draco GLTF bitstream spec.
draco_enable_feature(FEATURE "DRACO_MESH_COMPRESSION_SUPPORTED")
draco_enable_feature(FEATURE "DRACO_NORMAL_ENCODING_SUPPORTED")
draco_enable_feature(FEATURE "DRACO_STANDARD_EDGEBREAKER_SUPPORTED")
else()
if(DRACO_POINT_CLOUD_COMPRESSION)
draco_enable_feature(FEATURE "DRACO_POINT_CLOUD_COMPRESSION_SUPPORTED")
endif()
# Mesh compression implies normal encoding; the edgebreaker variants are
# individually selectable on top of it.
if(DRACO_MESH_COMPRESSION)
draco_enable_feature(FEATURE "DRACO_MESH_COMPRESSION_SUPPORTED")
draco_enable_feature(FEATURE "DRACO_NORMAL_ENCODING_SUPPORTED")
if(DRACO_STANDARD_EDGEBREAKER)
draco_enable_feature(FEATURE "DRACO_STANDARD_EDGEBREAKER_SUPPORTED")
endif()
if(DRACO_PREDICTIVE_EDGEBREAKER)
draco_enable_feature(FEATURE "DRACO_PREDICTIVE_EDGEBREAKER_SUPPORTED")
endif()
endif()
if(DRACO_BACKWARDS_COMPATIBILITY)
draco_enable_feature(FEATURE "DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED")
endif()
if(NOT EMSCRIPTEN)
# For now, enable deduplication for both encoder and decoder.
# TODO(ostava): Support for disabling attribute deduplication for the C++
# decoder is planned in future releases.
draco_enable_feature(FEATURE
DRACO_ATTRIBUTE_INDICES_DEDUPLICATION_SUPPORTED)
draco_enable_feature(FEATURE
DRACO_ATTRIBUTE_VALUES_DEDUPLICATION_SUPPORTED)
endif()
endif()
# Plugin builds produce shared objects, hence PIC is forced on.
if(DRACO_UNITY_PLUGIN)
draco_enable_feature(FEATURE "DRACO_UNITY_PLUGIN")
set(CMAKE_POSITION_INDEPENDENT_CODE ON)
endif()
if(DRACO_MAYA_PLUGIN)
draco_enable_feature(FEATURE "DRACO_MAYA_PLUGIN")
set(CMAKE_POSITION_INDEPENDENT_CODE ON)
endif()
if(DRACO_TRANSCODER_SUPPORTED)
draco_enable_feature(FEATURE "DRACO_TRANSCODER_SUPPORTED")
endif()
endmacro()
# Macro that handles tracking of Draco preprocessor symbols for the purpose of
# producing draco_features.h.
#
# ~~~
# draco_enable_feature(FEATURE <feature_name> [TARGETS <target_name>])
# ~~~
#
# FEATURE is required. It should be a Draco preprocessor symbol. TARGETS is
# optional. It can be one or more draco targets.
#
# When the TARGETS argument is not present the preproc symbol is added to
# draco_features.h. When it is draco_features.h is unchanged, and
# target_compile_options() is called for each target specified.
macro(draco_enable_feature)
set(def_flags)
set(def_single_arg_opts FEATURE)
set(def_multi_arg_opts TARGETS)
cmake_parse_arguments(DEF "${def_flags}" "${def_single_arg_opts}"
"${def_multi_arg_opts}" ${ARGN})
if("${DEF_FEATURE}" STREQUAL "")
message(FATAL_ERROR "Empty FEATURE passed to draco_enable_feature().")
endif()
# Do nothing/return early if $DEF_FEATURE is already in the list.
# NOTE(review): this is a macro, so return() returns from the *calling*
# scope, not just this macro — presumably benign at the call sites used
# here, but verify against upstream before relying on it.
list(FIND draco_features_list ${DEF_FEATURE} df_index)
if(NOT df_index EQUAL -1)
return()
endif()
# Without TARGETS the feature goes into draco_features.h; with TARGETS it
# becomes a per-target compile definition instead.
list(LENGTH DEF_TARGETS df_targets_list_length)
if(${df_targets_list_length} EQUAL 0)
list(APPEND draco_features_list ${DEF_FEATURE})
else()
foreach(target ${DEF_TARGETS})
target_compile_definitions(${target} PRIVATE ${DEF_FEATURE})
endforeach()
endif()
endmacro()
# Function for generating draco_features.h.
function(draco_generate_features_h)
# Write to a ".new" temp file first; see the configure_file() note below.
file(WRITE "${draco_features_file_name}.new"
"// GENERATED FILE -- DO NOT EDIT\n\n" "#ifndef DRACO_FEATURES_H_\n"
"#define DRACO_FEATURES_H_\n\n")
foreach(feature ${draco_features_list})
file(APPEND "${draco_features_file_name}.new" "#define ${feature}\n")
endforeach()
# On Windows, bake warning-suppression pragmas into the generated header
# unless the user asked to see the full warning set.
if(WIN32)
if(NOT DRACO_DEBUG_COMPILER_WARNINGS)
file(APPEND "${draco_features_file_name}.new"
"// Enable DRACO_DEBUG_COMPILER_WARNINGS at CMake generation \n"
"// time to remove these pragmas.\n")
# warning C4018: '<operator>': signed/unsigned mismatch.
file(APPEND "${draco_features_file_name}.new"
"#pragma warning(disable:4018)\n")
# warning C4146: unary minus operator applied to unsigned type, result
# still unsigned
file(APPEND "${draco_features_file_name}.new"
"#pragma warning(disable:4146)\n")
# warning C4244: 'return': conversion from '<type>' to '<type>', possible
# loss of data.
file(APPEND "${draco_features_file_name}.new"
"#pragma warning(disable:4244)\n")
# warning C4267: 'initializing' conversion from '<type>' to '<type>',
# possible loss of data.
file(APPEND "${draco_features_file_name}.new"
"#pragma warning(disable:4267)\n")
# warning C4305: 'context' : truncation from 'type1' to 'type2'.
file(APPEND "${draco_features_file_name}.new"
"#pragma warning(disable:4305)\n")
# warning C4661: 'identifier' : no suitable definition provided for
# explicit template instantiation request.
file(APPEND "${draco_features_file_name}.new"
"#pragma warning(disable:4661)\n")
# warning C4800: Implicit conversion from 'type' to bool. Possible
# information loss.
# Also, in older MSVC releases:
# warning C4800: 'type' : forcing value to bool 'true' or 'false'
# (performance warning).
file(APPEND "${draco_features_file_name}.new"
"#pragma warning(disable:4800)\n")
# warning C4804: '<operator>': unsafe use of type '<type>' in operation.
file(APPEND "${draco_features_file_name}.new"
"#pragma warning(disable:4804)\n")
endif()
endif()
file(APPEND "${draco_features_file_name}.new"
"\n#endif // DRACO_FEATURES_H_\n")
# Will replace ${draco_features_file_name} only if the file content has
# changed. This prevents forced Draco rebuilds after CMake runs.
configure_file("${draco_features_file_name}.new"
"${draco_features_file_name}")
file(REMOVE "${draco_features_file_name}.new")
endfunction()
# Sets default options for the build and processes user controlled options to
# compute enabled features.
macro(draco_setup_options)
# Declare option defaults first, then derive the enabled feature set.
draco_set_default_options()
draco_set_optional_features()
endmacro()

48
third-party/draco/cmake/draco_sanitizer.cmake generated vendored Normal file
View File

@ -0,0 +1,48 @@
# Copyright 2021 The Draco Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
if(DRACO_CMAKE_DRACO_SANITIZER_CMAKE_)
return()
endif() # DRACO_CMAKE_DRACO_SANITIZER_CMAKE_
set(DRACO_CMAKE_DRACO_SANITIZER_CMAKE_ 1)
# Handles the details of enabling sanitizers.
macro(draco_configure_sanitizer)
# Sanitizers are only configured for native non-MSVC builds.
if(DRACO_SANITIZE
AND NOT EMSCRIPTEN
AND NOT MSVC)
if(CMAKE_CXX_COMPILER_ID MATCHES "Clang")
# CFI requires LTO and (here) the gold linker.
if(DRACO_SANITIZE MATCHES "cfi")
list(APPEND SAN_CXX_FLAGS "-flto" "-fno-sanitize-trap=cfi")
list(APPEND SAN_LINKER_FLAGS "-flto" "-fno-sanitize-trap=cfi"
"-fuse-ld=gold")
endif()
# 32-bit integer/undefined sanitizer builds need compiler-rt explicitly.
if(${CMAKE_SIZEOF_VOID_P} EQUAL 4 AND DRACO_SANITIZE MATCHES
"integer|undefined")
list(APPEND SAN_LINKER_FLAGS "--rtlib=compiler-rt" "-lgcc_s")
endif()
endif()
list(APPEND SAN_CXX_FLAGS "-fsanitize=${DRACO_SANITIZE}")
list(APPEND SAN_LINKER_FLAGS "-fsanitize=${DRACO_SANITIZE}")
# Make sanitizer callstacks accurate.
list(APPEND SAN_CXX_FLAGS "-fno-omit-frame-pointer")
list(APPEND SAN_CXX_FLAGS "-fno-optimize-sibling-calls")
# FLAG_REQUIRED: fail configuration if the compiler rejects the flags.
draco_test_cxx_flag(FLAG_LIST_VAR_NAMES SAN_CXX_FLAGS FLAG_REQUIRED)
draco_test_exe_linker_flag(FLAG_LIST_VAR_NAME SAN_LINKER_FLAGS)
endif()
endmacro()

400
third-party/draco/cmake/draco_targets.cmake generated vendored Normal file
View File

@ -0,0 +1,400 @@
# Copyright 2021 The Draco Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
if(DRACO_CMAKE_DRACO_TARGETS_CMAKE_)
return()
endif() # DRACO_CMAKE_DRACO_TARGETS_CMAKE_
set(DRACO_CMAKE_DRACO_TARGETS_CMAKE_ 1)
# Resets list variables used to track draco targets.
macro(draco_reset_target_lists)
# Clears every bookkeeping list used by draco_add_executable()/
# draco_add_library() so a fresh configure starts from empty state.
unset(draco_targets)
unset(draco_exe_targets)
unset(draco_lib_targets)
unset(draco_objlib_targets)
unset(draco_module_targets)
unset(draco_sources)
unset(draco_test_targets)
endmacro()
# Creates an executable target. The target name is passed as a parameter to the
# NAME argument, and the sources passed as a parameter to the SOURCES argument:
# draco_add_executable(NAME <name> SOURCES <sources> [optional args])
#
# Optional args:
# cmake-format: off
# - OUTPUT_NAME: Override output file basename. Target basename defaults to
# NAME.
# - TEST: Flag. Presence means treat executable as a test.
# - DEFINES: List of preprocessor macro definitions.
# - INCLUDES: list of include directories for the target.
# - COMPILE_FLAGS: list of compiler flags for the target.
# - LINK_FLAGS: List of linker flags for the target.
# - OBJLIB_DEPS: List of CMake object library target dependencies.
# - LIB_DEPS: List of CMake library dependencies.
# cmake-format: on
#
# Sources passed to this macro are added to $draco_test_sources when TEST is
# specified. Otherwise sources are added to $draco_sources.
#
# Targets passed to this macro are always added to the $draco_targets list. When
# TEST is specified targets are also added to the $draco_test_targets list.
# Otherwise targets are added to $draco_exe_targets.
macro(draco_add_executable)
unset(exe_TEST)
unset(exe_TEST_DEFINES_MAIN)
unset(exe_NAME)
unset(exe_OUTPUT_NAME)
unset(exe_SOURCES)
unset(exe_DEFINES)
unset(exe_INCLUDES)
unset(exe_COMPILE_FLAGS)
unset(exe_LINK_FLAGS)
unset(exe_OBJLIB_DEPS)
unset(exe_LIB_DEPS)
set(optional_args TEST)
set(single_value_args NAME OUTPUT_NAME)
set(multi_value_args
SOURCES
DEFINES
INCLUDES
COMPILE_FLAGS
LINK_FLAGS
OBJLIB_DEPS
LIB_DEPS)
cmake_parse_arguments(exe "${optional_args}" "${single_value_args}"
"${multi_value_args}" ${ARGN})
if(DRACO_VERBOSE GREATER 1)
message(
"--------- draco_add_executable ---------\n"
"exe_TEST=${exe_TEST}\n"
"exe_TEST_DEFINES_MAIN=${exe_TEST_DEFINES_MAIN}\n"
"exe_NAME=${exe_NAME}\n"
"exe_OUTPUT_NAME=${exe_OUTPUT_NAME}\n"
"exe_SOURCES=${exe_SOURCES}\n"
"exe_DEFINES=${exe_DEFINES}\n"
"exe_INCLUDES=${exe_INCLUDES}\n"
"exe_COMPILE_FLAGS=${exe_COMPILE_FLAGS}\n"
"exe_LINK_FLAGS=${exe_LINK_FLAGS}\n"
"exe_OBJLIB_DEPS=${exe_OBJLIB_DEPS}\n"
"exe_LIB_DEPS=${exe_LIB_DEPS}\n"
"------------------------------------------\n")
endif()
if(NOT (exe_NAME AND exe_SOURCES))
message(FATAL_ERROR "draco_add_executable: NAME and SOURCES required.")
endif()
# Record the target/sources in the global tracking lists; TEST targets are
# tracked separately from regular executables.
list(APPEND draco_targets ${exe_NAME})
if(exe_TEST)
list(APPEND draco_test_targets ${exe_NAME})
list(APPEND draco_test_sources ${exe_SOURCES})
else()
list(APPEND draco_exe_targets ${exe_NAME})
list(APPEND draco_sources ${exe_SOURCES})
endif()
add_executable(${exe_NAME} ${exe_SOURCES})
target_compile_features(${exe_NAME} PUBLIC cxx_std_11)
if(NOT EMSCRIPTEN)
set_target_properties(${exe_NAME} PROPERTIES VERSION ${DRACO_VERSION})
endif()
if(exe_OUTPUT_NAME)
set_target_properties(${exe_NAME} PROPERTIES OUTPUT_NAME ${exe_OUTPUT_NAME})
endif()
# Attach per-file SIMD flags to any *_sse4 / *_neon sources.
draco_process_intrinsics_sources(TARGET ${exe_NAME} SOURCES ${exe_SOURCES})
if(exe_DEFINES)
target_compile_definitions(${exe_NAME} PRIVATE ${exe_DEFINES})
endif()
if(exe_INCLUDES)
target_include_directories(${exe_NAME} PRIVATE ${exe_INCLUDES})
endif()
if(exe_COMPILE_FLAGS OR DRACO_CXX_FLAGS)
target_compile_options(${exe_NAME} PRIVATE ${exe_COMPILE_FLAGS}
${DRACO_CXX_FLAGS})
endif()
# target_link_options() requires CMake 3.13; fall back to the legacy
# LINK_FLAGS string property on older versions.
if(exe_LINK_FLAGS OR DRACO_EXE_LINKER_FLAGS)
if(${CMAKE_VERSION} VERSION_LESS "3.13")
list(APPEND exe_LINK_FLAGS "${DRACO_EXE_LINKER_FLAGS}")
# LINK_FLAGS is managed as a string.
draco_set_and_stringify(SOURCE "${exe_LINK_FLAGS}" DEST exe_LINK_FLAGS)
set_target_properties(${exe_NAME} PROPERTIES LINK_FLAGS
"${exe_LINK_FLAGS}")
else()
target_link_options(${exe_NAME} PRIVATE ${exe_LINK_FLAGS}
${DRACO_EXE_LINKER_FLAGS})
endif()
endif()
# Object libraries contribute their objects directly as sources.
if(exe_OBJLIB_DEPS)
foreach(objlib_dep ${exe_OBJLIB_DEPS})
target_sources(${exe_NAME} PRIVATE $<TARGET_OBJECTS:${objlib_dep}>)
endforeach()
endif()
if(CMAKE_THREAD_LIBS_INIT)
list(APPEND exe_LIB_DEPS ${CMAKE_THREAD_LIBS_INIT})
endif()
if(BUILD_SHARED_LIBS AND (MSVC OR WIN32))
target_compile_definitions(${exe_NAME} PRIVATE "DRACO_BUILDING_DLL=0")
endif()
if(exe_LIB_DEPS)
if(CMAKE_CXX_COMPILER_ID MATCHES "^Clang|^GNU")
# Third party dependencies can introduce dependencies on system and test
# libraries. Since the target created here is an executable, and CMake
# does not provide a method of controlling order of link dependencies,
# wrap all of the dependencies of this target in start/end group flags to
# ensure that dependencies of third party targets can be resolved when
# those dependencies happen to be resolved by dependencies of the current
# target.
# TODO(tomfinegan): For portability use LINK_GROUP with RESCAN instead of
# directly (ab)using compiler/linker specific flags once CMake v3.24 is in
# wider use. See:
# https://cmake.org/cmake/help/latest/manual/cmake-generator-expressions.7.html#genex:LINK_GROUP
list(INSERT exe_LIB_DEPS 0 -Wl,--start-group)
list(APPEND exe_LIB_DEPS -Wl,--end-group)
endif()
target_link_libraries(${exe_NAME} PRIVATE ${exe_LIB_DEPS})
endif()
endmacro()
# Creates a library target of the specified type. The target name is passed as a
# parameter to the NAME argument, the type as a parameter to the TYPE argument,
# and the sources passed as a parameter to the SOURCES argument:
# draco_add_library(NAME <name> TYPE <type> SOURCES <sources> [optional args])
#
# Optional args:
# cmake-format: off
# - OUTPUT_NAME: Override output file basename. Target basename defaults to
# NAME. OUTPUT_NAME is ignored when BUILD_SHARED_LIBS is enabled and CMake
# is generating a build for which MSVC is true. This is to avoid output
# basename collisions with DLL import libraries.
# - TEST: Flag. Presence means treat library as a test.
# - DEFINES: List of preprocessor macro definitions.
# - INCLUDES: list of include directories for the target.
# - COMPILE_FLAGS: list of compiler flags for the target.
# - LINK_FLAGS: List of linker flags for the target.
# - OBJLIB_DEPS: List of CMake object library target dependencies.
# - LIB_DEPS: List of CMake library dependencies.
# - PUBLIC_INCLUDES: List of include paths to export to dependents.
# cmake-format: on
#
# Sources passed to the macro are added to the lists tracking draco sources:
# cmake-format: off
# - When TEST is specified sources are added to $draco_test_sources.
# - Otherwise sources are added to $draco_sources.
# cmake-format: on
#
# Targets passed to this macro are added to the lists tracking draco targets:
# cmake-format: off
# - Targets are always added to $draco_targets.
# - When the TEST flag is specified, targets are added to
# $draco_test_targets.
# - When TEST is not specified:
# - Libraries of type SHARED are added to $draco_dylib_targets.
# - Libraries of type OBJECT are added to $draco_objlib_targets.
# - Libraries of type STATIC are added to $draco_lib_targets.
# cmake-format: on
# Creates a draco library target and registers it in the draco tracking lists.
# See the banner comment above for the full argument reference. Because this is
# a macro (not a function), all lib_* variables leak into the caller's scope;
# they are unset up front so values from a previous invocation cannot bleed
# into this one (cmake_parse_arguments leaves absent keywords undefined).
macro(draco_add_library)
  unset(lib_TEST)
  unset(lib_NAME)
  unset(lib_OUTPUT_NAME)
  unset(lib_TYPE)
  unset(lib_SOURCES)
  unset(lib_DEFINES)
  unset(lib_INCLUDES)
  unset(lib_COMPILE_FLAGS)
  unset(lib_LINK_FLAGS)
  unset(lib_OBJLIB_DEPS)
  unset(lib_LIB_DEPS)
  unset(lib_PUBLIC_INCLUDES)
  unset(lib_TARGET_PROPERTIES)
  set(optional_args TEST)
  set(single_value_args NAME OUTPUT_NAME TYPE)
  set(multi_value_args
      SOURCES
      DEFINES
      INCLUDES
      COMPILE_FLAGS
      LINK_FLAGS
      OBJLIB_DEPS
      LIB_DEPS
      PUBLIC_INCLUDES
      TARGET_PROPERTIES)
  cmake_parse_arguments(lib "${optional_args}" "${single_value_args}"
                        "${multi_value_args}" ${ARGN})
  # Dump all parsed arguments when DRACO_VERBOSE is 2 or higher.
  if(DRACO_VERBOSE GREATER 1)
    message(
      "--------- draco_add_library ---------\n"
      "lib_TEST=${lib_TEST}\n"
      "lib_NAME=${lib_NAME}\n"
      "lib_OUTPUT_NAME=${lib_OUTPUT_NAME}\n"
      "lib_TYPE=${lib_TYPE}\n"
      "lib_SOURCES=${lib_SOURCES}\n"
      "lib_DEFINES=${lib_DEFINES}\n"
      "lib_INCLUDES=${lib_INCLUDES}\n"
      "lib_COMPILE_FLAGS=${lib_COMPILE_FLAGS}\n"
      "lib_LINK_FLAGS=${lib_LINK_FLAGS}\n"
      "lib_OBJLIB_DEPS=${lib_OBJLIB_DEPS}\n"
      "lib_LIB_DEPS=${lib_LIB_DEPS}\n"
      "lib_PUBLIC_INCLUDES=${lib_PUBLIC_INCLUDES}\n"
      "---------------------------------------\n")
  endif()
  if(NOT (lib_NAME AND lib_TYPE))
    message(FATAL_ERROR "draco_add_library: NAME and TYPE required.")
  endif()
  # Track the target (and its sources) in the global draco lists documented in
  # the banner comment above.
  list(APPEND draco_targets ${lib_NAME})
  if(lib_TEST)
    list(APPEND draco_test_targets ${lib_NAME})
    list(APPEND draco_test_sources ${lib_SOURCES})
  else()
    list(APPEND draco_sources ${lib_SOURCES})
    if(lib_TYPE STREQUAL MODULE)
      list(APPEND draco_module_targets ${lib_NAME})
    elseif(lib_TYPE STREQUAL OBJECT)
      list(APPEND draco_objlib_targets ${lib_NAME})
    elseif(lib_TYPE STREQUAL SHARED)
      list(APPEND draco_dylib_targets ${lib_NAME})
    elseif(lib_TYPE STREQUAL STATIC)
      list(APPEND draco_lib_targets ${lib_NAME})
    else()
      message(WARNING "draco_add_library: Unhandled type: ${lib_TYPE}")
    endif()
  endif()
  add_library(${lib_NAME} ${lib_TYPE} ${lib_SOURCES})
  # draco requires at least C++11; propagate the requirement to dependents.
  target_compile_features(${lib_NAME} PUBLIC cxx_std_11)
  target_include_directories(${lib_NAME} PUBLIC $<INSTALL_INTERFACE:include>)
  if(BUILD_SHARED_LIBS)
    # Enable PIC for all targets in shared configurations.
    set_target_properties(${lib_NAME} PROPERTIES POSITION_INDEPENDENT_CODE ON)
  endif()
  if(lib_SOURCES)
    draco_process_intrinsics_sources(TARGET ${lib_NAME} SOURCES ${lib_SOURCES})
  endif()
  if(lib_OUTPUT_NAME)
    # OUTPUT_NAME is ignored for MSVC shared builds to avoid basename
    # collisions with DLL import libraries (see banner comment).
    if(NOT (BUILD_SHARED_LIBS AND MSVC))
      set_target_properties(${lib_NAME} PROPERTIES OUTPUT_NAME
                                                   ${lib_OUTPUT_NAME})
    endif()
  endif()
  if(lib_DEFINES)
    target_compile_definitions(${lib_NAME} PRIVATE ${lib_DEFINES})
  endif()
  if(lib_INCLUDES)
    target_include_directories(${lib_NAME} PRIVATE ${lib_INCLUDES})
  endif()
  if(lib_PUBLIC_INCLUDES)
    target_include_directories(${lib_NAME} PUBLIC ${lib_PUBLIC_INCLUDES})
  endif()
  if(lib_COMPILE_FLAGS OR DRACO_CXX_FLAGS)
    target_compile_options(${lib_NAME} PRIVATE ${lib_COMPILE_FLAGS}
                                               ${DRACO_CXX_FLAGS})
  endif()
  if(lib_LINK_FLAGS)
    set_target_properties(${lib_NAME} PROPERTIES LINK_FLAGS ${lib_LINK_FLAGS})
  endif()
  if(lib_OBJLIB_DEPS)
    # Object libraries contribute their objects directly as sources.
    foreach(objlib_dep ${lib_OBJLIB_DEPS})
      target_sources(${lib_NAME} PRIVATE $<TARGET_OBJECTS:${objlib_dep}>)
    endforeach()
  endif()
  if(lib_LIB_DEPS)
    # Static libraries export their dependencies to consumers; other library
    # types keep them private.
    if(lib_TYPE STREQUAL STATIC)
      set(link_type PUBLIC)
    else()
      set(link_type PRIVATE)
      if(lib_TYPE STREQUAL SHARED AND CMAKE_CXX_COMPILER_ID MATCHES "Clang|GNU")
        # The draco shared object uses the static draco as input to turn it into
        # a shared object. Include everything from the static library in the
        # shared object.
        if(APPLE)
          list(INSERT lib_LIB_DEPS 0 -Wl,-force_load)
        else()
          list(INSERT lib_LIB_DEPS 0 -Wl,--whole-archive)
          list(APPEND lib_LIB_DEPS -Wl,--no-whole-archive)
        endif()
      endif()
    endif()
    target_link_libraries(${lib_NAME} ${link_type} ${lib_LIB_DEPS})
  endif()
  if(NOT MSVC AND lib_NAME MATCHES "^lib")
    # Non-MSVC generators prepend lib to static lib target file names. Libdraco
    # already includes lib in its name. Avoid naming output files liblib*.
    set_target_properties(${lib_NAME} PROPERTIES PREFIX "")
  endif()
  if(NOT EMSCRIPTEN)
    # VERSION and SOVERSION as necessary
    if((lib_TYPE STREQUAL BUNDLE OR lib_TYPE STREQUAL SHARED) AND NOT MSVC)
      set_target_properties(
        ${lib_NAME} PROPERTIES VERSION ${DRACO_SOVERSION}
                               SOVERSION ${DRACO_SOVERSION_MAJOR})
    endif()
  endif()
  # On Windows shared builds, mark whether this target is the DLL being built
  # (exports symbols) or a consumer of it (imports symbols).
  if(BUILD_SHARED_LIBS AND (MSVC OR WIN32))
    if(lib_TYPE STREQUAL SHARED)
      target_compile_definitions(${lib_NAME} PRIVATE "DRACO_BUILDING_DLL=1")
    else()
      target_compile_definitions(${lib_NAME} PRIVATE "DRACO_BUILDING_DLL=0")
    endif()
  endif()
  # Determine if $lib_NAME is a header only target.
  unset(sources_list)
  if(lib_SOURCES)
    set(sources_list ${lib_SOURCES})
    list(FILTER sources_list INCLUDE REGEX cc$)
  endif()
  if(NOT sources_list)
    if(NOT XCODE)
      # This is a header only target. Tell CMake the link language.
      set_target_properties(${lib_NAME} PROPERTIES LINKER_LANGUAGE CXX)
    else()
      # The Xcode generator ignores LINKER_LANGUAGE. Add a dummy cc file.
      draco_create_dummy_source_file(TARGET ${lib_NAME} BASENAME ${lib_NAME})
    endif()
  endif()
endmacro()

28
third-party/draco/cmake/draco_test_config.h.cmake generated vendored Normal file
View File

@ -0,0 +1,28 @@
// Copyright 2021 The Draco Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef DRACO_TESTING_DRACO_TEST_CONFIG_H_
#define DRACO_TESTING_DRACO_TEST_CONFIG_H_
// If this file is named draco_test_config.h.cmake:
// This file is used as input at cmake generation time.
// If this file is named draco_test_config.h:
// GENERATED FILE, DO NOT EDIT. SEE ABOVE.
// The ${...} placeholders below are replaced by configure_file() in
// draco_tests.cmake (draco_setup_test_targets) with the test data, temp, and
// source root directories.
#define DRACO_TEST_DATA_DIR "${DRACO_TEST_DATA_DIR}"
#define DRACO_TEST_TEMP_DIR "${DRACO_TEST_TEMP_DIR}"
#define DRACO_TEST_ROOT_DIR "${DRACO_TEST_ROOT_DIR}"
#endif  // DRACO_TESTING_DRACO_TEST_CONFIG_H_

173
third-party/draco/cmake/draco_tests.cmake generated vendored Normal file
View File

@ -0,0 +1,173 @@
# Copyright 2021 The Draco Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# Include guard: this file may be included from multiple CMake lists.
if(DRACO_CMAKE_DRACO_TESTS_CMAKE)
  return()
endif()
set(DRACO_CMAKE_DRACO_TESTS_CMAKE 1)

# The factory tests are in a separate target to avoid breaking tests that rely
# on file I/O via the factories. The fake reader and writer implementations
# interfere with normal file I/O function.
set(draco_factory_test_sources
    "${draco_src_root}/io/file_reader_factory_test.cc"
    "${draco_src_root}/io/file_writer_factory_test.cc")

# Shared helpers linked into every test executable (draco_test_common).
list(
  APPEND draco_test_common_sources
  "${draco_src_root}/core/draco_test_base.h"
  "${draco_src_root}/core/draco_test_utils.cc"
  "${draco_src_root}/core/draco_test_utils.h"
  "${draco_src_root}/core/status.cc")

# Sources for the main draco_tests executable.
list(
  APPEND
  draco_test_sources
  "${draco_src_root}/animation/keyframe_animation_encoding_test.cc"
  "${draco_src_root}/animation/keyframe_animation_test.cc"
  "${draco_src_root}/attributes/point_attribute_test.cc"
  "${draco_src_root}/compression/attributes/point_d_vector_test.cc"
  "${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_transform_test.cc"
  "${draco_src_root}/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_transform_test.cc"
  "${draco_src_root}/compression/attributes/sequential_integer_attribute_encoding_test.cc"
  "${draco_src_root}/compression/bit_coders/rans_coding_test.cc"
  "${draco_src_root}/compression/decode_test.cc"
  "${draco_src_root}/compression/encode_test.cc"
  "${draco_src_root}/compression/entropy/shannon_entropy_test.cc"
  "${draco_src_root}/compression/entropy/symbol_coding_test.cc"
  "${draco_src_root}/compression/mesh/mesh_edgebreaker_encoding_test.cc"
  "${draco_src_root}/compression/mesh/mesh_encoder_test.cc"
  "${draco_src_root}/compression/point_cloud/point_cloud_kd_tree_encoding_test.cc"
  "${draco_src_root}/compression/point_cloud/point_cloud_sequential_encoding_test.cc"
  "${draco_src_root}/core/buffer_bit_coding_test.cc"
  "${draco_src_root}/core/math_utils_test.cc"
  "${draco_src_root}/core/quantization_utils_test.cc"
  "${draco_src_root}/core/status_test.cc"
  "${draco_src_root}/core/vector_d_test.cc"
  "${draco_src_root}/io/file_reader_test_common.h"
  "${draco_src_root}/io/file_utils_test.cc"
  "${draco_src_root}/io/file_writer_utils_test.cc"
  "${draco_src_root}/io/stdio_file_reader_test.cc"
  "${draco_src_root}/io/stdio_file_writer_test.cc"
  "${draco_src_root}/io/obj_decoder_test.cc"
  "${draco_src_root}/io/obj_encoder_test.cc"
  "${draco_src_root}/io/ply_decoder_test.cc"
  "${draco_src_root}/io/ply_reader_test.cc"
  "${draco_src_root}/io/stl_decoder_test.cc"
  "${draco_src_root}/io/stl_encoder_test.cc"
  "${draco_src_root}/io/point_cloud_io_test.cc"
  "${draco_src_root}/mesh/corner_table_test.cc"
  "${draco_src_root}/mesh/mesh_are_equivalent_test.cc"
  "${draco_src_root}/mesh/mesh_cleanup_test.cc"
  "${draco_src_root}/mesh/triangle_soup_mesh_builder_test.cc"
  "${draco_src_root}/metadata/metadata_encoder_test.cc"
  "${draco_src_root}/metadata/metadata_test.cc"
  "${draco_src_root}/point_cloud/point_cloud_builder_test.cc"
  "${draco_src_root}/point_cloud/point_cloud_test.cc")

# Transcoder (glTF/scene) tests are only built when transcoding support is
# enabled in the configuration.
if(DRACO_TRANSCODER_SUPPORTED)
  list(
    APPEND draco_test_sources
    "${draco_src_root}/animation/animation_test.cc"
    "${draco_src_root}/io/gltf_decoder_test.cc"
    "${draco_src_root}/io/gltf_encoder_test.cc"
    "${draco_src_root}/io/gltf_utils_test.cc"
    "${draco_src_root}/io/gltf_test_helper.cc"
    "${draco_src_root}/io/gltf_test_helper.h"
    "${draco_src_root}/io/scene_io_test.cc"
    "${draco_src_root}/io/texture_io_test.cc"
    "${draco_src_root}/material/material_library_test.cc"
    "${draco_src_root}/material/material_test.cc"
    "${draco_src_root}/metadata/property_attribute_test.cc"
    "${draco_src_root}/metadata/property_table_test.cc"
    "${draco_src_root}/metadata/structural_metadata_test.cc"
    "${draco_src_root}/metadata/structural_metadata_schema_test.cc"
    "${draco_src_root}/scene/instance_array_test.cc"
    "${draco_src_root}/scene/light_test.cc"
    "${draco_src_root}/scene/mesh_group_test.cc"
    "${draco_src_root}/scene/scene_test.cc"
    "${draco_src_root}/scene/scene_are_equivalent_test.cc"
    "${draco_src_root}/scene/scene_utils_test.cc"
    "${draco_src_root}/scene/trs_matrix_test.cc"
    "${draco_src_root}/texture/texture_library_test.cc"
    "${draco_src_root}/texture/texture_map_test.cc"
    "${draco_src_root}/texture/texture_transform_test.cc")
endif()
# Creates the googletest support libraries and the draco_tests /
# draco_factory_tests executables. No-op unless DRACO_TESTS is enabled.
macro(draco_setup_test_targets)
  if(DRACO_TESTS)
    draco_setup_googletest()
    if(NOT (EXISTS ${draco_gtest_all} AND EXISTS ${draco_gtest_main}))
      message(FATAL_ERROR "googletest missing, run git submodule update --init")
    endif()
    list(APPEND draco_test_defines GTEST_HAS_PTHREAD=0)
    # Common test utilities shared by both test executables.
    draco_add_library(
      TEST
      NAME draco_test_common
      TYPE STATIC
      SOURCES ${draco_test_common_sources}
      DEFINES ${draco_defines} ${draco_test_defines}
      INCLUDES ${draco_test_include_paths})
    # googletest itself, built from the amalgamated gtest-all/gtest_main.
    draco_add_library(
      TEST
      NAME draco_gtest
      TYPE STATIC
      SOURCES ${draco_gtest_all}
      DEFINES ${draco_defines} ${draco_test_defines}
      INCLUDES ${draco_test_include_paths})
    draco_add_library(
      TEST
      NAME draco_gtest_main
      TYPE STATIC
      SOURCES ${draco_gtest_main}
      DEFINES ${draco_defines} ${draco_test_defines}
      INCLUDES ${draco_test_include_paths})
    set(DRACO_TEST_DATA_DIR "${draco_root}/testdata")
    set(DRACO_TEST_TEMP_DIR "${draco_build}/draco_test_temp")
    set(DRACO_TEST_ROOT_DIR "${draco_root}")
    file(MAKE_DIRECTORY "${DRACO_TEST_TEMP_DIR}")
    # Sets DRACO_TEST_DATA_DIR and DRACO_TEST_TEMP_DIR.
    configure_file("${draco_root}/cmake/draco_test_config.h.cmake"
                   "${draco_build}/testing/draco_test_config.h")
    # Create the test targets.
    draco_add_executable(
      TEST
      NAME draco_tests
      SOURCES ${draco_test_sources}
      DEFINES ${draco_defines} ${draco_test_defines}
      INCLUDES ${draco_test_include_paths}
      LIB_DEPS ${draco_dependency} draco_gtest draco_gtest_main
               draco_test_common)
    # Factory tests run in a separate binary; see the comment on
    # draco_factory_test_sources above.
    draco_add_executable(
      TEST
      NAME draco_factory_tests
      SOURCES ${draco_factory_test_sources}
      DEFINES ${draco_defines} ${draco_test_defines}
      INCLUDES ${draco_test_include_paths}
      LIB_DEPS ${draco_dependency} draco_gtest draco_gtest_main
               draco_test_common)
  endif()
endmacro()

79
third-party/draco/cmake/draco_variables.cmake generated vendored Normal file
View File

@ -0,0 +1,79 @@
# Copyright 2021 The Draco Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# Include guard: this file may be included from multiple CMake lists.
if(DRACO_CMAKE_DRACO_VARIABLES_CMAKE_)
  return()
endif() # DRACO_CMAKE_DRACO_VARIABLES_CMAKE_
set(DRACO_CMAKE_DRACO_VARIABLES_CMAKE_ 1)

# Halts generation when $variable_name does not refer to a directory that
# exists. Note: the argument is the *name* of a variable, not its value;
# ${${variable_name}} below dereferences it.
macro(draco_variable_must_be_directory variable_name)
  if("${variable_name}" STREQUAL "")
    message(
      FATAL_ERROR
        "Empty variable_name passed to draco_variable_must_be_directory.")
  endif()
  if("${${variable_name}}" STREQUAL "")
    message(
      FATAL_ERROR "Empty variable ${variable_name} is required to build draco.")
  endif()
  if(NOT IS_DIRECTORY "${${variable_name}}")
    message(
      FATAL_ERROR
        "${variable_name}, which is ${${variable_name}}, does not refer to a\n"
        "directory.")
  endif()
endmacro()

# Adds $var_name to the tracked variables list (deduplicated), for later
# dumping via draco_dump_tracked_configuration_variables.
macro(draco_track_configuration_variable var_name)
  if(DRACO_VERBOSE GREATER 2)
    message("---- draco_track_configuration_variable ----\n"
            "var_name=${var_name}\n"
            "----------------------------------------------\n")
  endif()
  list(APPEND draco_configuration_variables ${var_name})
  list(REMOVE_DUPLICATES draco_configuration_variables)
endmacro()

# Logs current C++ and executable linker flags via the CMake message command.
macro(draco_dump_cmake_flag_variables)
  unset(flag_variables)
  list(APPEND flag_variables "CMAKE_CXX_FLAGS_INIT" "CMAKE_CXX_FLAGS"
              "CMAKE_EXE_LINKER_FLAGS_INIT" "CMAKE_EXE_LINKER_FLAGS")
  # Include the per-build-type variants when a build type is set.
  if(CMAKE_BUILD_TYPE)
    list(
      APPEND flag_variables
      "CMAKE_BUILD_TYPE"
      "CMAKE_CXX_FLAGS_${CMAKE_BUILD_TYPE}_INIT"
      "CMAKE_CXX_FLAGS_${CMAKE_BUILD_TYPE}"
      "CMAKE_EXE_LINKER_FLAGS_${CMAKE_BUILD_TYPE}_INIT"
      "CMAKE_EXE_LINKER_FLAGS_${CMAKE_BUILD_TYPE}")
  endif()
  foreach(flag_variable ${flag_variables})
    message("${flag_variable}:${${flag_variable}}")
  endforeach()
endmacro()

# Dumps the variables tracked in $draco_configuration_variables via the CMake
# message command.
macro(draco_dump_tracked_configuration_variables)
  foreach(config_variable ${draco_configuration_variables})
    message("${config_variable}:${${config_variable}}")
  endforeach()
endmacro()

View File

@ -0,0 +1,28 @@
# Copyright 2021 The Draco Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# Cross-compilation toolchain: 64-bit ARM Linux via the GNU toolchain.
if(DRACO_CMAKE_TOOLCHAINS_AARCH64_LINUX_GNU_CMAKE_)
  return()
endif() # DRACO_CMAKE_TOOLCHAINS_AARCH64_LINUX_GNU_CMAKE_
set(DRACO_CMAKE_TOOLCHAINS_AARCH64_LINUX_GNU_CMAKE_ 1)
set(CMAKE_SYSTEM_NAME "Linux")
# CROSS is the cross-compiler binary prefix; callers may override it.
if("${CROSS}" STREQUAL "")
  set(CROSS aarch64-linux-gnu-)
endif()
set(CMAKE_CXX_COMPILER ${CROSS}g++)
set(CMAKE_CXX_FLAGS_INIT "-march=armv8-a")
set(CMAKE_SYSTEM_PROCESSOR "aarch64")

View File

@ -0,0 +1,37 @@
# Copyright 2021 The Draco Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# Settings shared by the per-ABI Android NDK toolchain files.
if(DRACO_CMAKE_TOOLCHAINS_ANDROID_NDK_COMMON_CMAKE_)
  return()
endif()
set(DRACO_CMAKE_TOOLCHAINS_ANDROID_NDK_COMMON_CMAKE_ 1)
# Toolchain files do not have access to cached variables:
# https://gitlab.kitware.com/cmake/cmake/issues/16170. Set an intermediate
# environment variable when loaded the first time.
if(DRACO_ANDROID_NDK_PATH)
  set(ENV{DRACO_ANDROID_NDK_PATH} "${DRACO_ANDROID_NDK_PATH}")
else()
  set(DRACO_ANDROID_NDK_PATH "$ENV{DRACO_ANDROID_NDK_PATH}")
endif()
set(CMAKE_SYSTEM_NAME Android)
# Default to the static libc++ and the clang NDK toolchain unless the caller
# chose otherwise.
if(NOT CMAKE_ANDROID_STL_TYPE)
  set(CMAKE_ANDROID_STL_TYPE c++_static)
endif()
if(NOT CMAKE_ANDROID_NDK_TOOLCHAIN_VERSION)
  set(CMAKE_ANDROID_NDK_TOOLCHAIN_VERSION clang)
endif()

53
third-party/draco/cmake/toolchains/android.cmake generated vendored Normal file
View File

@ -0,0 +1,53 @@
# Copyright 2021 The Draco Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# Generic Android toolchain wrapper around the NDK's own toolchain file.
if(DRACO_CMAKE_TOOLCHAINS_ANDROID_CMAKE_)
  return()
endif() # DRACO_CMAKE_TOOLCHAINS_ANDROID_CMAKE_
# Fix: the guard variable was checked above but never set, so this file was
# never actually guarded against re-inclusion. Set it, matching every other
# toolchain file in this directory.
set(DRACO_CMAKE_TOOLCHAINS_ANDROID_CMAKE_ 1)

# Additional ANDROID_* settings are available, see:
# https://developer.android.com/ndk/guides/cmake#variables
if(NOT ANDROID_PLATFORM)
  set(ANDROID_PLATFORM android-21)
endif()

# Choose target architecture with:
#
# -DANDROID_ABI={armeabi-v7a,armeabi-v7a with NEON,arm64-v8a,x86,x86_64}
if(NOT ANDROID_ABI)
  set(ANDROID_ABI arm64-v8a)
endif()

# Force arm mode for 32-bit arm targets (instead of the default thumb) to
# improve performance.
if(ANDROID_ABI MATCHES "^armeabi" AND NOT ANDROID_ARM_MODE)
  set(ANDROID_ARM_MODE arm)
endif()

# Toolchain files do not have access to cached variables:
# https://gitlab.kitware.com/cmake/cmake/issues/16170. Set an intermediate
# environment variable when loaded the first time.
if(DRACO_ANDROID_NDK_PATH)
  set(ENV{DRACO_ANDROID_NDK_PATH} "${DRACO_ANDROID_NDK_PATH}")
else()
  set(DRACO_ANDROID_NDK_PATH "$ENV{DRACO_ANDROID_NDK_PATH}")
endif()

if(NOT DRACO_ANDROID_NDK_PATH)
  message(FATAL_ERROR "DRACO_ANDROID_NDK_PATH not set.")
  return()
endif()

include("${DRACO_ANDROID_NDK_PATH}/build/cmake/android.toolchain.cmake")

View File

@ -0,0 +1,29 @@
# Copyright 2021 The Draco Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# Settings shared by the iOS toolchain files (armv7, armv7s, arm64, i386,
# x86_64). Expects CMAKE_SYSTEM_PROCESSOR to be set by the including file.
if(DRACO_CMAKE_TOOLCHAINS_ARM_IOS_COMMON_CMAKE_)
  return()
endif()
# Fix: the variable set here previously omitted the TOOLCHAINS_ component
# (DRACO_CMAKE_ARM_IOS_COMMON_CMAKE_), so it never matched the guard check
# above and the guard was dead. Set the variable the guard actually tests.
set(DRACO_CMAKE_TOOLCHAINS_ARM_IOS_COMMON_CMAKE_ 1)
set(CMAKE_SYSTEM_NAME "Darwin")
# Default to the device SDK; simulator toolchains set CMAKE_OSX_SDK first.
if(CMAKE_OSX_SDK)
  set(CMAKE_OSX_SYSROOT ${CMAKE_OSX_SDK})
else()
  set(CMAKE_OSX_SYSROOT iphoneos)
endif()
set(CMAKE_C_COMPILER clang)
set(CMAKE_C_COMPILER_ARG1 "-arch ${CMAKE_SYSTEM_PROCESSOR}")
set(CMAKE_CXX_COMPILER clang++)
set(CMAKE_CXX_COMPILER_ARG1 "-arch ${CMAKE_SYSTEM_PROCESSOR}")

View File

@ -0,0 +1,29 @@
# Copyright 2021 The Draco Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# Cross-compilation toolchain: 32-bit ARM (hard-float) Linux via GNU g++.
if(DRACO_CMAKE_TOOLCHAINS_ARM_LINUX_GNUEABIHF_CMAKE_)
  return()
endif() # DRACO_CMAKE_TOOLCHAINS_ARM_LINUX_GNUEABIHF_CMAKE_
set(DRACO_CMAKE_TOOLCHAINS_ARM_LINUX_GNUEABIHF_CMAKE_ 1)
set(CMAKE_SYSTEM_NAME "Linux")
# CROSS is the cross-compiler binary prefix; callers may override it.
if("${CROSS}" STREQUAL "")
  set(CROSS arm-linux-gnueabihf-)
endif()
set(CMAKE_CXX_COMPILER ${CROSS}g++)
set(CMAKE_CXX_FLAGS_INIT "-march=armv7-a -marm")
set(CMAKE_SYSTEM_PROCESSOR "armv7")
# NEON intrinsics require an explicit FPU flag on 32-bit ARM.
set(DRACO_NEON_INTRINSICS_FLAG "-mfpu=neon")

View File

@ -0,0 +1,30 @@
# Copyright 2021 The Draco Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# Android NDK toolchain for the arm64-v8a ABI with libc++.
if(DRACO_CMAKE_TOOLCHAINS_ARM64_ANDROID_NDK_LIBCPP_CMAKE_)
  return()
endif()
set(DRACO_CMAKE_TOOLCHAINS_ARM64_ANDROID_NDK_LIBCPP_CMAKE_ 1)

include("${CMAKE_CURRENT_LIST_DIR}/android-ndk-common.cmake")

if(NOT ANDROID_PLATFORM)
  # Fix: this previously set the misspelled variable ANROID_PLATFORM, so the
  # android-21 default was never applied when ANDROID_PLATFORM was unset.
  set(ANDROID_PLATFORM android-21)
endif()

if(NOT ANDROID_ABI)
  set(ANDROID_ABI arm64-v8a)
endif()

include("${DRACO_ANDROID_NDK_PATH}/build/cmake/android.toolchain.cmake")

27
third-party/draco/cmake/toolchains/arm64-ios.cmake generated vendored Normal file
View File

@ -0,0 +1,27 @@
# Copyright 2021 The Draco Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# iOS device toolchain targeting arm64. Not usable with the Xcode generator.
if(DRACO_CMAKE_TOOLCHAINS_ARM64_IOS_CMAKE_)
  return()
endif()
set(DRACO_CMAKE_TOOLCHAINS_ARM64_IOS_CMAKE_ 1)
if(XCODE)
  message(FATAL_ERROR "This toolchain does not support Xcode.")
endif()
set(CMAKE_SYSTEM_PROCESSOR "arm64")
set(CMAKE_OSX_ARCHITECTURES "arm64")
# Shared compiler/SDK setup; reads CMAKE_SYSTEM_PROCESSOR set above.
include("${CMAKE_CURRENT_LIST_DIR}/arm-ios-common.cmake")

View File

@ -0,0 +1,32 @@
# Copyright 2021 The Draco Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# Cross-compilation toolchain: 64-bit ARM Linux via GNU gcc/g++.
if(DRACO_CMAKE_TOOLCHAINS_ARM64_LINUX_GCC_CMAKE_)
  return()
endif()
set(DRACO_CMAKE_TOOLCHAINS_ARM64_LINUX_GCC_CMAKE_ 1)
set(CMAKE_SYSTEM_NAME "Linux")
if("${CROSS}" STREQUAL "")
  # Default the cross compiler prefix to something known to work.
  set(CROSS aarch64-linux-gnu-)
endif()
set(CMAKE_C_COMPILER ${CROSS}gcc)
set(CMAKE_CXX_COMPILER ${CROSS}g++)
set(AS_EXECUTABLE ${CROSS}as)
set(CMAKE_C_COMPILER_ARG1 "-march=armv8-a")
set(CMAKE_CXX_COMPILER_ARG1 "-march=armv8-a")
set(CMAKE_SYSTEM_PROCESSOR "arm64")

View File

@ -0,0 +1,30 @@
# Copyright 2021 The Draco Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# Android NDK toolchain for the armeabi-v7a ABI with libc++.
if(DRACO_CMAKE_TOOLCHAINS_ARMV7_ANDROID_NDK_LIBCPP_CMAKE_)
  return()
endif()
set(DRACO_CMAKE_TOOLCHAINS_ARMV7_ANDROID_NDK_LIBCPP_CMAKE_ 1)
include("${CMAKE_CURRENT_LIST_DIR}/android-ndk-common.cmake")
# Default API level and ABI; callers may set these before including this file.
if(NOT ANDROID_PLATFORM)
  set(ANDROID_PLATFORM android-18)
endif()
if(NOT ANDROID_ABI)
  set(ANDROID_ABI armeabi-v7a)
endif()
include("${DRACO_ANDROID_NDK_PATH}/build/cmake/android.toolchain.cmake")

27
third-party/draco/cmake/toolchains/armv7-ios.cmake generated vendored Normal file
View File

@ -0,0 +1,27 @@
# Copyright 2021 The Draco Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# iOS device toolchain targeting armv7. Not usable with the Xcode generator.
if(DRACO_CMAKE_TOOLCHAINS_ARMV7_IOS_CMAKE_)
  return()
endif()
set(DRACO_CMAKE_TOOLCHAINS_ARMV7_IOS_CMAKE_ 1)
if(XCODE)
  message(FATAL_ERROR "This toolchain does not support Xcode.")
endif()
set(CMAKE_SYSTEM_PROCESSOR "armv7")
set(CMAKE_OSX_ARCHITECTURES "armv7")
# Shared compiler/SDK setup; reads CMAKE_SYSTEM_PROCESSOR set above.
include("${CMAKE_CURRENT_LIST_DIR}/arm-ios-common.cmake")

View File

@ -0,0 +1,38 @@
# Copyright 2021 The Draco Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# Cross-compilation toolchain: 32-bit ARMv7 Linux via GNU gcc/g++.
if(DRACO_CMAKE_TOOLCHAINS_ARMV7_LINUX_GCC_CMAKE_)
  return()
endif()
set(DRACO_CMAKE_TOOLCHAINS_ARMV7_LINUX_GCC_CMAKE_ 1)
set(CMAKE_SYSTEM_NAME "Linux")
if("${CROSS}" STREQUAL "")
  # Default the cross compiler prefix to something known to work.
  set(CROSS arm-linux-gnueabihf-)
endif()
# Only hard-float (hf) toolchains imply a hard-float ABI; for any other
# prefix, fall back to softfp so the compiler still emits FP instructions.
if(NOT ${CROSS} MATCHES hf-$)
  set(DRACO_EXTRA_TOOLCHAIN_FLAGS "-mfloat-abi=softfp")
endif()
set(CMAKE_C_COMPILER ${CROSS}gcc)
set(CMAKE_CXX_COMPILER ${CROSS}g++)
set(AS_EXECUTABLE ${CROSS}as)
set(CMAKE_C_COMPILER_ARG1
    "-march=armv7-a -mfpu=neon ${DRACO_EXTRA_TOOLCHAIN_FLAGS}")
set(CMAKE_CXX_COMPILER_ARG1
    "-march=armv7-a -mfpu=neon ${DRACO_EXTRA_TOOLCHAIN_FLAGS}")
set(CMAKE_SYSTEM_PROCESSOR "armv7")

27
third-party/draco/cmake/toolchains/armv7s-ios.cmake generated vendored Normal file
View File

@ -0,0 +1,27 @@
# Copyright 2021 The Draco Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# iOS device toolchain targeting armv7s. Not usable with the Xcode generator.
if(DRACO_CMAKE_TOOLCHAINS_ARMV7S_IOS_CMAKE_)
  return()
endif()
set(DRACO_CMAKE_TOOLCHAINS_ARMV7S_IOS_CMAKE_ 1)
if(XCODE)
  message(FATAL_ERROR "This toolchain does not support Xcode.")
endif()
set(CMAKE_SYSTEM_PROCESSOR "armv7s")
set(CMAKE_OSX_ARCHITECTURES "armv7s")
# Shared compiler/SDK setup; reads CMAKE_SYSTEM_PROCESSOR set above.
include("${CMAKE_CURRENT_LIST_DIR}/arm-ios-common.cmake")

28
third-party/draco/cmake/toolchains/i386-ios.cmake generated vendored Normal file
View File

@ -0,0 +1,28 @@
# Copyright 2021 The Draco Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# iOS *simulator* toolchain targeting i386 (note the iphonesimulator SDK).
# Not usable with the Xcode generator.
if(DRACO_CMAKE_TOOLCHAINS_i386_IOS_CMAKE_)
  return()
endif()
set(DRACO_CMAKE_TOOLCHAINS_i386_IOS_CMAKE_ 1)
if(XCODE)
  message(FATAL_ERROR "This toolchain does not support Xcode.")
endif()
set(CMAKE_SYSTEM_PROCESSOR "i386")
set(CMAKE_OSX_ARCHITECTURES "i386")
set(CMAKE_OSX_SDK "iphonesimulator")
# Shared compiler/SDK setup; reads the variables set above.
include("${CMAKE_CURRENT_LIST_DIR}/arm-ios-common.cmake")

View File

@ -0,0 +1,30 @@
# Copyright 2021 The Draco Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# Android NDK toolchain for the x86 ABI with libc++.
if(DRACO_CMAKE_TOOLCHAINS_X86_ANDROID_NDK_LIBCPP_CMAKE_)
  return()
endif()
set(DRACO_CMAKE_TOOLCHAINS_X86_ANDROID_NDK_LIBCPP_CMAKE_ 1)
include("${CMAKE_CURRENT_LIST_DIR}/android-ndk-common.cmake")
# Default API level and ABI; callers may set these before including this file.
if(NOT ANDROID_PLATFORM)
  set(ANDROID_PLATFORM android-18)
endif()
if(NOT ANDROID_ABI)
  set(ANDROID_ABI x86)
endif()
include("${DRACO_ANDROID_NDK_PATH}/build/cmake/android.toolchain.cmake")

View File

@ -0,0 +1,30 @@
# Copyright 2021 The Draco Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
if(DRACO_CMAKE_TOOLCHAINS_X86_64_ANDROID_NDK_LIBCPP_CMAKE_)
return()
endif()
set(DRACO_CMAKE_TOOLCHAINS_X86_64_ANDROID_NDK_LIBCPP_CMAKE_ 1)
include("${CMAKE_CURRENT_LIST_DIR}/android-ndk-common.cmake")
if(NOT ANDROID_PLATFORM)
set(ANDROID_PLATFORM android-21)
endif()
if(NOT ANDROID_ABI)
set(ANDROID_ABI x86_64)
endif()
include("${DRACO_ANDROID_NDK_PATH}/build/cmake/android.toolchain.cmake")

28
third-party/draco/cmake/toolchains/x86_64-ios.cmake generated vendored Normal file
View File

@ -0,0 +1,28 @@
# Copyright 2021 The Draco Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
if(DRACO_CMAKE_TOOLCHAINS_X86_64_IOS_CMAKE_)
return()
endif()
set(DRACO_CMAKE_TOOLCHAINS_X86_64_IOS_CMAKE_ 1)
if(XCODE)
message(FATAL_ERROR "This toolchain does not support Xcode.")
endif()
set(CMAKE_SYSTEM_PROCESSOR "x86_64")
set(CMAKE_OSX_ARCHITECTURES "x86_64")
set(CMAKE_OSX_SDK "iphonesimulator")
include("${CMAKE_CURRENT_LIST_DIR}/arm-ios-common.cmake")

47
third-party/draco/src/draco/animation/animation.cc generated vendored Normal file
View File

@ -0,0 +1,47 @@
// Copyright 2019 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "draco/animation/animation.h"
#ifdef DRACO_TRANSCODER_SUPPORTED
namespace draco {
void Animation::Copy(const Animation &src) {
name_ = src.name_;
channels_.clear();
for (int i = 0; i < src.NumChannels(); ++i) {
std::unique_ptr<AnimationChannel> new_channel(new AnimationChannel());
new_channel->Copy(*src.GetChannel(i));
channels_.push_back(std::move(new_channel));
}
samplers_.clear();
for (int i = 0; i < src.NumSamplers(); ++i) {
std::unique_ptr<AnimationSampler> new_sampler(new AnimationSampler());
new_sampler->Copy(*src.GetSampler(i));
samplers_.push_back(std::move(new_sampler));
}
node_animation_data_.clear();
for (int i = 0; i < src.NumNodeAnimationData(); ++i) {
std::unique_ptr<NodeAnimationData> new_data(new NodeAnimationData());
new_data->Copy(*src.GetNodeAnimationData(i));
node_animation_data_.push_back(std::move(new_data));
}
}
} // namespace draco
#endif // DRACO_TRANSCODER_SUPPORTED

149
third-party/draco/src/draco/animation/animation.h generated vendored Normal file
View File

@ -0,0 +1,149 @@
// Copyright 2019 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_ANIMATION_ANIMATION_H_
#define DRACO_ANIMATION_ANIMATION_H_
#include "draco/draco_features.h"
#ifdef DRACO_TRANSCODER_SUPPORTED
#include <memory>
#include <vector>
#include "draco/animation/node_animation_data.h"
#include "draco/core/status.h"
namespace draco {
// Struct to hold information about an animation's sampler.
struct AnimationSampler {
enum class SamplerInterpolation { LINEAR, STEP, CUBICSPLINE };
static std::string InterpolationToString(SamplerInterpolation value) {
switch (value) {
case SamplerInterpolation::STEP:
return "STEP";
case SamplerInterpolation::CUBICSPLINE:
return "CUBICSPLINE";
default:
return "LINEAR";
}
}
AnimationSampler()
: input_index(-1),
interpolation_type(SamplerInterpolation::LINEAR),
output_index(-1) {}
void Copy(const AnimationSampler &src) {
input_index = src.input_index;
interpolation_type = src.interpolation_type;
output_index = src.output_index;
}
int input_index;
SamplerInterpolation interpolation_type;
int output_index;
};
// Struct to hold information about an animation's channel.
struct AnimationChannel {
enum class ChannelTransformation { TRANSLATION, ROTATION, SCALE, WEIGHTS };
static std::string TransformationToString(ChannelTransformation value) {
switch (value) {
case ChannelTransformation::ROTATION:
return "rotation";
case ChannelTransformation::SCALE:
return "scale";
case ChannelTransformation::WEIGHTS:
return "weights";
default:
return "translation";
}
}
AnimationChannel()
: target_index(-1),
transformation_type(ChannelTransformation::TRANSLATION),
sampler_index(-1) {}
void Copy(const AnimationChannel &src) {
target_index = src.target_index;
transformation_type = src.transformation_type;
sampler_index = src.sampler_index;
}
int target_index;
ChannelTransformation transformation_type;
int sampler_index;
};
// This class is used to hold data and information of glTF animations.
class Animation {
public:
Animation() {}
void Copy(const Animation &src);
const std::string &GetName() const { return name_; }
void SetName(const std::string &name) { name_ = name; }
// Returns the number of channels in an animation.
int NumChannels() const { return channels_.size(); }
// Returns the number of samplers in an animation.
int NumSamplers() const { return samplers_.size(); }
// Returns the number of accessors in an animation.
int NumNodeAnimationData() const { return node_animation_data_.size(); }
// Returns a channel in the animation.
AnimationChannel *GetChannel(int index) { return channels_[index].get(); }
const AnimationChannel *GetChannel(int index) const {
return channels_[index].get();
}
// Returns a sampler in the animation.
AnimationSampler *GetSampler(int index) { return samplers_[index].get(); }
const AnimationSampler *GetSampler(int index) const {
return samplers_[index].get();
}
// Returns an accessor in the animation.
NodeAnimationData *GetNodeAnimationData(int index) {
return node_animation_data_[index].get();
}
const NodeAnimationData *GetNodeAnimationData(int index) const {
return node_animation_data_[index].get();
}
void AddNodeAnimationData(
std::unique_ptr<NodeAnimationData> node_animation_data) {
node_animation_data_.push_back(std::move(node_animation_data));
}
void AddSampler(std::unique_ptr<AnimationSampler> sampler) {
samplers_.push_back(std::move(sampler));
}
void AddChannel(std::unique_ptr<AnimationChannel> channel) {
channels_.push_back(std::move(channel));
}
private:
std::string name_;
std::vector<std::unique_ptr<AnimationSampler>> samplers_;
std::vector<std::unique_ptr<AnimationChannel>> channels_;
std::vector<std::unique_ptr<NodeAnimationData>> node_animation_data_;
};
} // namespace draco
#endif // DRACO_TRANSCODER_SUPPORTED
#endif // DRACO_ANIMATION_ANIMATION_H_

View File

@ -0,0 +1,71 @@
// Copyright 2021 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "draco/animation/animation.h"
#include "draco/core/draco_test_base.h"
#include "draco/draco_features.h"
namespace {
#ifdef DRACO_TRANSCODER_SUPPORTED
TEST(AnimationTest, TestCopy) {
// Test copying of animation data.
draco::Animation src_anim;
ASSERT_TRUE(src_anim.GetName().empty());
src_anim.SetName("Walking");
ASSERT_EQ(src_anim.GetName(), "Walking");
std::unique_ptr<draco::AnimationSampler> src_sampler_0(
new draco::AnimationSampler());
src_sampler_0->interpolation_type =
draco::AnimationSampler::SamplerInterpolation::CUBICSPLINE;
std::unique_ptr<draco::AnimationSampler> src_sampler_1(
new draco::AnimationSampler());
src_sampler_1->Copy(*src_sampler_0);
ASSERT_EQ(src_sampler_0->interpolation_type,
src_sampler_1->interpolation_type);
src_sampler_1->interpolation_type =
draco::AnimationSampler::SamplerInterpolation::STEP;
src_anim.AddSampler(std::move(src_sampler_0));
src_anim.AddSampler(std::move(src_sampler_1));
ASSERT_EQ(src_anim.NumSamplers(), 2);
std::unique_ptr<draco::AnimationChannel> src_channel(
new draco::AnimationChannel());
src_channel->transformation_type =
draco::AnimationChannel::ChannelTransformation::WEIGHTS;
src_anim.AddChannel(std::move(src_channel));
ASSERT_EQ(src_anim.NumChannels(), 1);
draco::Animation dst_anim;
dst_anim.Copy(src_anim);
ASSERT_EQ(dst_anim.GetName(), src_anim.GetName());
ASSERT_EQ(dst_anim.NumSamplers(), 2);
ASSERT_EQ(dst_anim.NumChannels(), 1);
ASSERT_EQ(dst_anim.GetSampler(0)->interpolation_type,
src_anim.GetSampler(0)->interpolation_type);
ASSERT_EQ(dst_anim.GetSampler(1)->interpolation_type,
src_anim.GetSampler(1)->interpolation_type);
ASSERT_EQ(dst_anim.GetChannel(0)->transformation_type,
src_anim.GetChannel(0)->transformation_type);
}
#endif // DRACO_TRANSCODER_SUPPORTED
} // namespace

View File

@ -0,0 +1,54 @@
// Copyright 2017 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "draco/animation/keyframe_animation.h"
namespace draco {
KeyframeAnimation::KeyframeAnimation() {}
bool KeyframeAnimation::SetTimestamps(
const std::vector<TimestampType> &timestamp) {
// Already added attributes.
const int32_t num_frames = timestamp.size();
if (num_attributes() > 0) {
// Timestamp attribute could be added only once.
if (timestamps()->size()) {
return false;
} else {
// Check if the number of frames is consistent with
// the existing keyframes.
if (num_frames != num_points()) {
return false;
}
}
} else {
// This is the first attribute.
set_num_frames(num_frames);
}
// Add attribute for time stamp data.
std::unique_ptr<PointAttribute> timestamp_att =
std::unique_ptr<PointAttribute>(new PointAttribute());
timestamp_att->Init(GeometryAttribute::GENERIC, 1, DT_FLOAT32, false,
num_frames);
for (PointIndex i(0); i < num_frames; ++i) {
timestamp_att->SetAttributeValue(timestamp_att->mapped_index(i),
&timestamp[i.value()]);
}
this->SetAttribute(kTimestampId, std::move(timestamp_att));
return true;
}
} // namespace draco

View File

@ -0,0 +1,107 @@
// Copyright 2017 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_ANIMATION_KEYFRAME_ANIMATION_H_
#define DRACO_ANIMATION_KEYFRAME_ANIMATION_H_
#include <vector>
#include "draco/point_cloud/point_cloud.h"
namespace draco {
// Class for holding keyframe animation data. It will have two or more
// attributes as a point cloud. The first attribute is always the timestamp
// of the animation. Each KeyframeAnimation could have multiple animations with
// the same number of frames. Each animation will be treated as a point
// attribute.
class KeyframeAnimation : public PointCloud {
public:
// Force time stamp to be float type.
using TimestampType = float;
KeyframeAnimation();
// Animation must have only one timestamp attribute.
// This function must be called before adding any animation data.
// Returns false if timestamp already exists.
bool SetTimestamps(const std::vector<TimestampType> &timestamp);
// Returns an id for the added animation data. This id will be used to
// identify this animation.
// Returns -1 if error, e.g. number of frames is not consistent.
// Type |T| should be consistent with |DataType|, e.g:
// float - DT_FLOAT32,
// int32_t - DT_INT32, ...
template <typename T>
int32_t AddKeyframes(DataType data_type, uint32_t num_components,
const std::vector<T> &data);
const PointAttribute *timestamps() const {
return GetAttributeByUniqueId(kTimestampId);
}
const PointAttribute *keyframes(int32_t animation_id) const {
return GetAttributeByUniqueId(animation_id);
}
// Number of frames should be equal to number points in the point cloud.
void set_num_frames(int32_t num_frames) { set_num_points(num_frames); }
int32_t num_frames() const { return static_cast<int32_t>(num_points()); }
int32_t num_animations() const { return num_attributes() - 1; }
private:
// Attribute id of timestamp is fixed to 0.
static constexpr int32_t kTimestampId = 0;
};
template <typename T>
int32_t KeyframeAnimation::AddKeyframes(DataType data_type,
uint32_t num_components,
const std::vector<T> &data) {
// TODO(draco-eng): Verify T is consistent with |data_type|.
if (num_components == 0) {
return -1;
}
// If timestamps is not added yet, then reserve attribute 0 for timestamps.
if (!num_attributes()) {
// Add a temporary attribute with 0 points to fill attribute id 0.
std::unique_ptr<PointAttribute> temp_att =
std::unique_ptr<PointAttribute>(new PointAttribute());
temp_att->Init(GeometryAttribute::GENERIC, num_components, data_type, false,
0);
this->AddAttribute(std::move(temp_att));
set_num_frames(data.size() / num_components);
}
if (data.size() != num_components * num_frames()) {
return -1;
}
std::unique_ptr<PointAttribute> keyframe_att =
std::unique_ptr<PointAttribute>(new PointAttribute());
keyframe_att->Init(GeometryAttribute::GENERIC, num_components, data_type,
false, num_frames());
const size_t stride = num_components;
for (PointIndex i(0); i < num_frames(); ++i) {
keyframe_att->SetAttributeValue(keyframe_att->mapped_index(i),
&data[i.value() * stride]);
}
return this->AddAttribute(std::move(keyframe_att));
}
} // namespace draco
#endif // DRACO_ANIMATION_KEYFRAME_ANIMATION_H_

View File

@ -0,0 +1,30 @@
// Copyright 2017 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "draco/animation/keyframe_animation_decoder.h"
namespace draco {
Status KeyframeAnimationDecoder::Decode(const DecoderOptions &options,
DecoderBuffer *in_buffer,
KeyframeAnimation *animation) {
const auto status = PointCloudSequentialDecoder::Decode(
options, in_buffer, static_cast<PointCloud *>(animation));
if (!status.ok()) {
return status;
}
return OkStatus();
}
} // namespace draco

View File

@ -0,0 +1,34 @@
// Copyright 2017 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_ANIMATION_KEYFRAME_ANIMATION_DECODER_H_
#define DRACO_ANIMATION_KEYFRAME_ANIMATION_DECODER_H_
#include "draco/animation/keyframe_animation.h"
#include "draco/compression/point_cloud/point_cloud_sequential_decoder.h"
namespace draco {
// Class for decoding keyframe animation.
class KeyframeAnimationDecoder : private PointCloudSequentialDecoder {
public:
KeyframeAnimationDecoder(){};
Status Decode(const DecoderOptions &options, DecoderBuffer *in_buffer,
KeyframeAnimation *animation);
};
} // namespace draco
#endif // DRACO_ANIMATION_KEYFRAME_ANIMATION_DECODER_H_

View File

@ -0,0 +1,28 @@
// Copyright 2017 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "draco/animation/keyframe_animation_encoder.h"
namespace draco {
KeyframeAnimationEncoder::KeyframeAnimationEncoder() {}
Status KeyframeAnimationEncoder::EncodeKeyframeAnimation(
const KeyframeAnimation &animation, const EncoderOptions &options,
EncoderBuffer *out_buffer) {
SetPointCloud(animation);
return Encode(options, out_buffer);
}
} // namespace draco

View File

@ -0,0 +1,39 @@
// Copyright 2017 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_ANIMATION_KEYFRAME_ANIMATION_ENCODER_H_
#define DRACO_ANIMATION_KEYFRAME_ANIMATION_ENCODER_H_
#include "draco/animation/keyframe_animation.h"
#include "draco/compression/point_cloud/point_cloud_sequential_encoder.h"
namespace draco {
// Class for encoding keyframe animation. It takes KeyframeAnimation as a
// PointCloud and compress it. It's mostly a wrapper around PointCloudEncoder so
// that the animation module could be separated from geometry compression when
// exposed to developers.
class KeyframeAnimationEncoder : private PointCloudSequentialEncoder {
public:
KeyframeAnimationEncoder();
// Encode an animation to a buffer.
Status EncodeKeyframeAnimation(const KeyframeAnimation &animation,
const EncoderOptions &options,
EncoderBuffer *out_buffer);
};
} // namespace draco
#endif // DRACO_ANIMATION_KEYFRAME_ANIMATION_ENCODER_H_

View File

@ -0,0 +1,169 @@
// Copyright 2017 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "draco/animation/keyframe_animation.h"
#include "draco/animation/keyframe_animation_decoder.h"
#include "draco/animation/keyframe_animation_encoder.h"
#include "draco/core/draco_test_base.h"
#include "draco/core/draco_test_utils.h"
namespace draco {
class KeyframeAnimationEncodingTest : public ::testing::Test {
protected:
KeyframeAnimationEncodingTest() {}
bool CreateAndAddTimestamps(int32_t num_frames) {
timestamps_.resize(num_frames);
for (int i = 0; i < timestamps_.size(); ++i) {
timestamps_[i] = static_cast<draco::KeyframeAnimation::TimestampType>(i);
}
return keyframe_animation_.SetTimestamps(timestamps_);
}
int32_t CreateAndAddAnimationData(int32_t num_frames,
uint32_t num_components) {
// Create and add animation data with.
animation_data_.resize(num_frames * num_components);
for (int i = 0; i < animation_data_.size(); ++i) {
animation_data_[i] = static_cast<float>(i);
}
return keyframe_animation_.AddKeyframes(draco::DT_FLOAT32, num_components,
animation_data_);
}
template <int num_components_t>
void CompareAnimationData(const KeyframeAnimation &animation0,
const KeyframeAnimation &animation1,
bool quantized) {
ASSERT_EQ(animation0.num_frames(), animation1.num_frames());
ASSERT_EQ(animation0.num_animations(), animation1.num_animations());
if (quantized) {
// TODO(b/199760123) : Add test for stable quantization.
// Quantization will result in slightly different values.
// Skip comparing values.
return;
}
// Compare time stamp.
const auto timestamp_att0 = animation0.timestamps();
const auto timestamp_att1 = animation0.timestamps();
for (int i = 0; i < animation0.num_frames(); ++i) {
std::array<float, 1> att_value0;
std::array<float, 1> att_value1;
ASSERT_TRUE((timestamp_att0->GetValue<float, 1>(
draco::AttributeValueIndex(i), &att_value0)));
ASSERT_TRUE((timestamp_att1->GetValue<float, 1>(
draco::AttributeValueIndex(i), &att_value1)));
ASSERT_FLOAT_EQ(att_value0[0], att_value1[0]);
}
for (int animation_id = 1; animation_id < animation0.num_animations();
++animation_id) {
// Compare keyframe data.
const auto keyframe_att0 = animation0.keyframes(animation_id);
const auto keyframe_att1 = animation1.keyframes(animation_id);
ASSERT_EQ(keyframe_att0->num_components(),
keyframe_att1->num_components());
for (int i = 0; i < animation0.num_frames(); ++i) {
std::array<float, num_components_t> att_value0;
std::array<float, num_components_t> att_value1;
ASSERT_TRUE((keyframe_att0->GetValue<float, num_components_t>(
draco::AttributeValueIndex(i), &att_value0)));
ASSERT_TRUE((keyframe_att1->GetValue<float, num_components_t>(
draco::AttributeValueIndex(i), &att_value1)));
for (int j = 0; j < att_value0.size(); ++j) {
ASSERT_FLOAT_EQ(att_value0[j], att_value1[j]);
}
}
}
}
template <int num_components_t>
void TestKeyframeAnimationEncoding() {
TestKeyframeAnimationEncoding<num_components_t>(false);
}
template <int num_components_t>
void TestKeyframeAnimationEncoding(bool quantized) {
// Encode animation class.
draco::EncoderBuffer buffer;
draco::KeyframeAnimationEncoder encoder;
EncoderOptions options = EncoderOptions::CreateDefaultOptions();
if (quantized) {
// Set quantization for timestamps.
options.SetAttributeInt(0, "quantization_bits", 20);
// Set quantization for keyframes.
for (int i = 1; i <= keyframe_animation_.num_animations(); ++i) {
options.SetAttributeInt(i, "quantization_bits", 20);
}
}
DRACO_ASSERT_OK(
encoder.EncodeKeyframeAnimation(keyframe_animation_, options, &buffer));
draco::DecoderBuffer dec_decoder;
draco::KeyframeAnimationDecoder decoder;
DecoderBuffer dec_buffer;
dec_buffer.Init(buffer.data(), buffer.size());
// Decode animation class.
std::unique_ptr<KeyframeAnimation> decoded_animation(
new KeyframeAnimation());
DecoderOptions dec_options;
DRACO_ASSERT_OK(
decoder.Decode(dec_options, &dec_buffer, decoded_animation.get()));
// Verify if animation before and after compression is identical.
CompareAnimationData<num_components_t>(keyframe_animation_,
*decoded_animation, quantized);
}
draco::KeyframeAnimation keyframe_animation_;
std::vector<draco::KeyframeAnimation::TimestampType> timestamps_;
std::vector<float> animation_data_;
};
TEST_F(KeyframeAnimationEncodingTest, OneComponent) {
const int num_frames = 1;
ASSERT_TRUE(CreateAndAddTimestamps(num_frames));
ASSERT_EQ(CreateAndAddAnimationData(num_frames, 1), 1);
TestKeyframeAnimationEncoding<1>();
}
TEST_F(KeyframeAnimationEncodingTest, ManyComponents) {
const int num_frames = 100;
ASSERT_TRUE(CreateAndAddTimestamps(num_frames));
ASSERT_EQ(CreateAndAddAnimationData(num_frames, 100), 1);
TestKeyframeAnimationEncoding<100>();
}
TEST_F(KeyframeAnimationEncodingTest, ManyComponentsWithQuantization) {
const int num_frames = 100;
ASSERT_TRUE(CreateAndAddTimestamps(num_frames));
ASSERT_EQ(CreateAndAddAnimationData(num_frames, 4), 1);
// Test compression with quantization.
TestKeyframeAnimationEncoding<4>(true);
}
TEST_F(KeyframeAnimationEncodingTest, MultipleAnimations) {
const int num_frames = 5;
ASSERT_TRUE(CreateAndAddTimestamps(num_frames));
ASSERT_EQ(CreateAndAddAnimationData(num_frames, 3), 1);
ASSERT_EQ(CreateAndAddAnimationData(num_frames, 3), 2);
TestKeyframeAnimationEncoding<3>();
}
} // namespace draco

View File

@ -0,0 +1,104 @@
// Copyright 2017 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "draco/animation/keyframe_animation.h"
#include "draco/core/draco_test_base.h"
namespace {
class KeyframeAnimationTest : public ::testing::Test {
protected:
KeyframeAnimationTest() {}
bool CreateAndAddTimestamps(int32_t num_frames) {
timestamps_.resize(num_frames);
for (int i = 0; i < timestamps_.size(); ++i) {
timestamps_[i] = static_cast<draco::KeyframeAnimation::TimestampType>(i);
}
return keyframe_animation_.SetTimestamps(timestamps_);
}
int32_t CreateAndAddAnimationData(int32_t num_frames,
uint32_t num_components) {
// Create and add animation data with.
animation_data_.resize(num_frames * num_components);
for (int i = 0; i < animation_data_.size(); ++i) {
animation_data_[i] = static_cast<float>(i);
}
return keyframe_animation_.AddKeyframes(draco::DT_FLOAT32, num_components,
animation_data_);
}
template <int num_components_t>
void CompareAnimationData() {
// Compare time stamp.
const auto timestamp_att = keyframe_animation_.timestamps();
for (int i = 0; i < timestamps_.size(); ++i) {
std::array<float, 1> att_value;
ASSERT_TRUE((timestamp_att->GetValue<float, 1>(
draco::AttributeValueIndex(i), &att_value)));
ASSERT_FLOAT_EQ(att_value[0], i);
}
// Compare keyframe data.
const auto keyframe_att = keyframe_animation_.keyframes(1);
for (int i = 0; i < animation_data_.size() / num_components_t; ++i) {
std::array<float, num_components_t> att_value;
ASSERT_TRUE((keyframe_att->GetValue<float, num_components_t>(
draco::AttributeValueIndex(i), &att_value)));
for (int j = 0; j < num_components_t; ++j) {
ASSERT_FLOAT_EQ(att_value[j], i * num_components_t + j);
}
}
}
template <int num_components_t>
void TestKeyframeAnimation(int32_t num_frames) {
ASSERT_TRUE(CreateAndAddTimestamps(num_frames));
ASSERT_EQ(CreateAndAddAnimationData(num_frames, num_components_t), 1);
CompareAnimationData<num_components_t>();
}
draco::KeyframeAnimation keyframe_animation_;
std::vector<draco::KeyframeAnimation::TimestampType> timestamps_;
std::vector<float> animation_data_;
};
// Test animation with 1 component and 10 frames.
TEST_F(KeyframeAnimationTest, OneComponent) { TestKeyframeAnimation<1>(10); }
// Test animation with 4 component and 10 frames.
TEST_F(KeyframeAnimationTest, FourComponent) { TestKeyframeAnimation<4>(10); }
// Test adding animation data before timestamp.
TEST_F(KeyframeAnimationTest, AddingAnimationFirst) {
ASSERT_EQ(CreateAndAddAnimationData(5, 1), 1);
ASSERT_TRUE(CreateAndAddTimestamps(5));
}
// Test adding timestamp more than once.
TEST_F(KeyframeAnimationTest, ErrorAddingTimestampsTwice) {
ASSERT_TRUE(CreateAndAddTimestamps(5));
ASSERT_FALSE(CreateAndAddTimestamps(5));
}
// Test animation with multiple animation data.
TEST_F(KeyframeAnimationTest, MultipleAnimationData) {
const int num_frames = 5;
ASSERT_TRUE(CreateAndAddTimestamps(num_frames));
ASSERT_EQ(CreateAndAddAnimationData(num_frames, 1), 1);
ASSERT_EQ(CreateAndAddAnimationData(num_frames, 2), 2);
}
} // namespace

View File

@ -0,0 +1,150 @@
// Copyright 2019 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_ANIMATION_NODE_ANIMATION_DATA_H_
#define DRACO_ANIMATION_NODE_ANIMATION_DATA_H_
#include "draco/draco_features.h"
#ifdef DRACO_TRANSCODER_SUPPORTED
#include "draco/core/hash_utils.h"
#include "draco/core/status.h"
#include "draco/core/status_or.h"
namespace draco {
// This class is used to store information and data for animations that only
// affect the nodes.
// TODO(fgalligan): Think about changing the name of this class now that Skin
// is using it.
// Holds raw animation channel data that targets scene nodes (translation,
// rotation, scale, or matrices). The payload is a flat float buffer whose
// interpretation is controlled by |type_| and |count_|.
// TODO(fgalligan): Think about changing the name of this class now that Skin
// is using it.
class NodeAnimationData {
 public:
  enum class Type { SCALAR, VEC3, VEC4, MAT4 };

  // Starts out as an empty scalar channel.
  NodeAnimationData() : type_(Type::SCALAR), count_(0), normalized_(false) {}

  // Deep-copies all state from |src|.
  void Copy(const NodeAnimationData &src) {
    data_ = src.data_;
    normalized_ = src.normalized_;
    count_ = src.count_;
    type_ = src.type_;
  }

  Type type() const { return type_; }
  int count() const { return count_; }
  bool normalized() const { return normalized_; }
  std::vector<float> *GetMutableData() { return &data_; }
  const std::vector<float> *GetData() const { return &data_; }

  void SetType(Type type) { type_ = type; }
  void SetCount(int count) { count_ = count; }
  void SetNormalized(bool normalized) { normalized_ = normalized; }

  // Size in bytes of a single float component.
  int ComponentSize() const { return sizeof(float); }

  // Number of float components that make up one element of this type.
  int NumComponents() const {
    if (type_ == Type::SCALAR) {
      return 1;
    }
    if (type_ == Type::VEC3) {
      return 3;
    }
    if (type_ == Type::MAT4) {
      return 16;
    }
    // Type::VEC4 (the remaining enumerator).
    return 4;
  }

  // glTF-style accessor type name for this channel.
  std::string TypeAsString() const {
    if (type_ == Type::SCALAR) {
      return "SCALAR";
    }
    if (type_ == Type::VEC3) {
      return "VEC3";
    }
    if (type_ == Type::MAT4) {
      return "MAT4";
    }
    // Type::VEC4 (the remaining enumerator).
    return "VEC4";
  }

  // Two channels are equal when every field, including the raw data, matches.
  bool operator==(const NodeAnimationData &nad) const {
    return count_ == nad.count_ && normalized_ == nad.normalized_ &&
           type_ == nad.type_ && data_ == nad.data_;
  }

 private:
  Type type_;
  int count_;
  bool normalized_;
  std::vector<float> data_;
};
// Wrapper class for hashing NodeAnimationData. When using different containers,
// this class is preferable instead of copying the data in NodeAnimationData
// every time.
class NodeAnimationDataHash {
 public:
  NodeAnimationDataHash() = delete;
  NodeAnimationDataHash &operator=(const NodeAnimationDataHash &) = delete;
  NodeAnimationDataHash(NodeAnimationDataHash &&) = delete;
  NodeAnimationDataHash &operator=(NodeAnimationDataHash &&) = delete;

  // Wraps |nad| (not owned; must outlive this wrapper) and caches its hash so
  // repeated container lookups do not re-hash the float data.
  explicit NodeAnimationDataHash(const NodeAnimationData *nad)
      : node_animation_data_(nad) {
    hash_ = NodeAnimationDataHash::HashNodeAnimationData(*node_animation_data_);
  }
  NodeAnimationDataHash(const NodeAnimationDataHash &nadh) {
    node_animation_data_ = nadh.node_animation_data_;
    hash_ = nadh.hash_;
  }

  // Equality compares the pointed-to data, not the cached hashes.
  bool operator==(const NodeAnimationDataHash &nadh) const {
    return *node_animation_data_ == *nadh.node_animation_data_;
  }

  // Hash functor for use with unordered containers; returns the cached value.
  struct Hash {
    size_t operator()(const NodeAnimationDataHash &nadh) const {
      return nadh.hash_;
    }
  };

  const NodeAnimationData *GetNodeAnimationData() {
    return node_animation_data_;
  }

 private:
  // Returns a hash of |nad|. Combines type, count, normalized flag and a
  // fingerprint of the raw float buffer; the combine order is part of the
  // hash definition and must not change.
  static size_t HashNodeAnimationData(const NodeAnimationData &nad) {
    size_t hash = 79;  // Magic number.
    hash = HashCombine(static_cast<int>(nad.type()), hash);
    hash = HashCombine(nad.count(), hash);
    hash = HashCombine(nad.normalized(), hash);
    const uint64_t data_hash =
        FingerprintString(reinterpret_cast<const char *>(nad.GetData()->data()),
                          nad.GetData()->size() * sizeof(float));
    hash = HashCombine(data_hash, hash);
    return hash;
  }

  const NodeAnimationData *node_animation_data_;  // Not owned.
  size_t hash_;                                   // Cached hash of the above.
};
} // namespace draco
#endif // DRACO_TRANSCODER_SUPPORTED
#endif // DRACO_ANIMATION_NODE_ANIMATION_DATA_H_

29
third-party/draco/src/draco/animation/skin.cc generated vendored Normal file
View File

@ -0,0 +1,29 @@
// Copyright 2019 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "draco/animation/skin.h"
#ifdef DRACO_TRANSCODER_SUPPORTED
namespace draco {
// Deep-copies all skin state from |s|: the inverse bind matrices, the joint
// node list, and the root joint index.
void Skin::Copy(const Skin &s) {
  inverse_bind_matrices_.Copy(s.GetInverseBindMatrices());
  joints_ = s.GetJoints();
  joint_root_index_ = s.GetJointRoot();
}
} // namespace draco
#endif // DRACO_TRANSCODER_SUPPORTED

64
third-party/draco/src/draco/animation/skin.h generated vendored Normal file
View File

@ -0,0 +1,64 @@
// Copyright 2019 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_ANIMATION_SKIN_H_
#define DRACO_ANIMATION_SKIN_H_
#include "draco/draco_features.h"
#ifdef DRACO_TRANSCODER_SUPPORTED
#include <vector>
#include "draco/animation/node_animation_data.h"
#include "draco/scene/scene_indices.h"
namespace draco {
// This class is used to store information on animation skins.
// This class is used to store information on animation skins: the inverse
// bind matrices plus the scene-node indices of the joints they correspond to.
class Skin {
 public:
  // -1 means "no root joint set".
  Skin() : joint_root_index_(-1) {}
  void Copy(const Skin &s);

  NodeAnimationData &GetInverseBindMatrices() { return inverse_bind_matrices_; }
  const NodeAnimationData &GetInverseBindMatrices() const {
    return inverse_bind_matrices_;
  }

  // Appends |index| to the joint list and returns its position in the list.
  int AddJoint(SceneNodeIndex index) {
    joints_.push_back(index);
    return joints_.size() - 1;
  }
  int NumJoints() const { return joints_.size(); }
  SceneNodeIndex GetJoint(int index) const { return joints_[index]; }
  SceneNodeIndex &GetJoint(int index) { return joints_[index]; }
  const std::vector<SceneNodeIndex> &GetJoints() const { return joints_; }

  void SetJointRoot(SceneNodeIndex index) { joint_root_index_ = index; }
  SceneNodeIndex GetJointRoot() const { return joint_root_index_; }

 private:
  // Inverse bind matrices for the joints (one MAT4 per joint).
  NodeAnimationData inverse_bind_matrices_;
  // List of node indices that make up the joint hierarchy.
  std::vector<SceneNodeIndex> joints_;
  SceneNodeIndex joint_root_index_;
};
} // namespace draco
#endif // DRACO_TRANSCODER_SUPPORTED
#endif // DRACO_ANIMATION_SKIN_H_

View File

@ -0,0 +1,145 @@
// Copyright 2017 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "draco/attributes/attribute_octahedron_transform.h"
#include "draco/attributes/attribute_transform_type.h"
#include "draco/compression/attributes/normal_compression_utils.h"
namespace draco {
// Initializes this transform from the transform data already attached to
// |attribute|. Fails if the attribute carries no data or the data belongs to
// a different transform type.
bool AttributeOctahedronTransform::InitFromAttribute(
    const PointAttribute &attribute) {
  const AttributeTransformData *const transform_data =
      attribute.GetAttributeTransformData();
  if (!transform_data ||
      transform_data->transform_type() != ATTRIBUTE_OCTAHEDRON_TRANSFORM) {
    return false;  // Wrong transform type.
  }
  // The only stored parameter is the quantization bit count (int32 at offset 0).
  quantization_bits_ = transform_data->GetParameterValue<int32_t>(0);
  return true;
}
// Serializes this transform's parameters (type tag + quantization bits) into
// |out_data| so they can be attached to a PointAttribute.
void AttributeOctahedronTransform::CopyToAttributeTransformData(
    AttributeTransformData *out_data) const {
  out_data->set_transform_type(ATTRIBUTE_OCTAHEDRON_TRANSFORM);
  out_data->AppendParameterValue(quantization_bits_);
}
// Applies the octahedral transform to |attribute|, writing the quantized
// (s, t) pairs into |target_attribute|. |point_ids| optionally remaps values;
// when empty, all |target_attribute->size()| entries are transformed in order.
bool AttributeOctahedronTransform::TransformAttribute(
    const PointAttribute &attribute, const std::vector<PointIndex> &point_ids,
    PointAttribute *target_attribute) {
  return GeneratePortableAttribute(attribute, point_ids,
                                   target_attribute->size(), target_attribute);
}
// Converts quantized octahedral (s, t) pairs stored in |attribute| back into
// unit-length 3-component float vectors written into |target_attribute|.
// Fails if the target is not a 3-component DT_FLOAT32 attribute or the
// quantization bit count is invalid.
bool AttributeOctahedronTransform::InverseTransformAttribute(
    const PointAttribute &attribute, PointAttribute *target_attribute) {
  if (target_attribute->data_type() != DT_FLOAT32) {
    return false;
  }
  const int num_points = target_attribute->size();
  const int num_components = target_attribute->num_components();
  if (num_components != 3) {
    return false;
  }
  constexpr int kEntrySize = sizeof(float) * 3;
  float att_val[3];
  // Source holds two int32 values (s, t) per point.
  const int32_t *source_attribute_data = reinterpret_cast<const int32_t *>(
      attribute.GetAddress(AttributeValueIndex(0)));
  uint8_t *target_address =
      target_attribute->GetAddress(AttributeValueIndex(0));
  OctahedronToolBox octahedron_tool_box;
  if (!octahedron_tool_box.SetQuantizationBits(quantization_bits_)) {
    return false;
  }
  // Use a signed index to match |num_points| and avoid a signed/unsigned
  // comparison (was uint32_t upstream).
  for (int i = 0; i < num_points; ++i) {
    const int32_t s = *source_attribute_data++;
    const int32_t t = *source_attribute_data++;
    octahedron_tool_box.QuantizedOctahedralCoordsToUnitVector(s, t, att_val);
    // Store the decoded floating point values into the attribute buffer.
    std::memcpy(target_address, att_val, kEntrySize);
    target_address += kEntrySize;
  }
  return true;
}
// Sets the number of quantization bits. Validity is checked later by
// OctahedronToolBox::SetQuantizationBits when the transform is applied.
void AttributeOctahedronTransform::SetParameters(int quantization_bits) {
  quantization_bits_ = quantization_bits;
}
// Writes the quantization bit count (as a single byte) into |encoder_buffer|.
// Fails when the transform has not been initialized (bits still -1).
bool AttributeOctahedronTransform::EncodeParameters(
    EncoderBuffer *encoder_buffer) const {
  if (is_initialized()) {
    encoder_buffer->Encode(static_cast<uint8_t>(quantization_bits_));
    return true;
  }
  return false;
}
// Reads the quantization bit count (one byte) back from |decoder_buffer|.
// The value is range-checked later by SetQuantizationBits during inverse
// transform, not here.
bool AttributeOctahedronTransform::DecodeParameters(
    const PointAttribute &attribute, DecoderBuffer *decoder_buffer) {
  uint8_t quantization_bits;
  if (!decoder_buffer->Decode(&quantization_bits)) {
    return false;
  }
  quantization_bits_ = quantization_bits;
  return true;
}
bool AttributeOctahedronTransform::GeneratePortableAttribute(
const PointAttribute &attribute, const std::vector<PointIndex> &point_ids,
int num_points, PointAttribute *target_attribute) const {
DRACO_DCHECK(is_initialized());
// Quantize all values in the order given by point_ids into portable
// attribute.
int32_t *const portable_attribute_data = reinterpret_cast<int32_t *>(
target_attribute->GetAddress(AttributeValueIndex(0)));
float att_val[3];
int32_t dst_index = 0;
OctahedronToolBox converter;
if (!converter.SetQuantizationBits(quantization_bits_)) {
return false;
}
if (!point_ids.empty()) {
for (uint32_t i = 0; i < point_ids.size(); ++i) {
const AttributeValueIndex att_val_id =
attribute.mapped_index(point_ids[i]);
attribute.GetValue(att_val_id, att_val);
// Encode the vector into a s and t octahedral coordinates.
int32_t s, t;
converter.FloatVectorToQuantizedOctahedralCoords(att_val, &s, &t);
portable_attribute_data[dst_index++] = s;
portable_attribute_data[dst_index++] = t;
}
} else {
for (PointIndex i(0); i < num_points; ++i) {
const AttributeValueIndex att_val_id = attribute.mapped_index(i);
attribute.GetValue(att_val_id, att_val);
// Encode the vector into a s and t octahedral coordinates.
int32_t s, t;
converter.FloatVectorToQuantizedOctahedralCoords(att_val, &s, &t);
portable_attribute_data[dst_index++] = s;
portable_attribute_data[dst_index++] = t;
}
}
return true;
}
} // namespace draco

View File

@ -0,0 +1,81 @@
// Copyright 2017 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_ATTRIBUTES_ATTRIBUTE_OCTAHEDRON_TRANSFORM_H_
#define DRACO_ATTRIBUTES_ATTRIBUTE_OCTAHEDRON_TRANSFORM_H_
#include "draco/attributes/attribute_transform.h"
#include "draco/attributes/point_attribute.h"
#include "draco/core/encoder_buffer.h"
namespace draco {
// Attribute transform for attributes transformed to octahedral coordinates.
// Attribute transform for attributes transformed to octahedral coordinates.
// Unit vectors (normals) are mapped to two quantized integers (s, t); the
// transformed attribute therefore always has 2 uint32 components.
class AttributeOctahedronTransform : public AttributeTransform {
 public:
  // -1 marks the transform as uninitialized (see is_initialized()).
  AttributeOctahedronTransform() : quantization_bits_(-1) {}

  // Return attribute transform type.
  AttributeTransformType Type() const override {
    return ATTRIBUTE_OCTAHEDRON_TRANSFORM;
  }
  // Try to init transform from attribute.
  bool InitFromAttribute(const PointAttribute &attribute) override;
  // Copy parameter values into the provided AttributeTransformData instance.
  void CopyToAttributeTransformData(
      AttributeTransformData *out_data) const override;

  bool TransformAttribute(const PointAttribute &attribute,
                          const std::vector<PointIndex> &point_ids,
                          PointAttribute *target_attribute) override;

  bool InverseTransformAttribute(const PointAttribute &attribute,
                                 PointAttribute *target_attribute) override;

  // Set number of quantization bits.
  void SetParameters(int quantization_bits);

  // Encode relevant parameters into buffer.
  bool EncodeParameters(EncoderBuffer *encoder_buffer) const override;

  bool DecodeParameters(const PointAttribute &attribute,
                        DecoderBuffer *decoder_buffer) override;

  bool is_initialized() const { return quantization_bits_ != -1; }
  int32_t quantization_bits() const { return quantization_bits_; }

 protected:
  // Transformed values are stored as unsigned 32-bit integers.
  DataType GetTransformedDataType(
      const PointAttribute &attribute) const override {
    return DT_UINT32;
  }
  // Two components per value: the octahedral (s, t) pair.
  int GetTransformedNumComponents(
      const PointAttribute &attribute) const override {
    return 2;
  }

  // Perform the actual transformation.
  bool GeneratePortableAttribute(const PointAttribute &attribute,
                                 const std::vector<PointIndex> &point_ids,
                                 int num_points,
                                 PointAttribute *target_attribute) const;

 private:
  int32_t quantization_bits_;  // -1 until parameters are set or decoded.
};
} // namespace draco
#endif // DRACO_ATTRIBUTES_ATTRIBUTE_OCTAHEDRON_TRANSFORM_H_

View File

@ -0,0 +1,268 @@
// Copyright 2017 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "draco/attributes/attribute_quantization_transform.h"
#include <cmath>
#include <cstring>
#include <memory>
#include <vector>
#include "draco/attributes/attribute_transform_type.h"
#include "draco/core/quantization_utils.h"
namespace draco {
// Initializes this transform from the transform data attached to |attribute|.
// The stored parameter layout is: int32 quantization bits, followed by one
// float minimum value per component, followed by a float range.
bool AttributeQuantizationTransform::InitFromAttribute(
    const PointAttribute &attribute) {
  const AttributeTransformData *const transform_data =
      attribute.GetAttributeTransformData();
  if (!transform_data ||
      transform_data->transform_type() != ATTRIBUTE_QUANTIZATION_TRANSFORM) {
    return false;  // Wrong transform type.
  }
  int32_t byte_offset = 0;
  quantization_bits_ = transform_data->GetParameterValue<int32_t>(byte_offset);
  byte_offset += 4;
  min_values_.resize(attribute.num_components());
  for (int i = 0; i < attribute.num_components(); ++i) {
    min_values_[i] = transform_data->GetParameterValue<float>(byte_offset);
    byte_offset += 4;
  }
  range_ = transform_data->GetParameterValue<float>(byte_offset);
  return true;
}
// Copy parameter values into the provided AttributeTransformData instance.
// Layout must mirror InitFromAttribute(): bits, per-component minimums, range.
void AttributeQuantizationTransform::CopyToAttributeTransformData(
    AttributeTransformData *out_data) const {
  out_data->set_transform_type(ATTRIBUTE_QUANTIZATION_TRANSFORM);
  out_data->AppendParameterValue(quantization_bits_);
  // Range-for avoids the signed/unsigned comparison of the original
  // `int i < min_values_.size()` loop.
  for (const float min_value : min_values_) {
    out_data->AppendParameterValue(min_value);
  }
  out_data->AppendParameterValue(range_);
}
// Quantizes |attribute| into |target_attribute|. An empty |point_ids| selects
// the identity-mapping overload; otherwise values are remapped via the ids.
bool AttributeQuantizationTransform::TransformAttribute(
    const PointAttribute &attribute, const std::vector<PointIndex> &point_ids,
    PointAttribute *target_attribute) {
  if (point_ids.empty()) {
    GeneratePortableAttribute(attribute, target_attribute->size(),
                              target_attribute);
  } else {
    GeneratePortableAttribute(attribute, point_ids, target_attribute->size(),
                              target_attribute);
  }
  return true;
}
// Dequantizes int32 values from |attribute| back into floats written to
// |target_attribute|, adding back the stored per-component minimums.
// Fails if the target is not DT_FLOAT32 or the dequantizer cannot be
// initialized from the stored range / bit count.
bool AttributeQuantizationTransform::InverseTransformAttribute(
    const PointAttribute &attribute, PointAttribute *target_attribute) {
  if (target_attribute->data_type() != DT_FLOAT32) {
    return false;
  }
  // Convert all quantized values back to floats.
  const int32_t max_quantized_value =
      (1u << static_cast<uint32_t>(quantization_bits_)) - 1;
  const int num_components = target_attribute->num_components();
  const int entry_size = sizeof(float) * num_components;
  const std::unique_ptr<float[]> att_val(new float[num_components]);
  int quant_val_id = 0;
  int out_byte_pos = 0;
  Dequantizer dequantizer;
  if (!dequantizer.Init(range_, max_quantized_value)) {
    return false;
  }
  const int32_t *const source_attribute_data =
      reinterpret_cast<const int32_t *>(
          attribute.GetAddress(AttributeValueIndex(0)));
  const int num_values = target_attribute->size();
  // Use a signed index to match |num_values| and avoid a signed/unsigned
  // comparison (was uint32_t upstream).
  for (int i = 0; i < num_values; ++i) {
    for (int c = 0; c < num_components; ++c) {
      float value =
          dequantizer.DequantizeFloat(source_attribute_data[quant_val_id++]);
      value = value + min_values_[c];
      att_val[c] = value;
    }
    // Store the floating point value into the attribute buffer.
    target_attribute->buffer()->Write(out_byte_pos, att_val.get(), entry_size);
    out_byte_pos += entry_size;
  }
  return true;
}
// Returns true when |quantization_bits| is in the supported range [1, 30].
bool AttributeQuantizationTransform::IsQuantizationValid(
    int quantization_bits) {
  // Currently we allow only up to 30 bit quantization.
  return quantization_bits >= 1 && quantization_bits <= 30;
}
// Sets quantization parameters explicitly: bit count, the first
// |num_components| per-component minimums from |min_values|, and the overall
// |range|. Returns false for an invalid bit count.
bool AttributeQuantizationTransform::SetParameters(int quantization_bits,
                                                   const float *min_values,
                                                   int num_components,
                                                   float range) {
  if (!IsQuantizationValid(quantization_bits)) {
    return false;
  }
  quantization_bits_ = quantization_bits;
  min_values_.assign(min_values, min_values + num_components);
  range_ = range;
  return true;
}
// Derives quantization parameters from the data in |attribute|: per-component
// minimums and the largest per-component extent (|range_|). Returns false if
// already initialized, the bit count is invalid, or the data contains
// NaN/Inf values.
bool AttributeQuantizationTransform::ComputeParameters(
    const PointAttribute &attribute, const int quantization_bits) {
  if (quantization_bits_ != -1) {
    return false;  // already initialized.
  }
  if (!IsQuantizationValid(quantization_bits)) {
    return false;
  }
  quantization_bits_ = quantization_bits;
  const int num_components = attribute.num_components();
  range_ = 0.f;
  min_values_ = std::vector<float>(num_components, 0.f);
  const std::unique_ptr<float[]> max_values(new float[num_components]);
  const std::unique_ptr<float[]> att_val(new float[num_components]);
  // Compute minimum values and max value difference.
  // Seed min/max with the first value, then scan the rest.
  attribute.GetValue(AttributeValueIndex(0), att_val.get());
  attribute.GetValue(AttributeValueIndex(0), min_values_.data());
  attribute.GetValue(AttributeValueIndex(0), max_values.get());
  for (AttributeValueIndex i(1); i < static_cast<uint32_t>(attribute.size());
       ++i) {
    attribute.GetValue(i, att_val.get());
    for (int c = 0; c < num_components; ++c) {
      if (std::isnan(att_val[c])) {
        return false;
      }
      if (min_values_[c] > att_val[c]) {
        min_values_[c] = att_val[c];
      }
      if (max_values[c] < att_val[c]) {
        max_values[c] = att_val[c];
      }
    }
  }
  // Reject non-finite bounds (e.g. Inf that slipped past the NaN check) and
  // compute the largest per-component extent.
  for (int c = 0; c < num_components; ++c) {
    if (std::isnan(min_values_[c]) || std::isinf(min_values_[c]) ||
        std::isnan(max_values[c]) || std::isinf(max_values[c])) {
      return false;
    }
    const float dif = max_values[c] - min_values_[c];
    if (dif > range_) {
      range_ = dif;
    }
  }
  // In case all values are the same, initialize the range to unit length. This
  // will ensure that all values are quantized properly to the same value.
  if (range_ == 0.f) {
    range_ = 1.f;
  }
  return true;
}
// Writes min values, range, and the bit count (as one byte) into
// |encoder_buffer|, in the order DecodeParameters() expects to read them.
// Fails when the transform has not been initialized.
bool AttributeQuantizationTransform::EncodeParameters(
    EncoderBuffer *encoder_buffer) const {
  if (is_initialized()) {
    encoder_buffer->Encode(min_values_.data(),
                           sizeof(float) * min_values_.size());
    encoder_buffer->Encode(range_);
    encoder_buffer->Encode(static_cast<uint8_t>(quantization_bits_));
    return true;
  }
  return false;
}
// Reads min values (one per component of |attribute|), the range, and the
// bit count back from |decoder_buffer|, validating the bit count.
bool AttributeQuantizationTransform::DecodeParameters(
    const PointAttribute &attribute, DecoderBuffer *decoder_buffer) {
  min_values_.resize(attribute.num_components());
  if (!decoder_buffer->Decode(&min_values_[0],
                              sizeof(float) * min_values_.size())) {
    return false;
  }
  if (!decoder_buffer->Decode(&range_)) {
    return false;
  }
  uint8_t quantization_bits;
  if (!decoder_buffer->Decode(&quantization_bits)) {
    return false;
  }
  if (!IsQuantizationValid(quantization_bits)) {
    return false;
  }
  quantization_bits_ = quantization_bits;
  return true;
}
// Identity-mapping variant: quantizes the first |num_points| values of
// |attribute| (via its own index mapping) into int32s in |target_attribute|.
void AttributeQuantizationTransform::GeneratePortableAttribute(
    const PointAttribute &attribute, int num_points,
    PointAttribute *target_attribute) const {
  DRACO_DCHECK(is_initialized());
  const int num_components = attribute.num_components();
  // Quantize all values using the order given by point_ids.
  int32_t *const portable_attribute_data = reinterpret_cast<int32_t *>(
      target_attribute->GetAddress(AttributeValueIndex(0)));
  const uint32_t max_quantized_value = (1 << (quantization_bits_)) - 1;
  Quantizer quantizer;
  quantizer.Init(range(), max_quantized_value);
  int32_t dst_index = 0;
  const std::unique_ptr<float[]> att_val(new float[num_components]);
  for (PointIndex i(0); i < num_points; ++i) {
    const AttributeValueIndex att_val_id = attribute.mapped_index(i);
    attribute.GetValue(att_val_id, att_val.get());
    for (int c = 0; c < num_components; ++c) {
      // Shift by the component minimum so quantization starts at zero.
      const float value = (att_val[c] - min_values()[c]);
      const int32_t q_val = quantizer.QuantizeFloat(value);
      portable_attribute_data[dst_index++] = q_val;
    }
  }
}
// Remapping variant: quantizes values in the order given by |point_ids| into
// int32s in |target_attribute|. (|num_points| is unused here; the id list
// defines the output length.)
void AttributeQuantizationTransform::GeneratePortableAttribute(
    const PointAttribute &attribute, const std::vector<PointIndex> &point_ids,
    int num_points, PointAttribute *target_attribute) const {
  DRACO_DCHECK(is_initialized());
  const int num_components = attribute.num_components();
  // Quantize all values using the order given by point_ids.
  int32_t *const portable_attribute_data = reinterpret_cast<int32_t *>(
      target_attribute->GetAddress(AttributeValueIndex(0)));
  const uint32_t max_quantized_value = (1 << (quantization_bits_)) - 1;
  Quantizer quantizer;
  quantizer.Init(range(), max_quantized_value);
  int32_t dst_index = 0;
  const std::unique_ptr<float[]> att_val(new float[num_components]);
  for (uint32_t i = 0; i < point_ids.size(); ++i) {
    const AttributeValueIndex att_val_id = attribute.mapped_index(point_ids[i]);
    attribute.GetValue(att_val_id, att_val.get());
    for (int c = 0; c < num_components; ++c) {
      // Shift by the component minimum so quantization starts at zero.
      const float value = (att_val[c] - min_values()[c]);
      const int32_t q_val = quantizer.QuantizeFloat(value);
      portable_attribute_data[dst_index++] = q_val;
    }
  }
}
} // namespace draco

View File

@ -0,0 +1,102 @@
// Copyright 2017 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_ATTRIBUTES_ATTRIBUTE_QUANTIZATION_TRANSFORM_H_
#define DRACO_ATTRIBUTES_ATTRIBUTE_QUANTIZATION_TRANSFORM_H_
#include <vector>
#include "draco/attributes/attribute_transform.h"
#include "draco/attributes/point_attribute.h"
#include "draco/core/encoder_buffer.h"
namespace draco {
// Attribute transform for quantized attributes.
// Attribute transform for quantized attributes. Floats are mapped to uint32
// integers using per-component minimum values and a shared range; the inverse
// transform restores approximate float values.
class AttributeQuantizationTransform : public AttributeTransform {
 public:
  // -1 bits marks the transform as uninitialized (see is_initialized()).
  AttributeQuantizationTransform() : quantization_bits_(-1), range_(0.f) {}

  // Return attribute transform type.
  AttributeTransformType Type() const override {
    return ATTRIBUTE_QUANTIZATION_TRANSFORM;
  }
  // Try to init transform from attribute.
  bool InitFromAttribute(const PointAttribute &attribute) override;
  // Copy parameter values into the provided AttributeTransformData instance.
  void CopyToAttributeTransformData(
      AttributeTransformData *out_data) const override;

  bool TransformAttribute(const PointAttribute &attribute,
                          const std::vector<PointIndex> &point_ids,
                          PointAttribute *target_attribute) override;

  bool InverseTransformAttribute(const PointAttribute &attribute,
                                 PointAttribute *target_attribute) override;

  // Explicitly set parameters; returns false for an invalid bit count.
  bool SetParameters(int quantization_bits, const float *min_values,
                     int num_components, float range);

  // Derive parameters from the attribute data itself.
  bool ComputeParameters(const PointAttribute &attribute,
                         const int quantization_bits);

  // Encode relevant parameters into buffer.
  bool EncodeParameters(EncoderBuffer *encoder_buffer) const override;

  bool DecodeParameters(const PointAttribute &attribute,
                        DecoderBuffer *decoder_buffer) override;

  int32_t quantization_bits() const { return quantization_bits_; }
  float min_value(int axis) const { return min_values_[axis]; }
  const std::vector<float> &min_values() const { return min_values_; }
  float range() const { return range_; }
  bool is_initialized() const { return quantization_bits_ != -1; }

 protected:
  // Create portable attribute using 1:1 mapping between points in the input and
  // output attribute.
  void GeneratePortableAttribute(const PointAttribute &attribute,
                                 int num_points,
                                 PointAttribute *target_attribute) const;

  // Create portable attribute using custom mapping between input and output
  // points.
  void GeneratePortableAttribute(const PointAttribute &attribute,
                                 const std::vector<PointIndex> &point_ids,
                                 int num_points,
                                 PointAttribute *target_attribute) const;

  DataType GetTransformedDataType(
      const PointAttribute &attribute) const override {
    return DT_UINT32;
  }
  int GetTransformedNumComponents(
      const PointAttribute &attribute) const override {
    return attribute.num_components();
  }

  static bool IsQuantizationValid(int quantization_bits);

 private:
  int32_t quantization_bits_;  // -1 until set, computed, or decoded.

  // Minimal dequantized value for each component of the attribute.
  std::vector<float> min_values_;

  // Bounds of the dequantized attribute (max delta over all components).
  float range_;
};
} // namespace draco
#endif  // DRACO_ATTRIBUTES_ATTRIBUTE_QUANTIZATION_TRANSFORM_H_

View File

@ -0,0 +1,41 @@
// Copyright 2017 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "draco/attributes/attribute_transform.h"
namespace draco {
// Copies this transform's parameters into a fresh AttributeTransformData and
// attaches it to |attribute| (transferring ownership to the attribute).
bool AttributeTransform::TransferToAttribute(PointAttribute *attribute) const {
  std::unique_ptr<AttributeTransformData> transform_data(
      new AttributeTransformData());
  this->CopyToAttributeTransformData(transform_data.get());
  attribute->SetAttributeTransformData(std::move(transform_data));
  return true;
}
// Creates a new attribute suitable as the target of TransformAttribute():
// same attribute type and unique id as |src_attribute|, but with the
// transform's own component count / data type, |num_entries| values, and an
// identity point-to-value mapping.
std::unique_ptr<PointAttribute> AttributeTransform::InitTransformedAttribute(
    const PointAttribute &src_attribute, int num_entries) {
  const int num_components = GetTransformedNumComponents(src_attribute);
  const DataType dt = GetTransformedDataType(src_attribute);
  GeometryAttribute ga;
  // Tightly packed (stride = component count * component size), no offset,
  // not normalized.
  ga.Init(src_attribute.attribute_type(), nullptr, num_components, dt, false,
          num_components * DataTypeLength(dt), 0);
  std::unique_ptr<PointAttribute> transformed_attribute(new PointAttribute(ga));
  transformed_attribute->Reset(num_entries);
  transformed_attribute->SetIdentityMapping();
  transformed_attribute->set_unique_id(src_attribute.unique_id());
  return transformed_attribute;
}
} // namespace draco

View File

@ -0,0 +1,76 @@
// Copyright 2017 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_ATTRIBUTES_ATTRIBUTE_TRANSFORM_H_
#define DRACO_ATTRIBUTES_ATTRIBUTE_TRANSFORM_H_
#include "draco/attributes/attribute_transform_data.h"
#include "draco/attributes/point_attribute.h"
#include "draco/core/decoder_buffer.h"
#include "draco/core/encoder_buffer.h"
namespace draco {
// Virtual base class for various attribute transforms, enforcing common
// interface where possible.
// Virtual base class for various attribute transforms, enforcing common
// interface where possible. Concrete transforms (quantization, octahedron)
// implement the forward/inverse mapping and (de)serialization of parameters.
class AttributeTransform {
 public:
  virtual ~AttributeTransform() = default;

  // Return attribute transform type.
  virtual AttributeTransformType Type() const = 0;
  // Try to init transform from attribute.
  virtual bool InitFromAttribute(const PointAttribute &attribute) = 0;
  // Copy parameter values into the provided AttributeTransformData instance.
  virtual void CopyToAttributeTransformData(
      AttributeTransformData *out_data) const = 0;
  // Attaches a copy of this transform's parameters to |attribute|.
  bool TransferToAttribute(PointAttribute *attribute) const;

  // Applies the transform to |attribute| and stores the result in
  // |target_attribute|. |point_ids| is an optional vector that can be used to
  // remap values during the transform.
  virtual bool TransformAttribute(const PointAttribute &attribute,
                                  const std::vector<PointIndex> &point_ids,
                                  PointAttribute *target_attribute) = 0;

  // Applies an inverse transform to |attribute| and stores the result in
  // |target_attribute|. In this case, |attribute| is an attribute that was
  // already transformed (e.g. quantized) and |target_attribute| is the
  // attribute before the transformation.
  virtual bool InverseTransformAttribute(const PointAttribute &attribute,
                                         PointAttribute *target_attribute) = 0;

  // Encodes all data needed by the transformation into the |encoder_buffer|.
  virtual bool EncodeParameters(EncoderBuffer *encoder_buffer) const = 0;

  // Decodes all data needed to transform |attribute| back to the original
  // format.
  virtual bool DecodeParameters(const PointAttribute &attribute,
                                DecoderBuffer *decoder_buffer) = 0;

  // Initializes a transformed attribute that can be used as target in the
  // TransformAttribute() function call.
  virtual std::unique_ptr<PointAttribute> InitTransformedAttribute(
      const PointAttribute &src_attribute, int num_entries);

 protected:
  // Data type of the values produced by the forward transform.
  virtual DataType GetTransformedDataType(
      const PointAttribute &attribute) const = 0;
  // Component count of the values produced by the forward transform.
  virtual int GetTransformedNumComponents(
      const PointAttribute &attribute) const = 0;
};
} // namespace draco
#endif  // DRACO_ATTRIBUTES_ATTRIBUTE_TRANSFORM_H_

View File

@ -0,0 +1,71 @@
// Copyright 2017 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_ATTRIBUTES_ATTRIBUTE_TRANSFORM_DATA_H_
#define DRACO_ATTRIBUTES_ATTRIBUTE_TRANSFORM_DATA_H_
#include <memory>
#include "draco/attributes/attribute_transform_type.h"
#include "draco/core/data_buffer.h"
namespace draco {
// Class for holding parameter values for an attribute transform of a
// PointAttribute. This can be for example quantization data for an attribute
// that holds quantized values. This class provides only a basic storage for
// attribute transform parameters and it should be accessed only through wrapper
// classes for a specific transform (e.g. AttributeQuantizationTransform).
// Class for holding parameter values for an attribute transform of a
// PointAttribute. This can be for example quantization data for an attribute
// that holds quantized values. This class provides only a basic storage for
// attribute transform parameters and it should be accessed only through wrapper
// classes for a specific transform (e.g. AttributeQuantizationTransform).
class AttributeTransformData {
 public:
  AttributeTransformData() : transform_type_(ATTRIBUTE_INVALID_TRANSFORM) {}
  AttributeTransformData(const AttributeTransformData &data) = default;

  // Returns the type of the attribute transform that is described by the class.
  AttributeTransformType transform_type() const { return transform_type_; }
  void set_transform_type(AttributeTransformType type) {
    transform_type_ = type;
  }

  // Returns a parameter value on a given |byte_offset|.
  // The caller must use the same DataTypeT and offsets it used when writing;
  // the buffer stores raw bytes with no type information.
  template <typename DataTypeT>
  DataTypeT GetParameterValue(int byte_offset) const {
    DataTypeT out_data;
    buffer_.Read(byte_offset, &out_data, sizeof(DataTypeT));
    return out_data;
  }

  // Sets a parameter value on a given |byte_offset|, growing the buffer if
  // the value would not fit.
  template <typename DataTypeT>
  void SetParameterValue(int byte_offset, const DataTypeT &in_data) {
    if (byte_offset + sizeof(DataTypeT) > buffer_.data_size()) {
      buffer_.Resize(byte_offset + sizeof(DataTypeT));
    }
    buffer_.Write(byte_offset, &in_data, sizeof(DataTypeT));
  }

  // Sets a parameter value at the end of the |buffer_|.
  template <typename DataTypeT>
  void AppendParameterValue(const DataTypeT &in_data) {
    SetParameterValue(static_cast<int>(buffer_.data_size()), in_data);
  }

 private:
  AttributeTransformType transform_type_;
  // Raw parameter bytes; layout is defined by the owning transform class.
  DataBuffer buffer_;
};
} // namespace draco
#endif // DRACO_ATTRIBUTES_ATTRIBUTE_TRANSFORM_DATA_H_

View File

@ -0,0 +1,30 @@
// Copyright 2017 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_ATTRIBUTES_ATTRIBUTE_TRANSFORM_TYPE_H_
#define DRACO_ATTRIBUTES_ATTRIBUTE_TRANSFORM_TYPE_H_
namespace draco {
// List of all currently supported attribute transforms.
// NOTE(review): the explicit numeric values suggest these are serialized in
// the Draco bitstream — do not renumber; confirm before adding entries.
enum AttributeTransformType {
  // No transform data attached (the default-constructed state of
  // AttributeTransformData).
  ATTRIBUTE_INVALID_TRANSFORM = -1,
  // Attribute values are stored as-is, without any transform.
  ATTRIBUTE_NO_TRANSFORM = 0,
  // Attribute values were quantized.
  ATTRIBUTE_QUANTIZATION_TRANSFORM = 1,
  // Presumably an octahedral encoding of unit vectors (normals) — see the
  // corresponding octahedron transform class.
  ATTRIBUTE_OCTAHEDRON_TRANSFORM = 2,
};
} // namespace draco
#endif // DRACO_ATTRIBUTES_ATTRIBUTE_TRANSFORM_TYPE_H_

View File

@ -0,0 +1,110 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "draco/attributes/geometry_attribute.h"
namespace draco {
// Constructs an invalid attribute: no buffer attached (IsValid() == false),
// one FLOAT32 component and attribute_type() == INVALID. Call Init() before
// using the attribute.
GeometryAttribute::GeometryAttribute()
    : buffer_(nullptr),
      num_components_(1),
      data_type_(DT_FLOAT32),
      // Fix: |normalized_| was previously missing from the init list, so
      // reading normalized() on a default-constructed attribute before Init()
      // observed an indeterminate value.
      normalized_(false),
      byte_stride_(0),
      byte_offset_(0),
      attribute_type_(INVALID),
      unique_id_(0) {}
// Initializes and enables the attribute over |buffer|. |byte_stride| is the
// distance in bytes between consecutive entries and |byte_offset| is where
// the first entry starts inside the buffer.
void GeometryAttribute::Init(GeometryAttribute::Type attribute_type,
                             DataBuffer *buffer, uint8_t num_components,
                             DataType data_type, bool normalized,
                             int64_t byte_stride, int64_t byte_offset) {
  buffer_ = buffer;
  if (buffer) {
    // Snapshot the buffer's id and revision counter so later changes to the
    // buffer can be detected through buffer_descriptor().
    buffer_descriptor_.buffer_id = buffer->buffer_id();
    buffer_descriptor_.buffer_update_count = buffer->update_count();
  }
  num_components_ = num_components;
  data_type_ = data_type;
  normalized_ = normalized;
  byte_stride_ = byte_stride;
  byte_offset_ = byte_offset;
  attribute_type_ = attribute_type;
}
// Copies all attribute metadata and, when |src_att| has a buffer, its raw
// contents into this attribute's buffer. Returns false when |src_att| has a
// buffer but this attribute does not (nothing to copy into).
// NOTE(review): the metadata members are overwritten before the buffer check,
// so on a false return this attribute has already been partially updated.
bool GeometryAttribute::CopyFrom(const GeometryAttribute &src_att) {
  num_components_ = src_att.num_components_;
  data_type_ = src_att.data_type_;
  normalized_ = src_att.normalized_;
  byte_stride_ = src_att.byte_stride_;
  byte_offset_ = src_att.byte_offset_;
  attribute_type_ = src_att.attribute_type_;
  buffer_descriptor_ = src_att.buffer_descriptor_;
  unique_id_ = src_att.unique_id_;
  if (src_att.buffer_ == nullptr) {
    buffer_ = nullptr;
  } else {
    if (buffer_ == nullptr) {
      // Destination has no buffer to receive the source data.
      return false;
    }
    buffer_->Update(src_att.buffer_->data(), src_att.buffer_->data_size());
  }
#ifdef DRACO_TRANSCODER_SUPPORTED
  name_ = src_att.name_;
#endif
  return true;
}
// Equality compares attribute metadata and the buffer *descriptors* only;
// the underlying buffer contents are never examined.
// NOTE(review): |normalized_| and |unique_id_| are excluded from the
// comparison — this matches GeometryAttributeHasher, which skips them too;
// confirm the exclusion is intentional.
bool GeometryAttribute::operator==(const GeometryAttribute &va) const {
  if (attribute_type_ != va.attribute_type_) {
    return false;
  }
  // It's OK to compare just the buffer descriptors here. We don't need to
  // compare the buffers themselves.
  if (buffer_descriptor_.buffer_id != va.buffer_descriptor_.buffer_id) {
    return false;
  }
  if (buffer_descriptor_.buffer_update_count !=
      va.buffer_descriptor_.buffer_update_count) {
    return false;
  }
  if (num_components_ != va.num_components_) {
    return false;
  }
  if (data_type_ != va.data_type_) {
    return false;
  }
  if (byte_stride_ != va.byte_stride_) {
    return false;
  }
  if (byte_offset_ != va.byte_offset_) {
    return false;
  }
#ifdef DRACO_TRANSCODER_SUPPORTED
  if (name_ != va.name_) {
    return false;
  }
#endif
  return true;
}
// Attaches a new storage buffer to the attribute and refreshes the stored
// buffer descriptor. Component count, data type and normalization flag are
// left unchanged.
void GeometryAttribute::ResetBuffer(DataBuffer *buffer, int64_t byte_stride,
                                    int64_t byte_offset) {
  buffer_ = buffer;
  buffer_descriptor_.buffer_id = buffer->buffer_id();
  buffer_descriptor_.buffer_update_count = buffer->update_count();
  byte_stride_ = byte_stride;
  byte_offset_ = byte_offset;
}
} // namespace draco

View File

@ -0,0 +1,541 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_ATTRIBUTES_GEOMETRY_ATTRIBUTE_H_
#define DRACO_ATTRIBUTES_GEOMETRY_ATTRIBUTE_H_
#include <algorithm>
#include <array>
#include <cmath>
#include <limits>
#include "draco/attributes/geometry_indices.h"
#include "draco/core/data_buffer.h"
#include "draco/core/hash_utils.h"
#include "draco/draco_features.h"
#ifdef DRACO_TRANSCODER_SUPPORTED
#include "draco/core/status.h"
#endif
namespace draco {
// The class provides access to a specific attribute which is stored in a
// DataBuffer, such as normals or coordinates. However, the GeometryAttribute
// class does not own the buffer and the buffer itself may store other data
// unrelated to this attribute (such as data for other attributes in which case
// we can have multiple GeometryAttributes accessing one buffer). Typically,
// all attributes for a point (or corner, face) are stored in one block, which
// is advantageous in terms of memory access. The length of the entire block is
// given by the byte_stride, the position where the attribute starts is given by
// the byte_offset, the actual number of bytes that the attribute occupies is
// given by the data_type and the number of components.
class GeometryAttribute {
 public:
  // Supported attribute types.
  enum Type {
    INVALID = -1,
    // Named attributes start here. The difference between named and generic
    // attributes is that for named attributes we know their purpose and we
    // can apply some special methods when dealing with them (e.g. during
    // encoding).
    POSITION = 0,
    NORMAL,
    COLOR,
    TEX_COORD,
    // A special id used to mark attributes that are not assigned to any known
    // predefined use case. Such attributes are often used for a shader specific
    // data.
    GENERIC,
#ifdef DRACO_TRANSCODER_SUPPORTED
    // TODO(ostava): Adding a new attribute would be bit-stream change for GLTF.
    // Older decoders wouldn't know what to do with this attribute type. This
    // should be open-sourced only when we are ready to increase our bit-stream
    // version.
    TANGENT,
    MATERIAL,
    JOINTS,
    WEIGHTS,
#endif
    // Total number of different attribute types.
    // Always keep behind all named attributes.
    NAMED_ATTRIBUTES_COUNT,
  };
  // Creates an invalid attribute (IsValid() == false); call Init() to enable.
  GeometryAttribute();
  // Initializes and enables the attribute.
  void Init(Type attribute_type, DataBuffer *buffer, uint8_t num_components,
            DataType data_type, bool normalized, int64_t byte_stride,
            int64_t byte_offset);
  bool IsValid() const { return buffer_ != nullptr; }
  // Copies data from the source attribute to the this attribute.
  // This attribute must have a valid buffer allocated otherwise the operation
  // is going to fail and return false.
  bool CopyFrom(const GeometryAttribute &src_att);
  // Function for getting a attribute value with a specific format.
  // Unsafe. Caller must ensure the accessed memory is valid.
  // T is the attribute data type.
  // att_components_t is the number of attribute components.
  template <typename T, int att_components_t>
  std::array<T, att_components_t> GetValue(
      AttributeValueIndex att_index) const {
    // Byte address of the attribute index.
    const int64_t byte_pos = byte_offset_ + byte_stride_ * att_index.value();
    std::array<T, att_components_t> out;
    buffer_->Read(byte_pos, &(out[0]), sizeof(out));
    return out;
  }
  // Function for getting a attribute value with a specific format.
  // T is the attribute data type.
  // att_components_t is the number of attribute components.
  // Bounds-checked variant: returns false instead of reading past the buffer.
  template <typename T, int att_components_t>
  bool GetValue(AttributeValueIndex att_index,
                std::array<T, att_components_t> *out) const {
    // Byte address of the attribute index.
    const int64_t byte_pos = byte_offset_ + byte_stride_ * att_index.value();
    // Check we are not reading past end of data.
    if (byte_pos + sizeof(*out) > buffer_->data_size()) {
      return false;
    }
    buffer_->Read(byte_pos, &((*out)[0]), sizeof(*out));
    return true;
  }
  // Returns the byte position of the attribute entry in the data buffer.
  inline int64_t GetBytePos(AttributeValueIndex att_index) const {
    return byte_offset_ + byte_stride_ * att_index.value();
  }
  inline const uint8_t *GetAddress(AttributeValueIndex att_index) const {
    const int64_t byte_pos = GetBytePos(att_index);
    return buffer_->data() + byte_pos;
  }
  inline uint8_t *GetAddress(AttributeValueIndex att_index) {
    const int64_t byte_pos = GetBytePos(att_index);
    return buffer_->data() + byte_pos;
  }
  // NOTE(review): only guards the upper end of the buffer; an address below
  // buffer_->data() would still be reported as valid.
  inline bool IsAddressValid(const uint8_t *address) const {
    return ((buffer_->data() + buffer_->data_size()) > address);
  }
  // Fills out_data with the raw value of the requested attribute entry.
  // out_data must be at least byte_stride_ long.
  void GetValue(AttributeValueIndex att_index, void *out_data) const {
    const int64_t byte_pos = byte_offset_ + byte_stride_ * att_index.value();
    buffer_->Read(byte_pos, out_data, byte_stride_);
  }
  // Sets a value of an attribute entry. The input value must be allocated to
  // cover all components of a single attribute entry.
  // NOTE(review): unlike the getters above, this ignores byte_offset_ and
  // assumes a tightly packed buffer (position = index * byte_stride()) —
  // presumably only used on attributes that own their whole buffer; confirm.
  void SetAttributeValue(AttributeValueIndex entry_index, const void *value) {
    const int64_t byte_pos = entry_index.value() * byte_stride();
    buffer_->Write(byte_pos, value, byte_stride());
  }
#ifdef DRACO_TRANSCODER_SUPPORTED
  // Sets a value of an attribute entry. The input |value| must have
  // |input_num_components| entries and it will be automatically converted to
  // the internal format used by the geometry attribute. If the conversion is
  // not possible, an error status will be returned.
  template <typename InputT>
  Status ConvertAndSetAttributeValue(AttributeValueIndex avi,
                                     int input_num_components,
                                     const InputT *value);
#endif
  // DEPRECATED: Use
  //   ConvertValue(AttributeValueIndex att_id,
  //               int out_num_components,
  //               OutT *out_val);
  //
  // Function for conversion of a attribute to a specific output format.
  // OutT is the desired data type of the attribute.
  // out_att_components_t is the number of components of the output format.
  // Returns false when the conversion failed.
  template <typename OutT, int out_att_components_t>
  bool ConvertValue(AttributeValueIndex att_id, OutT *out_val) const {
    return ConvertValue(att_id, out_att_components_t, out_val);
  }
  // Function for conversion of a attribute to a specific output format.
  // |out_val| needs to be able to store |out_num_components| values.
  // OutT is the desired data type of the attribute.
  // Returns false when the conversion failed.
  // Dispatches on the stored data_type_ to the typed conversion helper.
  template <typename OutT>
  bool ConvertValue(AttributeValueIndex att_id, int8_t out_num_components,
                    OutT *out_val) const {
    if (out_val == nullptr) {
      return false;
    }
    switch (data_type_) {
      case DT_INT8:
        return ConvertTypedValue<int8_t, OutT>(att_id, out_num_components,
                                               out_val);
      case DT_UINT8:
        return ConvertTypedValue<uint8_t, OutT>(att_id, out_num_components,
                                                out_val);
      case DT_INT16:
        return ConvertTypedValue<int16_t, OutT>(att_id, out_num_components,
                                                out_val);
      case DT_UINT16:
        return ConvertTypedValue<uint16_t, OutT>(att_id, out_num_components,
                                                 out_val);
      case DT_INT32:
        return ConvertTypedValue<int32_t, OutT>(att_id, out_num_components,
                                                out_val);
      case DT_UINT32:
        return ConvertTypedValue<uint32_t, OutT>(att_id, out_num_components,
                                                 out_val);
      case DT_INT64:
        return ConvertTypedValue<int64_t, OutT>(att_id, out_num_components,
                                                out_val);
      case DT_UINT64:
        return ConvertTypedValue<uint64_t, OutT>(att_id, out_num_components,
                                                 out_val);
      case DT_FLOAT32:
        return ConvertTypedValue<float, OutT>(att_id, out_num_components,
                                              out_val);
      case DT_FLOAT64:
        return ConvertTypedValue<double, OutT>(att_id, out_num_components,
                                               out_val);
      case DT_BOOL:
        return ConvertTypedValue<bool, OutT>(att_id, out_num_components,
                                             out_val);
      default:
        // Wrong attribute type.
        return false;
    }
  }
  // Function for conversion of a attribute to a specific output format.
  // The |out_value| must be able to store all components of a single attribute
  // entry.
  // OutT is the desired data type of the attribute.
  // Returns false when the conversion failed.
  template <typename OutT>
  bool ConvertValue(AttributeValueIndex att_index, OutT *out_value) const {
    return ConvertValue<OutT>(att_index, num_components_, out_value);
  }
  // Utility function. Returns |attribute_type| as std::string.
  static std::string TypeToString(Type attribute_type) {
    switch (attribute_type) {
      case INVALID:
        return "INVALID";
      case POSITION:
        return "POSITION";
      case NORMAL:
        return "NORMAL";
      case COLOR:
        return "COLOR";
      case TEX_COORD:
        return "TEX_COORD";
      case GENERIC:
        return "GENERIC";
#ifdef DRACO_TRANSCODER_SUPPORTED
      case TANGENT:
        return "TANGENT";
      case MATERIAL:
        return "MATERIAL";
      case JOINTS:
        return "JOINTS";
      case WEIGHTS:
        return "WEIGHTS";
#endif
      default:
        return "UNKNOWN";
    }
  }
  bool operator==(const GeometryAttribute &va) const;
  // Returns the type of the attribute indicating the nature of the attribute.
  Type attribute_type() const { return attribute_type_; }
  void set_attribute_type(Type type) { attribute_type_ = type; }
  // Returns the data type that is stored in the attribute.
  DataType data_type() const { return data_type_; }
  // Returns the number of components that are stored for each entry.
  // For position attribute this is usually three (x,y,z),
  // while texture coordinates have two components (u,v).
  uint8_t num_components() const { return num_components_; }
  // Indicates whether the data type should be normalized before interpretation,
  // that is, it should be divided by the max value of the data type.
  bool normalized() const { return normalized_; }
  void set_normalized(bool normalized) { normalized_ = normalized; }
  // The buffer storing the entire data of the attribute.
  const DataBuffer *buffer() const { return buffer_; }
  // Returns the number of bytes between two attribute entries, this is, at
  // least size of the data types times number of components.
  int64_t byte_stride() const { return byte_stride_; }
  // The offset where the attribute starts within the block of size byte_stride.
  int64_t byte_offset() const { return byte_offset_; }
  void set_byte_offset(int64_t byte_offset) { byte_offset_ = byte_offset; }
  DataBufferDescriptor buffer_descriptor() const { return buffer_descriptor_; }
  uint32_t unique_id() const { return unique_id_; }
  void set_unique_id(uint32_t id) { unique_id_ = id; }
#ifdef DRACO_TRANSCODER_SUPPORTED
  std::string name() const { return name_; }
  void set_name(std::string name) { name_ = name; }
#endif

 protected:
  // Sets a new internal storage for the attribute.
  void ResetBuffer(DataBuffer *buffer, int64_t byte_stride,
                   int64_t byte_offset);

 private:
  // Function for conversion of an attribute to a specific output format given a
  // format of the stored attribute.
  // T is the stored attribute data type.
  // OutT is the desired data type of the attribute.
  template <typename T, typename OutT>
  bool ConvertTypedValue(AttributeValueIndex att_id, uint8_t out_num_components,
                         OutT *out_value) const {
    const uint8_t *src_address = GetAddress(att_id);
    // Convert all components available in both the original and output formats.
    for (int i = 0; i < std::min(num_components_, out_num_components); ++i) {
      if (!IsAddressValid(src_address)) {
        return false;
      }
      const T in_value = *reinterpret_cast<const T *>(src_address);
      if (!ConvertComponentValue<T, OutT>(in_value, normalized_,
                                          out_value + i)) {
        return false;
      }
      src_address += sizeof(T);
    }
    // Fill empty data for unused output components if needed.
    for (int i = num_components_; i < out_num_components; ++i) {
      out_value[i] = static_cast<OutT>(0);
    }
    return true;
  }
#ifdef DRACO_TRANSCODER_SUPPORTED
  // Function that converts input |value| from type T to the internal attribute
  // representation defined by OutT and |num_components_|.
  template <typename T, typename OutT>
  Status ConvertAndSetAttributeTypedValue(AttributeValueIndex avi,
                                          int8_t input_num_components,
                                          const T *value) {
    uint8_t *address = GetAddress(avi);
    // Convert all components available in both the original and output formats.
    for (int i = 0; i < num_components_; ++i) {
      if (!IsAddressValid(address)) {
        return ErrorStatus("GeometryAttribute: Invalid address.");
      }
      OutT *const out_value = reinterpret_cast<OutT *>(address);
      if (i < input_num_components) {
        if (!ConvertComponentValue<T, OutT>(*(value + i), normalized_,
                                            out_value)) {
          return ErrorStatus(
              "GeometryAttribute: Failed to convert component value.");
        }
      } else {
        // Zero-fill components the input does not provide.
        *out_value = static_cast<OutT>(0);
      }
      address += sizeof(OutT);
    }
    return OkStatus();
  }
#endif  // DRACO_TRANSCODER_SUPPORTED
  // Converts |in_value| of type T into |out_value| of type OutT. If
  // |normalized| is true, any conversion between floating point and integer
  // values will be treating integers as normalized types (the entire integer
  // range will be used to represent 0-1 floating point range).
  template <typename T, typename OutT>
  static bool ConvertComponentValue(const T &in_value, bool normalized,
                                    OutT *out_value) {
    // Make sure the |in_value| can be represented as an integral type OutT.
    if (std::is_integral<OutT>::value) {
      // Make sure the |in_value| fits within the range of values that OutT
      // is able to represent. Perform the check only for integral types.
      // NOTE(review): when T is signed and OutT is unsigned, the comparisons
      // below go through the usual arithmetic conversions, so a negative
      // |in_value| may wrap to a large unsigned value and slip past the
      // range check — confirm against upstream Draco behavior.
      if (!std::is_same<T, bool>::value && std::is_integral<T>::value) {
        static constexpr OutT kOutMin =
            std::is_signed<T>::value ? std::numeric_limits<OutT>::min() : 0;
        if (in_value < kOutMin || in_value > std::numeric_limits<OutT>::max()) {
          return false;
        }
      }
      // Check conversion of floating point |in_value| to integral value OutT.
      if (std::is_floating_point<T>::value) {
        // Make sure the floating point |in_value| is not NaN and not Inf as
        // integral type OutT is unable to represent these values.
        if (sizeof(in_value) > sizeof(double)) {
          if (std::isnan(static_cast<long double>(in_value)) ||
              std::isinf(static_cast<long double>(in_value))) {
            return false;
          }
        } else if (sizeof(in_value) > sizeof(float)) {
          if (std::isnan(static_cast<double>(in_value)) ||
              std::isinf(static_cast<double>(in_value))) {
            return false;
          }
        } else {
          if (std::isnan(static_cast<float>(in_value)) ||
              std::isinf(static_cast<float>(in_value))) {
            return false;
          }
        }
        // Make sure the floating point |in_value| fits within the range of
        // values that integral type OutT is able to represent.
        if (in_value < std::numeric_limits<OutT>::min() ||
            in_value >= std::numeric_limits<OutT>::max()) {
          return false;
        }
      }
    }
    if (std::is_integral<T>::value && std::is_floating_point<OutT>::value &&
        normalized) {
      // When converting integer to floating point, normalize the value if
      // necessary.
      *out_value = static_cast<OutT>(in_value);
      *out_value /= static_cast<OutT>(std::numeric_limits<T>::max());
    } else if (std::is_floating_point<T>::value &&
               std::is_integral<OutT>::value && normalized) {
      // Converting from floating point to a normalized integer.
      if (in_value > 1 || in_value < 0) {
        // Normalized float values need to be between 0 and 1.
        return false;
      }
      // TODO(ostava): Consider allowing float to normalized integer conversion
      // for 64-bit integer types. Currently it doesn't work because we don't
      // have a floating point type that could store all 64 bit integers.
      if (sizeof(OutT) > 4) {
        return false;
      }
      // Expand the float to the range of the output integer and round it to the
      // nearest representable value. Use doubles for the math to ensure the
      // integer values are represented properly during the conversion process.
      *out_value = static_cast<OutT>(std::floor(
          in_value * static_cast<double>(std::numeric_limits<OutT>::max()) +
          0.5));
    } else {
      *out_value = static_cast<OutT>(in_value);
    }
    // TODO(ostava): Add handling of normalized attributes when converting
    // between different integer representations. If the attribute is
    // normalized, integer values should be converted as if they represent 0-1
    // range. E.g. when we convert uint16 to uint8, the range <0, 2^16 - 1>
    // should be converted to range <0, 2^8 - 1>.
    return true;
  }
  DataBuffer *buffer_;
  // The buffer descriptor is stored at the time the buffer is attached to this
  // attribute. The purpose is to detect if any changes happened to the buffer
  // since the time it was attached.
  DataBufferDescriptor buffer_descriptor_;
  uint8_t num_components_;
  DataType data_type_;
  bool normalized_;
  int64_t byte_stride_;
  int64_t byte_offset_;
  Type attribute_type_;
  // Unique id of this attribute. No two attributes could have the same unique
  // id. It is used to identify each attribute, especially when there are
  // multiple attribute of the same type in a point cloud.
  uint32_t unique_id_;
#ifdef DRACO_TRANSCODER_SUPPORTED
  std::string name_;
#endif
  friend struct GeometryAttributeHasher;
};
#ifdef DRACO_TRANSCODER_SUPPORTED
// Dispatches on the attribute's stored data_type() and forwards to the typed
// ConvertAndSetAttributeTypedValue() helper, which converts |value| into the
// internal representation. Returns an error status for unsupported types.
template <typename InputT>
Status GeometryAttribute::ConvertAndSetAttributeValue(AttributeValueIndex avi,
                                                      int input_num_components,
                                                      const InputT *value) {
  switch (this->data_type()) {
    case DT_INT8:
      return ConvertAndSetAttributeTypedValue<InputT, int8_t>(
          avi, input_num_components, value);
    case DT_UINT8:
      return ConvertAndSetAttributeTypedValue<InputT, uint8_t>(
          avi, input_num_components, value);
    case DT_INT16:
      return ConvertAndSetAttributeTypedValue<InputT, int16_t>(
          avi, input_num_components, value);
    case DT_UINT16:
      return ConvertAndSetAttributeTypedValue<InputT, uint16_t>(
          avi, input_num_components, value);
    case DT_INT32:
      return ConvertAndSetAttributeTypedValue<InputT, int32_t>(
          avi, input_num_components, value);
    case DT_UINT32:
      return ConvertAndSetAttributeTypedValue<InputT, uint32_t>(
          avi, input_num_components, value);
    case DT_INT64:
      return ConvertAndSetAttributeTypedValue<InputT, int64_t>(
          avi, input_num_components, value);
    case DT_UINT64:
      return ConvertAndSetAttributeTypedValue<InputT, uint64_t>(
          avi, input_num_components, value);
    case DT_FLOAT32:
      return ConvertAndSetAttributeTypedValue<InputT, float>(
          avi, input_num_components, value);
    case DT_FLOAT64:
      return ConvertAndSetAttributeTypedValue<InputT, double>(
          avi, input_num_components, value);
    case DT_BOOL:
      return ConvertAndSetAttributeTypedValue<InputT, bool>(
          avi, input_num_components, value);
    default:
      break;
  }
  return ErrorStatus(
      "GeometryAttribute::SetAndConvertAttributeValue: Unsupported "
      "attribute type.");
}
#endif
// Hashing support
// Function object for using Attribute as a hash key.
// Hashes the same fields that GeometryAttribute::operator==() compares
// (buffer descriptor, component count, data type, attribute type, stride,
// offset); |normalized_| and |unique_id_| are not part of the hash, so equal
// attributes always hash equal.
struct GeometryAttributeHasher {
  size_t operator()(const GeometryAttribute &va) const {
    size_t hash = HashCombine(va.buffer_descriptor_.buffer_id,
                              va.buffer_descriptor_.buffer_update_count);
    hash = HashCombine(va.num_components_, hash);
    hash = HashCombine(static_cast<int8_t>(va.data_type_), hash);
    hash = HashCombine(static_cast<int8_t>(va.attribute_type_), hash);
    hash = HashCombine(va.byte_stride_, hash);
    return HashCombine(va.byte_offset_, hash);
  }
};
// Function object for using GeometryAttribute::Type as a hash key.
struct GeometryAttributeTypeHasher {
  // The enum's numeric value serves directly as its hash.
  size_t operator()(const GeometryAttribute::Type &at) const {
    return static_cast<size_t>(at);
  }
};
} // namespace draco
#endif // DRACO_ATTRIBUTES_GEOMETRY_ATTRIBUTE_H_

View File

@ -0,0 +1,54 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_ATTRIBUTES_GEOMETRY_INDICES_H_
#define DRACO_ATTRIBUTES_GEOMETRY_INDICES_H_
#include <inttypes.h>
#include <limits>
#include "draco/core/draco_index_type.h"
namespace draco {
// Strongly-typed index wrappers. Each DEFINE_NEW_DRACO_INDEX_TYPE invocation
// declares a distinct type over uint32_t so the different index spaces below
// cannot be mixed up accidentally.
// Index of an attribute value entry stored in a GeometryAttribute.
DEFINE_NEW_DRACO_INDEX_TYPE(uint32_t, AttributeValueIndex)
// Index of a point in a PointCloud.
DEFINE_NEW_DRACO_INDEX_TYPE(uint32_t, PointIndex)
// Vertex index in a Mesh or CornerTable.
DEFINE_NEW_DRACO_INDEX_TYPE(uint32_t, VertexIndex)
// Corner index that identifies a corner in a Mesh or CornerTable.
DEFINE_NEW_DRACO_INDEX_TYPE(uint32_t, CornerIndex)
// Face index for Mesh and CornerTable.
DEFINE_NEW_DRACO_INDEX_TYPE(uint32_t, FaceIndex)
// Constants denoting invalid indices. Each sentinel is the maximum value of
// the underlying uint32_t.
static constexpr AttributeValueIndex kInvalidAttributeValueIndex(
    std::numeric_limits<uint32_t>::max());
static constexpr PointIndex kInvalidPointIndex(
    std::numeric_limits<uint32_t>::max());
static constexpr VertexIndex kInvalidVertexIndex(
    std::numeric_limits<uint32_t>::max());
static constexpr CornerIndex kInvalidCornerIndex(
    std::numeric_limits<uint32_t>::max());
static constexpr FaceIndex kInvalidFaceIndex(
    std::numeric_limits<uint32_t>::max());
// TODO(ostava): Add strongly typed indices for attribute id and unique
// attribute id.
} // namespace draco
#endif // DRACO_ATTRIBUTES_GEOMETRY_INDICES_H_

View File

@ -0,0 +1,270 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "draco/attributes/point_attribute.h"
#include <tuple>
#include <unordered_map>
using std::unordered_map;
// Shortcut for typed conditionals.
// NOTE(review): local re-implementation of C++14's std::conditional_t,
// presumably kept for C++11 compatibility — confirm before replacing.
template <bool B, class T, class F>
using conditional_t = typename std::conditional<B, T, F>::type;
namespace draco {
// Creates an empty attribute with zero unique values and explicit (non-
// identity) point-to-value mapping.
PointAttribute::PointAttribute()
    : num_unique_entries_(0), identity_mapping_(false) {}

// Wraps an existing GeometryAttribute description. No value storage is
// allocated here; |attribute_buffer_| stays unset until Init()/Reset().
PointAttribute::PointAttribute(const GeometryAttribute &att)
    : GeometryAttribute(att),
      num_unique_entries_(0),
      identity_mapping_(false) {}
// Allocates a fresh, tightly packed value buffer (stride = type size *
// component count, offset 0) sized for |num_attribute_values| entries, and
// enables the identity point-to-value mapping.
void PointAttribute::Init(Type attribute_type, int8_t num_components,
                          DataType data_type, bool normalized,
                          size_t num_attribute_values) {
  attribute_buffer_ = std::unique_ptr<DataBuffer>(new DataBuffer());
  GeometryAttribute::Init(attribute_type, attribute_buffer_.get(),
                          num_components, data_type, normalized,
                          DataTypeLength(data_type) * num_components, 0);
  Reset(num_attribute_values);
  SetIdentityMapping();
}
// Deep-copies |src_att|: geometry metadata, buffer contents, point-to-value
// mapping, and (when present) the attribute transform data.
// NOTE(review): returns void — when GeometryAttribute::CopyFrom() fails the
// copy is silently abandoned with this attribute partially updated.
void PointAttribute::CopyFrom(const PointAttribute &src_att) {
  if (buffer() == nullptr) {
    // If the destination attribute doesn't have a valid buffer, create it.
    attribute_buffer_ = std::unique_ptr<DataBuffer>(new DataBuffer());
    ResetBuffer(attribute_buffer_.get(), 0, 0);
  }
  if (!GeometryAttribute::CopyFrom(src_att)) {
    return;
  }
  identity_mapping_ = src_att.identity_mapping_;
  num_unique_entries_ = src_att.num_unique_entries_;
  indices_map_ = src_att.indices_map_;
  if (src_att.attribute_transform_data_) {
    // Clone the transform data so the two attributes do not share it.
    attribute_transform_data_ = std::unique_ptr<AttributeTransformData>(
        new AttributeTransformData(*src_att.attribute_transform_data_));
  } else {
    attribute_transform_data_ = nullptr;
  }
}
// (Re)allocates the owned value buffer to hold |num_attribute_values|
// entries and re-attaches it to the base attribute with a packed stride.
// Returns false when the buffer update fails.
bool PointAttribute::Reset(size_t num_attribute_values) {
  if (attribute_buffer_ == nullptr) {
    attribute_buffer_ = std::unique_ptr<DataBuffer>(new DataBuffer());
  }
  const int64_t entry_size = DataTypeLength(data_type()) * num_components();
  // Update(nullptr, size) presumably resizes the buffer without writing any
  // contents — TODO confirm DataBuffer::Update semantics.
  if (!attribute_buffer_->Update(nullptr, num_attribute_values * entry_size)) {
    return false;
  }
  // Assign the new buffer to the parent attribute.
  ResetBuffer(attribute_buffer_.get(), entry_size, 0);
  num_unique_entries_ = static_cast<uint32_t>(num_attribute_values);
  return true;
}
// Changes the number of stored unique values.
// NOTE(review): unlike Reset(), this assumes Init()/Reset() already ran —
// |attribute_buffer_| must be non-null here.
void PointAttribute::Resize(size_t new_num_unique_entries) {
  num_unique_entries_ = static_cast<uint32_t>(new_num_unique_entries);
  attribute_buffer_->Resize(new_num_unique_entries * byte_stride());
}
#ifdef DRACO_ATTRIBUTE_VALUES_DEDUPLICATION_SUPPORTED
// Convenience overload: deduplicates reading from |in_att| starting at
// value index 0.
AttributeValueIndex::ValueType PointAttribute::DeduplicateValues(
    const GeometryAttribute &in_att) {
  return DeduplicateValues(in_att, AttributeValueIndex(0));
}
// Deduplicates this attribute's values by comparing entries read from
// |in_att| (starting at |in_att_offset|), dispatching on the source data
// type. Returns the resulting number of unique values, or -1 on error.
// NOTE(review): the return type AttributeValueIndex::ValueType appears to be
// unsigned (uint32_t), so the -1 error sentinel wraps to its max value —
// callers presumably compare against -1 via implicit conversion; confirm.
AttributeValueIndex::ValueType PointAttribute::DeduplicateValues(
    const GeometryAttribute &in_att, AttributeValueIndex in_att_offset) {
  AttributeValueIndex::ValueType unique_vals = 0;
  switch (in_att.data_type()) {
    // Currently we support only float, uint8, and uint16 arguments.
    case DT_FLOAT32:
      unique_vals = DeduplicateTypedValues<float>(in_att, in_att_offset);
      break;
    case DT_INT8:
      unique_vals = DeduplicateTypedValues<int8_t>(in_att, in_att_offset);
      break;
    case DT_UINT8:
    case DT_BOOL:
      unique_vals = DeduplicateTypedValues<uint8_t>(in_att, in_att_offset);
      break;
    case DT_UINT16:
      unique_vals = DeduplicateTypedValues<uint16_t>(in_att, in_att_offset);
      break;
    case DT_INT16:
      unique_vals = DeduplicateTypedValues<int16_t>(in_att, in_att_offset);
      break;
    case DT_UINT32:
      unique_vals = DeduplicateTypedValues<uint32_t>(in_att, in_att_offset);
      break;
    case DT_INT32:
      unique_vals = DeduplicateTypedValues<int32_t>(in_att, in_att_offset);
      break;
    default:
      return -1;  // Unsupported data type.
  }
  if (unique_vals == 0) {
    return -1;  // Unexpected error.
  }
  return unique_vals;
}
// Helper function for calling UnifyDuplicateAttributes<T,num_components_t>
// with the correct template arguments.
// Returns the number of unique attribute values.
// Helper function for calling UnifyDuplicateAttributes<T,num_components_t>
// with the correct template arguments.
// Only 1-4 components are supported; any other count returns 0 (error).
// Returns the number of unique attribute values.
template <typename T>
AttributeValueIndex::ValueType PointAttribute::DeduplicateTypedValues(
    const GeometryAttribute &in_att, AttributeValueIndex in_att_offset) {
  // Select the correct method to call based on the number of attribute
  // components.
  switch (in_att.num_components()) {
    case 1:
      return DeduplicateFormattedValues<T, 1>(in_att, in_att_offset);
    case 2:
      return DeduplicateFormattedValues<T, 2>(in_att, in_att_offset);
    case 3:
      return DeduplicateFormattedValues<T, 3>(in_att, in_att_offset);
    case 4:
      return DeduplicateFormattedValues<T, 4>(in_att, in_att_offset);
    default:
      return 0;
  }
}
// Core deduplication: hashes each value read from |in_att|, compacts
// duplicates in place via SetAttributeValue(), and rewrites the point-to-
// value mapping so every point references the surviving unique value.
// Returns the new number of unique values.
template <typename T, int num_components_t>
AttributeValueIndex::ValueType PointAttribute::DeduplicateFormattedValues(
    const GeometryAttribute &in_att, AttributeValueIndex in_att_offset) {
  // We want to detect duplicates using a hash map but we cannot hash floating
  // point numbers directly so bit-copy floats to the same sized integers and
  // hash them.
  // First we need to determine which int type to use (1, 2, 4 or 8 bytes).
  // Note, this is done at compile time using std::conditional struct.
  // Conditional is in form <bool-expression, true, false>. If bool-expression
  // is true the "true" branch is used and vice versa. All at compile time.
  typedef conditional_t<sizeof(T) == 1, uint8_t,
                        conditional_t<sizeof(T) == 2, uint16_t,
                                      conditional_t<sizeof(T) == 4, uint32_t,
                                                    /*else*/ uint64_t>>>
      HashType;
  AttributeValueIndex unique_vals(0);
  typedef std::array<T, num_components_t> AttributeValue;
  typedef std::array<HashType, num_components_t> AttributeHashableValue;
  typedef unordered_map<AttributeHashableValue, AttributeValueIndex,
                        HashArray<AttributeHashableValue>>
      ValueToIndexMap;
  // Hash map storing index of the first attribute with a given value.
  ValueToIndexMap value_to_index_map;
  AttributeValue att_value;
  AttributeHashableValue hashable_value;
  // value_map[old_index] = index of the value after compaction.
  IndexTypeVector<AttributeValueIndex, AttributeValueIndex> value_map(
      num_unique_entries_);
  for (AttributeValueIndex i(0); i < num_unique_entries_; ++i) {
    const AttributeValueIndex att_pos = i + in_att_offset;
    att_value = in_att.GetValue<T, num_components_t>(att_pos);
    // Convert the value to hashable type. Bit-copy real attributes to integers.
    memcpy(&(hashable_value[0]), &(att_value[0]), sizeof(att_value));
    typename ValueToIndexMap::iterator it;
    bool inserted;
    std::tie(it, inserted) = value_to_index_map.insert(
        std::pair<AttributeHashableValue, AttributeValueIndex>(hashable_value,
                                                               unique_vals));
    // Try to update the hash map with a new entry pointing to the latest unique
    // vertex index.
    if (!inserted) {
      // Duplicated value found. Update index mapping.
      value_map[i] = it->second;
    } else {
      // New unique value.
      SetAttributeValue(unique_vals, &att_value);
      // Update index mapping.
      value_map[i] = unique_vals;
      ++unique_vals;
    }
  }
  if (unique_vals == num_unique_entries_) {
    return unique_vals.value();  // Nothing has changed.
  }
  if (is_mapping_identity()) {
    // Change identity mapping to the explicit one.
    // The number of points is equal to the number of old unique values.
    SetExplicitMapping(num_unique_entries_);
    // Update the explicit map.
    for (uint32_t i = 0; i < num_unique_entries_; ++i) {
      SetPointMapEntry(PointIndex(i), value_map[AttributeValueIndex(i)]);
    }
  } else {
    // Update point to value map using the mapping between old and new values.
    for (PointIndex i(0); i < static_cast<uint32_t>(indices_map_.size()); ++i) {
      SetPointMapEntry(i, value_map[indices_map_[i]]);
    }
  }
  num_unique_entries_ = unique_vals.value();
  return num_unique_entries_;
}
#endif
#ifdef DRACO_TRANSCODER_SUPPORTED
// Compacts the attribute storage by removing values that no point maps to.
// Only meaningful for explicit point-to-value mappings; identity mappings by
// construction use every value.
void PointAttribute::RemoveUnusedValues() {
  if (is_mapping_identity()) {
    return;  // For identity mapping, all values are always used.
  }
  // For explicit mapping we need to check if any point is mapped to a value.
  // If not we can delete the value.
  IndexTypeVector<AttributeValueIndex, bool> is_value_used(size(), false);
  int num_used_values = 0;
  for (PointIndex pi(0); pi < indices_map_.size(); ++pi) {
    const AttributeValueIndex avi = indices_map_[pi];
    if (!is_value_used[avi]) {
      is_value_used[avi] = true;
      num_used_values++;
    }
  }
  if (num_used_values == size()) {
    return;  // All values are used.
  }
  // Remap the values and update the point to value mapping.
  IndexTypeVector<AttributeValueIndex, AttributeValueIndex>
      old_to_new_value_map(size(), kInvalidAttributeValueIndex);
  AttributeValueIndex new_avi(0);
  for (AttributeValueIndex avi(0); avi < size(); ++avi) {
    if (!is_value_used[avi]) {
      continue;
    }
    // Move the used value down into its compacted slot. No copy is needed
    // when the value is already in place (avi == new_avi).
    if (avi != new_avi) {
      SetAttributeValue(new_avi, GetAddress(avi));
    }
    old_to_new_value_map[avi] = new_avi++;
  }
  // Remap all points to the new attribute values.
  for (PointIndex pi(0); pi < indices_map_.size(); ++pi) {
    indices_map_[pi] = old_to_new_value_map[indices_map_[pi]];
  }
  num_unique_entries_ = num_used_values;
}
#endif
} // namespace draco

View File

@ -0,0 +1,196 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_ATTRIBUTES_POINT_ATTRIBUTE_H_
#define DRACO_ATTRIBUTES_POINT_ATTRIBUTE_H_
#include <memory>
#include "draco/attributes/attribute_transform_data.h"
#include "draco/attributes/geometry_attribute.h"
#include "draco/core/draco_index_type_vector.h"
#include "draco/core/hash_utils.h"
#include "draco/core/macros.h"
#include "draco/draco_features.h"
namespace draco {
// Class for storing point specific data about each attribute. In general,
// multiple points stored in a point cloud can share the same attribute value
// and this class provides the necessary mapping between point ids and attribute
// value ids.
class PointAttribute : public GeometryAttribute {
 public:
  PointAttribute();
  explicit PointAttribute(const GeometryAttribute &att);
  // Make sure the move constructor is defined (needed for better performance
  // when new attributes are added to PointCloud).
  PointAttribute(PointAttribute &&attribute) = default;
  PointAttribute &operator=(PointAttribute &&attribute) = default;
  // Initializes a point attribute. By default the attribute will be set to
  // identity mapping between point indices and attribute values. To set custom
  // mapping use SetExplicitMapping() function.
  void Init(Type attribute_type, int8_t num_components, DataType data_type,
            bool normalized, size_t num_attribute_values);
  // Copies attribute data from the provided |src_att| attribute.
  void CopyFrom(const PointAttribute &src_att);
  // Prepares the attribute storage for the specified number of entries.
  bool Reset(size_t num_attribute_values);
  // Returns the number of unique attribute values stored by this attribute.
  size_t size() const { return num_unique_entries_; }
  // Returns the attribute value index that |point_index| maps to. With
  // identity mapping the point index is used directly.
  AttributeValueIndex mapped_index(PointIndex point_index) const {
    if (identity_mapping_) {
      return AttributeValueIndex(point_index.value());
    }
    return indices_map_[point_index];
  }
  // Raw storage for the attribute values (owned by this PointAttribute).
  DataBuffer *buffer() const { return attribute_buffer_.get(); }
  // Returns true when point indices are used directly as value indices.
  bool is_mapping_identity() const { return identity_mapping_; }
  // Returns the number of explicit map entries (0 for identity mapping).
  size_t indices_map_size() const {
    if (is_mapping_identity()) {
      return 0;
    }
    return indices_map_.size();
  }
  // Returns the address of the attribute value that |point_index| maps to.
  const uint8_t *GetAddressOfMappedIndex(PointIndex point_index) const {
    return GetAddress(mapped_index(point_index));
  }
  // Sets the new number of unique attribute entries for the attribute. The
  // function resizes the attribute storage to hold |num_attribute_values|
  // entries.
  // All previous entries with AttributeValueIndex < |num_attribute_values|
  // are preserved. Caller needs to ensure that the PointAttribute is still
  // valid after the resizing operation (that is, each point is mapped to a
  // valid attribute value).
  void Resize(size_t new_num_unique_entries);
  // Functions for setting the type of mapping between point indices and
  // attribute entry ids.
  // This function sets the mapping to implicit, where point indices are equal
  // to attribute entry indices.
  void SetIdentityMapping() {
    identity_mapping_ = true;
    indices_map_.clear();
  }
  // This function sets the mapping to be explicitly using the indices_map_
  // array that needs to be initialized by the caller.
  void SetExplicitMapping(size_t num_points) {
    identity_mapping_ = false;
    indices_map_.resize(num_points, kInvalidAttributeValueIndex);
  }
  // Set an explicit map entry for a specific point index.
  void SetPointMapEntry(PointIndex point_index,
                        AttributeValueIndex entry_index) {
    DRACO_DCHECK(!identity_mapping_);
    indices_map_[point_index] = entry_index;
  }
  // Same as GeometryAttribute::GetValue(), but using point id as the input.
  // Mapping to attribute value index is performed automatically.
  void GetMappedValue(PointIndex point_index, void *out_data) const {
    return GetValue(mapped_index(point_index), out_data);
  }
#ifdef DRACO_ATTRIBUTE_VALUES_DEDUPLICATION_SUPPORTED
  // Deduplicate |in_att| values into |this| attribute. |in_att| can be equal
  // to |this|.
  // Returns -1 if the deduplication failed.
  AttributeValueIndex::ValueType DeduplicateValues(
      const GeometryAttribute &in_att);
  // Same as above but the values read from |in_att| are sampled with the
  // provided offset |in_att_offset|.
  AttributeValueIndex::ValueType DeduplicateValues(
      const GeometryAttribute &in_att, AttributeValueIndex in_att_offset);
#endif
  // Set attribute transform data for the attribute. The data is used to store
  // the type and parameters of the transform that is applied on the attribute
  // data (optional).
  void SetAttributeTransformData(
      std::unique_ptr<AttributeTransformData> transform_data) {
    attribute_transform_data_ = std::move(transform_data);
  }
  const AttributeTransformData *GetAttributeTransformData() const {
    return attribute_transform_data_.get();
  }
#ifdef DRACO_TRANSCODER_SUPPORTED
  // Removes unused values from the attribute. Value is unused when no point
  // is mapped to the value. Only applicable when the mapping is not identity.
  void RemoveUnusedValues();
#endif
 private:
#ifdef DRACO_ATTRIBUTE_VALUES_DEDUPLICATION_SUPPORTED
  template <typename T>
  AttributeValueIndex::ValueType DeduplicateTypedValues(
      const GeometryAttribute &in_att, AttributeValueIndex in_att_offset);
  template <typename T, int COMPONENTS_COUNT>
  AttributeValueIndex::ValueType DeduplicateFormattedValues(
      const GeometryAttribute &in_att, AttributeValueIndex in_att_offset);
#endif
  // Data storage for attribute values. GeometryAttribute itself doesn't own its
  // buffer so we need to allocate it here.
  std::unique_ptr<DataBuffer> attribute_buffer_;
  // Mapping between point ids and attribute value ids.
  IndexTypeVector<PointIndex, AttributeValueIndex> indices_map_;
  // Number of unique attribute values stored in |attribute_buffer_|.
  AttributeValueIndex::ValueType num_unique_entries_;
  // Flag when the mapping between point ids and attribute values is identity.
  bool identity_mapping_;
  // If an attribute contains transformed data (e.g. quantized), we can specify
  // the attribute transform here and use it to transform the attribute back to
  // its original format.
  std::unique_ptr<AttributeTransformData> attribute_transform_data_;
  friend struct PointAttributeHasher;
};
// Hash functor for the PointAttribute class. Combines the base
// GeometryAttribute hash with the mapping metadata and fingerprints of the
// index and value buffers. The combination order below defines the resulting
// hash value and must not change.
struct PointAttributeHasher {
  size_t operator()(const PointAttribute &attribute) const {
    GeometryAttributeHasher base_hasher;
    size_t hash = base_hasher(attribute);
    hash = HashCombine(attribute.identity_mapping_, hash);
    hash = HashCombine(attribute.num_unique_entries_, hash);
    hash = HashCombine(attribute.indices_map_.size(), hash);
    if (!attribute.indices_map_.empty()) {
      // NOTE(review): the length passed here is the element count of
      // |indices_map_|, not its size in bytes, so only a prefix of the raw
      // index bytes is fingerprinted. This matches the vendored upstream
      // Draco code and must be kept as-is for hash compatibility.
      const uint64_t indices_hash = FingerprintString(
          reinterpret_cast<const char *>(attribute.indices_map_.data()),
          attribute.indices_map_.size());
      hash = HashCombine(indices_hash, hash);
    }
    if (attribute.attribute_buffer_ != nullptr) {
      const uint64_t buffer_hash = FingerprintString(
          reinterpret_cast<const char *>(attribute.attribute_buffer_->data()),
          attribute.attribute_buffer_->data_size());
      hash = HashCombine(buffer_hash, hash);
    }
    return hash;
  }
};
} // namespace draco
#endif // DRACO_ATTRIBUTES_POINT_ATTRIBUTE_H_

View File

@ -0,0 +1,128 @@
// Copyright 2017 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "draco/attributes/point_attribute.h"
#include "draco/core/draco_test_base.h"
namespace {
// Test fixture for PointAttribute unit tests.
class PointAttributeTest : public ::testing::Test {
 protected:
  PointAttributeTest() {}
};
// Verifies that CopyFrom() duplicates the attribute metadata (hash, unique
// id) as well as the underlying value data.
TEST_F(PointAttributeTest, TestCopy) {
  // This test verifies that PointAttribute can copy data from another point
  // attribute.
  draco::PointAttribute pa;
  pa.Init(draco::GeometryAttribute::POSITION, 1, draco::DT_INT32, false, 10);
  for (int32_t i = 0; i < 10; ++i) {
    pa.SetAttributeValue(draco::AttributeValueIndex(i), &i);
  }
  pa.set_unique_id(12);
  draco::PointAttribute other_pa;
  other_pa.CopyFrom(pa);
  draco::PointAttributeHasher hasher;
  ASSERT_EQ(hasher(pa), hasher(other_pa));
  ASSERT_EQ(pa.unique_id(), other_pa.unique_id());
  // The hash function does not actually compute the hash from attribute values,
  // so ensure the data got copied correctly as well.
  for (int32_t i = 0; i < 10; ++i) {
    int32_t data;
    other_pa.GetValue(draco::AttributeValueIndex(i), &data);
    ASSERT_EQ(data, i);
  }
}
// Checks that float values written with SetAttributeValue() round-trip
// through GetValue().
TEST_F(PointAttributeTest, TestGetValueFloat) {
  draco::PointAttribute pa;
  pa.Init(draco::GeometryAttribute::POSITION, 3, draco::DT_FLOAT32, false, 5);
  float points[3];
  for (int32_t i = 0; i < 5; ++i) {
    points[0] = i * 3.0;
    points[1] = (i * 3.0) + 1.0;
    points[2] = (i * 3.0) + 2.0;
    pa.SetAttributeValue(draco::AttributeValueIndex(i), &points);
  }
  for (int32_t i = 0; i < 5; ++i) {
    pa.GetValue(draco::AttributeValueIndex(i), &points);
    ASSERT_FLOAT_EQ(points[0], i * 3.0);
    ASSERT_FLOAT_EQ(points[1], (i * 3.0) + 1.0);
    ASSERT_FLOAT_EQ(points[2], (i * 3.0) + 2.0);
  }
}
// Exercises both std::array overloads of GetValue<T, N>(): the returning
// variant and the bool-returning out-parameter variant.
TEST_F(PointAttributeTest, TestGetArray) {
  draco::PointAttribute pa;
  pa.Init(draco::GeometryAttribute::POSITION, 3, draco::DT_FLOAT32, false, 5);
  float points[3];
  for (int32_t i = 0; i < 5; ++i) {
    points[0] = i * 3.0;
    points[1] = (i * 3.0) + 1.0;
    points[2] = (i * 3.0) + 2.0;
    pa.SetAttributeValue(draco::AttributeValueIndex(i), &points);
  }
  for (int32_t i = 0; i < 5; ++i) {
    std::array<float, 3> att_value;
    att_value = pa.GetValue<float, 3>(draco::AttributeValueIndex(i));
    ASSERT_FLOAT_EQ(att_value[0], i * 3.0);
    ASSERT_FLOAT_EQ(att_value[1], (i * 3.0) + 1.0);
    ASSERT_FLOAT_EQ(att_value[2], (i * 3.0) + 2.0);
  }
  for (int32_t i = 0; i < 5; ++i) {
    std::array<float, 3> att_value;
    EXPECT_TRUE(
        (pa.GetValue<float, 3>(draco::AttributeValueIndex(i), &att_value)));
    ASSERT_FLOAT_EQ(att_value[0], i * 3.0);
    ASSERT_FLOAT_EQ(att_value[1], (i * 3.0) + 1.0);
    ASSERT_FLOAT_EQ(att_value[2], (i * 3.0) + 2.0);
  }
}
// GetValue<T, N>() with an out-of-range value index must report failure
// instead of reading out of bounds.
TEST_F(PointAttributeTest, TestArrayReadError) {
  draco::PointAttribute pa;
  pa.Init(draco::GeometryAttribute::POSITION, 3, draco::DT_FLOAT32, false, 5);
  float points[3];
  for (int32_t i = 0; i < 5; ++i) {
    points[0] = i * 3.0;
    points[1] = (i * 3.0) + 1.0;
    points[2] = (i * 3.0) + 2.0;
    pa.SetAttributeValue(draco::AttributeValueIndex(i), &points);
  }
  std::array<float, 3> att_value;
  EXPECT_FALSE(
      (pa.GetValue<float, 3>(draco::AttributeValueIndex(5), &att_value)));
}
// Resize() must grow both the reported size and the backing data buffer
// (entries are 3 components * 4 bytes each).
TEST_F(PointAttributeTest, TestResize) {
  draco::PointAttribute pa;
  pa.Init(draco::GeometryAttribute::POSITION, 3, draco::DT_FLOAT32, false, 5);
  ASSERT_EQ(pa.size(), 5);
  ASSERT_EQ(pa.buffer()->data_size(), 4 * 3 * 5);
  pa.Resize(10);
  ASSERT_EQ(pa.size(), 10);
  ASSERT_EQ(pa.buffer()->data_size(), 4 * 3 * 10);
}
}  // namespace

View File

@ -0,0 +1,127 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "draco/compression/attributes/attributes_decoder.h"
#include "draco/core/varint_decoding.h"
namespace draco {
// Creates a decoder that is not yet attached to a PointCloudDecoder or point
// cloud; Init() must be called before use.
AttributesDecoder::AttributesDecoder()
    : point_cloud_decoder_(nullptr), point_cloud_(nullptr) {}
// Attaches this attributes decoder to its owning |decoder| and the point
// cloud |pc| that decoded attribute data is written into. The base
// implementation cannot fail.
bool AttributesDecoder::Init(PointCloudDecoder *decoder, PointCloud *pc) {
  // The two stores are independent of each other.
  point_cloud_ = pc;
  point_cloud_decoder_ = decoder;
  return true;
}
// Reads the per-attribute descriptors (type, data type, component count,
// normalization flag, unique id) from |in_buffer| and instantiates a matching
// PointAttribute on the attached point cloud for each one. The read order
// mirrors AttributesEncoder::EncodeAttributesEncoderData().
bool AttributesDecoder::DecodeAttributesDecoderData(DecoderBuffer *in_buffer) {
  // Decode and create attributes.
  uint32_t num_attributes;
#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED
  // Bitstreams older than 2.0 stored the attribute count as a fixed-width
  // integer instead of a varint.
  if (point_cloud_decoder_->bitstream_version() <
      DRACO_BITSTREAM_VERSION(2, 0)) {
    if (!in_buffer->Decode(&num_attributes)) {
      return false;
    }
  } else
#endif
  {
    if (!DecodeVarint(&num_attributes, in_buffer)) {
      return false;
    }
  }
  // Check that decoded number of attributes is valid.
  if (num_attributes == 0) {
    return false;
  }
  if (num_attributes > 5 * in_buffer->remaining_size()) {
    // The decoded number of attributes is unreasonably high, because at least
    // five bytes of attribute descriptor data per attribute are expected.
    return false;
  }
  // Decode attribute descriptor data.
  point_attribute_ids_.resize(num_attributes);
  PointCloud *pc = point_cloud_;
  for (uint32_t i = 0; i < num_attributes; ++i) {
    // Decode attribute descriptor data.
    uint8_t att_type, data_type, num_components, normalized;
    if (!in_buffer->Decode(&att_type)) {
      return false;
    }
    if (!in_buffer->Decode(&data_type)) {
      return false;
    }
    if (!in_buffer->Decode(&num_components)) {
      return false;
    }
    if (!in_buffer->Decode(&normalized)) {
      return false;
    }
    // Reject descriptors with out-of-range enum values before using them.
    if (att_type >= GeometryAttribute::NAMED_ATTRIBUTES_COUNT) {
      return false;
    }
    if (data_type == DT_INVALID || data_type >= DT_TYPES_COUNT) {
      return false;
    }
    // Check decoded attribute descriptor data.
    if (num_components == 0) {
      return false;
    }
    // Add the attribute to the point cloud.
    const DataType draco_dt = static_cast<DataType>(data_type);
    GeometryAttribute ga;
    ga.Init(static_cast<GeometryAttribute::Type>(att_type), nullptr,
            num_components, draco_dt, normalized > 0,
            DataTypeLength(draco_dt) * num_components, 0);
    uint32_t unique_id;
#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED
    // Bitstreams older than 1.3 used a 16-bit "custom id" instead of the
    // varint-encoded unique id.
    if (point_cloud_decoder_->bitstream_version() <
        DRACO_BITSTREAM_VERSION(1, 3)) {
      uint16_t custom_id;
      if (!in_buffer->Decode(&custom_id)) {
        return false;
      }
      // TODO(draco-eng): Add "custom_id" to attribute metadata.
      unique_id = static_cast<uint32_t>(custom_id);
      ga.set_unique_id(unique_id);
    } else
#endif
    {
      if (!DecodeVarint(&unique_id, in_buffer)) {
        return false;
      }
      ga.set_unique_id(unique_id);
    }
    const int att_id = pc->AddAttribute(
        std::unique_ptr<PointAttribute>(new PointAttribute(ga)));
    pc->attribute(att_id)->set_unique_id(unique_id);
    point_attribute_ids_[i] = att_id;
    // Update the inverse map.
    if (att_id >=
        static_cast<int32_t>(point_attribute_to_local_id_map_.size())) {
      point_attribute_to_local_id_map_.resize(att_id + 1, -1);
    }
    point_attribute_to_local_id_map_[att_id] = i;
  }
  return true;
}
} // namespace draco

View File

@ -0,0 +1,97 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_COMPRESSION_ATTRIBUTES_ATTRIBUTES_DECODER_H_
#define DRACO_COMPRESSION_ATTRIBUTES_ATTRIBUTES_DECODER_H_
#include <vector>
#include "draco/compression/attributes/attributes_decoder_interface.h"
#include "draco/compression/point_cloud/point_cloud_decoder.h"
#include "draco/core/decoder_buffer.h"
#include "draco/draco_features.h"
#include "draco/point_cloud/point_cloud.h"
namespace draco {
// Base class for decoding one or more attributes that were encoded with a
// matching AttributesEncoder. It is a basic implementation of
// AttributesDecoderInterface that provides functionality that is shared between
// all AttributesDecoders.
class AttributesDecoder : public AttributesDecoderInterface {
 public:
  AttributesDecoder();
  virtual ~AttributesDecoder() = default;
  // Called after all attribute decoders are created. It can be used to perform
  // any custom initialization.
  bool Init(PointCloudDecoder *decoder, PointCloud *pc) override;
  // Decodes any attribute decoder specific data from the |in_buffer|.
  bool DecodeAttributesDecoderData(DecoderBuffer *in_buffer) override;
  int32_t GetAttributeId(int i) const override {
    return point_attribute_ids_[i];
  }
  int32_t GetNumAttributes() const override {
    return static_cast<int32_t>(point_attribute_ids_.size());
  }
  PointCloudDecoder *GetDecoder() const override {
    return point_cloud_decoder_;
  }
  // Decodes attribute data from the source buffer.
  // The three phases must run in this exact order; it mirrors the order in
  // which AttributesEncoder::EncodeAttributes() wrote the data.
  bool DecodeAttributes(DecoderBuffer *in_buffer) override {
    if (!DecodePortableAttributes(in_buffer)) {
      return false;
    }
    if (!DecodeDataNeededByPortableTransforms(in_buffer)) {
      return false;
    }
    if (!TransformAttributesToOriginalFormat()) {
      return false;
    }
    return true;
  }
 protected:
  // Returns the local (within-decoder) index for |point_attribute_id|, or -1
  // when the attribute is not handled by this decoder.
  int32_t GetLocalIdForPointAttribute(int32_t point_attribute_id) const {
    const int id_map_size =
        static_cast<int>(point_attribute_to_local_id_map_.size());
    if (point_attribute_id >= id_map_size) {
      return -1;
    }
    return point_attribute_to_local_id_map_[point_attribute_id];
  }
  virtual bool DecodePortableAttributes(DecoderBuffer *in_buffer) = 0;
  virtual bool DecodeDataNeededByPortableTransforms(DecoderBuffer *in_buffer) {
    return true;
  }
  virtual bool TransformAttributesToOriginalFormat() { return true; }
 private:
  // List of attribute ids that need to be decoded with this decoder.
  std::vector<int32_t> point_attribute_ids_;
  // Map between point attribute id and the local id (i.e., the inverse of the
  // |point_attribute_ids_|).
  std::vector<int32_t> point_attribute_to_local_id_map_;
  PointCloudDecoder *point_cloud_decoder_;
  PointCloud *point_cloud_;
};
} // namespace draco
#endif // DRACO_COMPRESSION_ATTRIBUTES_ATTRIBUTES_DECODER_H_

View File

@ -0,0 +1,62 @@
// Copyright 2017 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_COMPRESSION_ATTRIBUTES_ATTRIBUTES_DECODER_INTERFACE_H_
#define DRACO_COMPRESSION_ATTRIBUTES_ATTRIBUTES_DECODER_INTERFACE_H_
#include <vector>
#include "draco/core/decoder_buffer.h"
#include "draco/point_cloud/point_cloud.h"
namespace draco {
class PointCloudDecoder;
// Interface class for decoding one or more attributes that were encoded with a
// matching AttributesEncoder. It provides only the basic interface
// that is used by the PointCloudDecoder. The actual decoding must be
// implemented in derived classes using the DecodeAttributes() method.
class AttributesDecoderInterface {
 public:
  AttributesDecoderInterface() = default;
  virtual ~AttributesDecoderInterface() = default;
  // Called after all attribute decoders are created. It can be used to perform
  // any custom initialization.
  virtual bool Init(PointCloudDecoder *decoder, PointCloud *pc) = 0;
  // Decodes any attribute decoder specific data from the |in_buffer|.
  virtual bool DecodeAttributesDecoderData(DecoderBuffer *in_buffer) = 0;
  // Decode attribute data from the source buffer. Needs to be implemented by
  // the derived classes.
  virtual bool DecodeAttributes(DecoderBuffer *in_buffer) = 0;
  // Returns the point cloud attribute id of the |i|-th attribute handled by
  // this decoder.
  virtual int32_t GetAttributeId(int i) const = 0;
  virtual int32_t GetNumAttributes() const = 0;
  virtual PointCloudDecoder *GetDecoder() const = 0;
  // Returns an attribute containing data processed by the attribute transform.
  // (see TransformToPortableFormat() method). This data is guaranteed to be
  // same for encoder and decoder and it can be used by predictors.
  virtual const PointAttribute *GetPortableAttribute(
      int32_t /* point_attribute_id */) {
    return nullptr;
  }
};
} // namespace draco
#endif // DRACO_COMPRESSION_ATTRIBUTES_ATTRIBUTES_DECODER_INTERFACE_H_

View File

@ -0,0 +1,59 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "draco/compression/attributes/attributes_encoder.h"
#include "draco/core/varint_encoding.h"
#include "draco/draco_features.h"
namespace draco {
// Creates an encoder that is not yet attached to a PointCloudEncoder or point
// cloud; Init() must be called before use.
AttributesEncoder::AttributesEncoder()
    : point_cloud_encoder_(nullptr), point_cloud_(nullptr) {}
// Creates an encoder responsible for the single attribute identified by
// |point_attrib_id|.
AttributesEncoder::AttributesEncoder(int point_attrib_id)
    : AttributesEncoder() {
  AddAttributeId(point_attrib_id);
}
// Attaches this attributes encoder to its owning |encoder| and the source
// geometry |pc|. The base implementation cannot fail.
bool AttributesEncoder::Init(PointCloudEncoder *encoder, const PointCloud *pc) {
  // The two stores are independent of each other.
  point_cloud_ = pc;
  point_cloud_encoder_ = encoder;
  return true;
}
// Writes the per-attribute descriptor data (attribute type, data type,
// component count, normalization flag and unique id) for every attribute
// handled by this encoder into |out_buffer|. The write order must match
// AttributesDecoder::DecodeAttributesDecoderData().
bool AttributesEncoder::EncodeAttributesEncoderData(EncoderBuffer *out_buffer) {
  // Encode data about all attributes.
  EncodeVarint(num_attributes(), out_buffer);
  for (const int32_t att_id : point_attribute_ids_) {
    const PointAttribute *const pa = point_cloud_->attribute(att_id);
    GeometryAttribute::Type type = pa->attribute_type();
#ifdef DRACO_TRANSCODER_SUPPORTED
    // Attribute types TANGENT, MATERIAL, JOINTS, and WEIGHTS are not supported
    // in the official bitstream. They will be encoded as GENERIC.
    if (type > GeometryAttribute::GENERIC) {
      type = GeometryAttribute::GENERIC;
    }
#endif
    out_buffer->Encode(static_cast<uint8_t>(type));
    out_buffer->Encode(static_cast<uint8_t>(pa->data_type()));
    out_buffer->Encode(static_cast<uint8_t>(pa->num_components()));
    out_buffer->Encode(static_cast<uint8_t>(pa->normalized()));
    EncodeVarint(pa->unique_id(), out_buffer);
  }
  return true;
}
} // namespace draco

View File

@ -0,0 +1,154 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_COMPRESSION_ATTRIBUTES_ATTRIBUTES_ENCODER_H_
#define DRACO_COMPRESSION_ATTRIBUTES_ATTRIBUTES_ENCODER_H_
#include "draco/attributes/point_attribute.h"
#include "draco/core/encoder_buffer.h"
#include "draco/point_cloud/point_cloud.h"
namespace draco {
class PointCloudEncoder;
// Base class for encoding one or more attributes of a PointCloud (or other
// geometry). This base class provides only the basic interface that is used
// by the PointCloudEncoder.
class AttributesEncoder {
 public:
  AttributesEncoder();
  // Constructs an attribute encoder associated with a given point attribute.
  explicit AttributesEncoder(int point_attrib_id);
  virtual ~AttributesEncoder() = default;
  // Called after all attribute encoders are created. It can be used to perform
  // any custom initialization, including setting up attribute dependencies.
  // Note: no data should be encoded in this function, because the decoder may
  // process encoders in a different order from the decoder.
  virtual bool Init(PointCloudEncoder *encoder, const PointCloud *pc);
  // Encodes data needed by the target attribute decoder.
  virtual bool EncodeAttributesEncoderData(EncoderBuffer *out_buffer);
  // Returns a unique identifier of the given encoder type, that is used during
  // decoding to construct the corresponding attribute decoder.
  virtual uint8_t GetUniqueId() const = 0;
  // Encode attribute data to the target buffer.
  virtual bool EncodeAttributes(EncoderBuffer *out_buffer) {
    if (!TransformAttributesToPortableFormat()) {
      return false;
    }
    if (!EncodePortableAttributes(out_buffer)) {
      return false;
    }
    // Encode data needed by portable transforms after the attribute is encoded.
    // This corresponds to the order in which the data is going to be decoded by
    // the decoder.
    if (!EncodeDataNeededByPortableTransforms(out_buffer)) {
      return false;
    }
    return true;
  }
  // Returns the number of attributes that need to be encoded before the
  // specified attribute is encoded.
  // Note that the attribute is specified by its point attribute id.
  virtual int NumParentAttributes(int32_t /* point_attribute_id */) const {
    return 0;
  }
  virtual int GetParentAttributeId(int32_t /* point_attribute_id */,
                                   int32_t /* parent_i */) const {
    return -1;
  }
  // Marks a given attribute as a parent of another attribute.
  virtual bool MarkParentAttribute(int32_t /* point_attribute_id */) {
    return false;
  }
  // Returns an attribute containing data processed by the attribute transform.
  // (see TransformToPortableFormat() method). This data is guaranteed to be
  // encoded losslessly and it can be safely used for predictors.
  virtual const PointAttribute *GetPortableAttribute(
      int32_t /* point_attribute_id */) {
    return nullptr;
  }
  // Registers |id| with this encoder and records its local (within-encoder)
  // index in the inverse map.
  void AddAttributeId(int32_t id) {
    point_attribute_ids_.push_back(id);
    if (id >= static_cast<int32_t>(point_attribute_to_local_id_map_.size())) {
      point_attribute_to_local_id_map_.resize(id + 1, -1);
    }
    point_attribute_to_local_id_map_[id] =
        static_cast<int32_t>(point_attribute_ids_.size()) - 1;
  }
  // Sets new attribute point ids (replacing the existing ones).
  void SetAttributeIds(const std::vector<int32_t> &point_attribute_ids) {
    point_attribute_ids_.clear();
    point_attribute_to_local_id_map_.clear();
    for (int32_t att_id : point_attribute_ids) {
      AddAttributeId(att_id);
    }
  }
  int32_t GetAttributeId(int i) const { return point_attribute_ids_[i]; }
  uint32_t num_attributes() const {
    return static_cast<uint32_t>(point_attribute_ids_.size());
  }
  PointCloudEncoder *encoder() const { return point_cloud_encoder_; }
 protected:
  // Transforms the input attribute data into a form that should be losslessly
  // encoded (transform itself can be lossy).
  virtual bool TransformAttributesToPortableFormat() { return true; }
  // Losslessly encodes data of all portable attributes.
  // Precondition: All attributes must have been transformed into portable
  // format at this point (see TransformAttributesToPortableFormat() method).
  virtual bool EncodePortableAttributes(EncoderBuffer *out_buffer) = 0;
  // Encodes any data needed to revert the transform to portable format for each
  // attribute (e.g. data needed for dequantization of quantized values).
  virtual bool EncodeDataNeededByPortableTransforms(EncoderBuffer *out_buffer) {
    return true;
  }
  // Returns the local (within-encoder) index for |point_attribute_id|, or -1
  // when the attribute is not handled by this encoder.
  int32_t GetLocalIdForPointAttribute(int32_t point_attribute_id) const {
    const int id_map_size =
        static_cast<int>(point_attribute_to_local_id_map_.size());
    if (point_attribute_id >= id_map_size) {
      return -1;
    }
    return point_attribute_to_local_id_map_[point_attribute_id];
  }
 private:
  // List of attribute ids that need to be encoded with this encoder.
  std::vector<int32_t> point_attribute_ids_;
  // Map between point attribute id and the local id (i.e., the inverse of the
  // |point_attribute_ids_|).
  std::vector<int32_t> point_attribute_to_local_id_map_;
  PointCloudEncoder *point_cloud_encoder_;
  const PointCloud *point_cloud_;
};
} // namespace draco
#endif // DRACO_COMPRESSION_ATTRIBUTES_ATTRIBUTES_ENCODER_H_

View File

@ -0,0 +1,581 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "draco/compression/attributes/kd_tree_attributes_decoder.h"
#include "draco/compression/attributes/kd_tree_attributes_shared.h"
#include "draco/compression/point_cloud/algorithms/dynamic_integer_points_kd_tree_decoder.h"
#include "draco/compression/point_cloud/algorithms/float_points_tree_decoder.h"
#include "draco/compression/point_cloud/point_cloud_decoder.h"
#include "draco/core/draco_types.h"
#include "draco/core/varint_decoding.h"
namespace draco {
// attribute, offset_dimensionality, data_type, data_size, num_components
using AttributeTuple =
std::tuple<PointAttribute *, uint32_t, DataType, uint32_t, uint32_t>;
// Output iterator that is used to decode values directly into the data buffer
// of the modified PointAttribute.
// The extension of this iterator beyond the DT_UINT32 concerns itself only with
// the size of the data for efficiency, not the type. DataType is conveyed in
// but is an unused field populated for any future logic/special casing.
// DT_UINT32 and all other 4-byte types are naturally supported from the size of
// data in the kd tree encoder. DT_UINT16 and DT_UINT8 are supported by way
// of byte copies into a temporary memory buffer.
template <class CoeffT>
class PointAttributeVectorOutputIterator {
  typedef PointAttributeVectorOutputIterator<CoeffT> Self;

 public:
  PointAttributeVectorOutputIterator(
      PointAttributeVectorOutputIterator &&that) = default;

  // |atts| describes every attribute that receives decoded values as tuples
  // of (attribute, dimension offset, data type, data size, num components).
  explicit PointAttributeVectorOutputIterator(
      const std::vector<AttributeTuple> &atts)
      : attributes_(atts), point_id_(0) {
    DRACO_DCHECK_GE(atts.size(), 1);
    uint32_t required_decode_bytes = 0;
    // Size the scratch buffer to fit the largest single attribute entry
    // (data_size * num_components) among all attributes.
    for (auto index = 0; index < attributes_.size(); index++) {
      const AttributeTuple &att = attributes_[index];
      required_decode_bytes = (std::max)(required_decode_bytes,
                                         std::get<3>(att) * std::get<4>(att));
    }
    memory_.resize(required_decode_bytes);
    data_ = memory_.data();
  }

  // Advances to the next point; later assignments target the new point.
  const Self &operator++() {
    ++point_id_;
    return *this;
  }

  // We do not want to do ANY copying of this constructor so this particular
  // operator is disabled for performance reasons.
  // Self operator++(int) {
  //   Self copy = *this;
  //   ++point_id_;
  //   return copy;
  // }

  Self &operator*() { return *this; }

  // Still needed in some cases.
  // TODO(b/199760123): Remove.
  // hardcoded to 3 based on legacy usage.
  const Self &operator=(const VectorD<CoeffT, 3> &val) {
    DRACO_DCHECK_EQ(attributes_.size(), 1);  // Expect only ONE attribute.
    AttributeTuple &att = attributes_[0];
    PointAttribute *attribute = std::get<0>(att);
    const AttributeValueIndex avi = attribute->mapped_index(point_id_);
    // Silently skip out-of-range entries rather than writing past the end.
    if (avi >= static_cast<uint32_t>(attribute->size())) {
      return *this;
    }
    const uint32_t &offset = std::get<1>(att);
    DRACO_DCHECK_EQ(offset, 0);  // expected to be zero
    attribute->SetAttributeValue(avi, &val[0] + offset);
    return *this;
  }

  // Additional operator taking std::vector as argument. Each attribute reads
  // its components from |val| starting at its dimension offset.
  const Self &operator=(const std::vector<CoeffT> &val) {
    for (auto index = 0; index < attributes_.size(); index++) {
      AttributeTuple &att = attributes_[index];
      PointAttribute *attribute = std::get<0>(att);
      const AttributeValueIndex avi = attribute->mapped_index(point_id_);
      if (avi >= static_cast<uint32_t>(attribute->size())) {
        return *this;
      }
      const uint32_t &offset = std::get<1>(att);
      const uint32_t &data_size = std::get<3>(att);
      const uint32_t &num_components = std::get<4>(att);
      const uint32_t *data_source = val.data() + offset;
      if (data_size < 4) {  // handle uint16_t, uint8_t
        // selectively copy data bytes
        uint8_t *data_counter = data_;
        for (uint32_t index = 0; index < num_components;
             index += 1, data_counter += data_size) {
          std::memcpy(data_counter, data_source + index, data_size);
        }
        // redirect to copied data
        data_source = reinterpret_cast<uint32_t *>(data_);
      }
      attribute->SetAttributeValue(avi, data_source);
    }
    return *this;
  }

 private:
  // preallocated memory for buffering different data sizes. Never reallocated.
  std::vector<uint8_t> memory_;
  uint8_t *data_;
  std::vector<AttributeTuple> attributes_;
  // Index of the point currently being written.
  PointIndex point_id_;

  // NO COPY
  PointAttributeVectorOutputIterator(
      const PointAttributeVectorOutputIterator &that) = delete;
  PointAttributeVectorOutputIterator &operator=(
      PointAttributeVectorOutputIterator const &) = delete;
};
KdTreeAttributesDecoder::KdTreeAttributesDecoder() {}
// Decodes all attributes in their integer (portable) form using the dynamic
// integer kd-tree decoder. For bitstream versions older than 2.3 this is a
// no-op; the work happens in DecodeDataNeededByPortableTransforms() instead.
bool KdTreeAttributesDecoder::DecodePortableAttributes(
    DecoderBuffer *in_buffer) {
  if (in_buffer->bitstream_version() < DRACO_BITSTREAM_VERSION(2, 3)) {
    // Old bitstream does everything in the
    // DecodeDataNeededByPortableTransforms() method.
    return true;
  }
  uint8_t compression_level = 0;
  if (!in_buffer->Decode(&compression_level)) {
    return false;
  }
  const int32_t num_points = GetDecoder()->point_cloud()->num_points();

  // Decode data using the kd tree decoding into integer (portable) attributes.
  // We first need to go over all attributes and create a new portable storage
  // for those attributes that need it (floating point attributes that have to
  // be dequantized after decoding).
  const int num_attributes = GetNumAttributes();
  uint32_t total_dimensionality = 0;  // position is a required dimension
  std::vector<AttributeTuple> atts(num_attributes);
  for (int i = 0; i < GetNumAttributes(); ++i) {
    const int att_id = GetAttributeId(i);
    PointAttribute *const att = GetDecoder()->point_cloud()->attribute(att_id);
    // All attributes have the same number of values and identity mapping
    // between PointIndex and AttributeValueIndex.
    att->Reset(num_points);
    att->SetIdentityMapping();

    PointAttribute *target_att = nullptr;
    if (att->data_type() == DT_UINT32 || att->data_type() == DT_UINT16 ||
        att->data_type() == DT_UINT8) {
      // We can decode to these attributes directly.
      target_att = att;
    } else if (att->data_type() == DT_INT32 || att->data_type() == DT_INT16 ||
               att->data_type() == DT_INT8) {
      // Prepare storage for data that is used to convert unsigned values back
      // to the signed ones.
      for (int c = 0; c < att->num_components(); ++c) {
        min_signed_values_.push_back(0);
      }
      target_att = att;
    } else if (att->data_type() == DT_FLOAT32) {
      // Create a portable attribute that will hold the decoded data. We will
      // dequantize the decoded data to the final attribute later on.
      const int num_components = att->num_components();
      GeometryAttribute va;
      va.Init(att->attribute_type(), nullptr, num_components, DT_UINT32, false,
              num_components * DataTypeLength(DT_UINT32), 0);
      std::unique_ptr<PointAttribute> port_att(new PointAttribute(va));
      port_att->SetIdentityMapping();
      port_att->Reset(num_points);
      quantized_portable_attributes_.push_back(std::move(port_att));
      target_att = quantized_portable_attributes_.back().get();
    } else {
      // Unsupported type.
      return false;
    }
    // Add attribute to the output iterator used by the core algorithm.
    const DataType data_type = target_att->data_type();
    const uint32_t data_size = (std::max)(0, DataTypeLength(data_type));
    const uint32_t num_components = target_att->num_components();
    atts[i] = std::make_tuple(target_att, total_dimensionality, data_type,
                              data_size, num_components);
    total_dimensionality += num_components;
  }
  typedef PointAttributeVectorOutputIterator<uint32_t> OutIt;
  OutIt out_it(atts);
  // The compression level selects the template instantiation of the core
  // decoder, so each level needs its own case.
  switch (compression_level) {
    case 0: {
      if (!DecodePoints<0, OutIt>(total_dimensionality, num_points, in_buffer,
                                  &out_it)) {
        return false;
      }
      break;
    }
    case 1: {
      if (!DecodePoints<1, OutIt>(total_dimensionality, num_points, in_buffer,
                                  &out_it)) {
        return false;
      }
      break;
    }
    case 2: {
      if (!DecodePoints<2, OutIt>(total_dimensionality, num_points, in_buffer,
                                  &out_it)) {
        return false;
      }
      break;
    }
    case 3: {
      if (!DecodePoints<3, OutIt>(total_dimensionality, num_points, in_buffer,
                                  &out_it)) {
        return false;
      }
      break;
    }
    case 4: {
      if (!DecodePoints<4, OutIt>(total_dimensionality, num_points, in_buffer,
                                  &out_it)) {
        return false;
      }
      break;
    }
    case 5: {
      if (!DecodePoints<5, OutIt>(total_dimensionality, num_points, in_buffer,
                                  &out_it)) {
        return false;
      }
      break;
    }
    case 6: {
      if (!DecodePoints<6, OutIt>(total_dimensionality, num_points, in_buffer,
                                  &out_it)) {
        return false;
      }
      break;
    }
    default:
      return false;
  }
  return true;
}
template <int level_t, typename OutIteratorT>
bool KdTreeAttributesDecoder::DecodePoints(int total_dimensionality,
int num_expected_points,
DecoderBuffer *in_buffer,
OutIteratorT *out_iterator) {
DynamicIntegerPointsKdTreeDecoder<level_t> decoder(total_dimensionality);
if (!decoder.DecodePoints(in_buffer, *out_iterator, num_expected_points) ||
decoder.num_decoded_points() != num_expected_points) {
return false;
}
return true;
}
// Decodes side data needed to undo the portable transforms. For bitstream
// versions >= 2.3 this reads quantization parameters for float attributes and
// minimum values for signed integer attributes; for older bitstreams the
// entire kd-tree decode is performed here.
bool KdTreeAttributesDecoder::DecodeDataNeededByPortableTransforms(
    DecoderBuffer *in_buffer) {
  if (in_buffer->bitstream_version() >= DRACO_BITSTREAM_VERSION(2, 3)) {
    // Decode quantization data for each attribute that need it.
    // TODO(ostava): This should be moved to AttributeQuantizationTransform.
    std::vector<float> min_value;
    for (int i = 0; i < GetNumAttributes(); ++i) {
      const int att_id = GetAttributeId(i);
      const PointAttribute *const att =
          GetDecoder()->point_cloud()->attribute(att_id);
      if (att->data_type() == DT_FLOAT32) {
        const int num_components = att->num_components();
        min_value.resize(num_components);
        if (!in_buffer->Decode(&min_value[0], sizeof(float) * num_components)) {
          return false;
        }
        float max_value_dif;
        if (!in_buffer->Decode(&max_value_dif)) {
          return false;
        }
        uint8_t quantization_bits;
        if (!in_buffer->Decode(&quantization_bits) || quantization_bits > 31) {
          return false;
        }
        AttributeQuantizationTransform transform;
        if (!transform.SetParameters(quantization_bits, min_value.data(),
                                     num_components, max_value_dif)) {
          return false;
        }
        const int num_transforms =
            static_cast<int>(attribute_quantization_transforms_.size());
        if (!transform.TransferToAttribute(
                quantized_portable_attributes_[num_transforms].get())) {
          return false;
        }
        attribute_quantization_transforms_.push_back(transform);
      }
    }

    // Decode transform data for signed integer attributes. Use size_t for
    // the index to avoid a signed/unsigned comparison with size().
    for (size_t i = 0; i < min_signed_values_.size(); ++i) {
      int32_t val;
      if (!DecodeVarint(&val, in_buffer)) {
        return false;
      }
      min_signed_values_[i] = val;
    }
    return true;
  }
#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED
  // Handle old bitstream
  // Figure out the total dimensionality of the point cloud
  const uint32_t attribute_count = GetNumAttributes();
  uint32_t total_dimensionality = 0;  // position is a required dimension
  std::vector<AttributeTuple> atts(attribute_count);
  for (auto attribute_index = 0;
       static_cast<uint32_t>(attribute_index) < attribute_count;
       attribute_index += 1)  // increment the dimensionality as needed...
  {
    const int att_id = GetAttributeId(attribute_index);
    PointAttribute *const att = GetDecoder()->point_cloud()->attribute(att_id);
    const DataType data_type = att->data_type();
    const uint32_t data_size = (std::max)(0, DataTypeLength(data_type));
    const uint32_t num_components = att->num_components();
    // Only component sizes up to 32 bits are supported by the old bitstream.
    if (data_size > 4) {
      return false;
    }
    atts[attribute_index] = std::make_tuple(
        att, total_dimensionality, data_type, data_size, num_components);
    // everything is treated as 32bit in the encoder.
    total_dimensionality += num_components;
  }

  const int att_id = GetAttributeId(0);
  PointAttribute *const att = GetDecoder()->point_cloud()->attribute(att_id);
  att->SetIdentityMapping();
  // Decode method
  uint8_t method;
  if (!in_buffer->Decode(&method)) {
    return false;
  }
  if (method == KdTreeAttributesEncodingMethod::kKdTreeQuantizationEncoding) {
    // This method only supports one attribute with exactly three components.
    if (atts.size() != 1 || std::get<4>(atts[0]) != 3) {
      return false;
    }
    uint8_t compression_level = 0;
    if (!in_buffer->Decode(&compression_level)) {
      return false;
    }
    uint32_t num_points = 0;
    if (!in_buffer->Decode(&num_points)) {
      return false;
    }
    att->Reset(num_points);
    FloatPointsTreeDecoder decoder;
    decoder.set_num_points_from_header(num_points);
    PointAttributeVectorOutputIterator<float> out_it(atts);
    if (!decoder.DecodePointCloud(in_buffer, out_it)) {
      return false;
    }
  } else if (method == KdTreeAttributesEncodingMethod::kKdTreeIntegerEncoding) {
    uint8_t compression_level = 0;
    if (!in_buffer->Decode(&compression_level)) {
      return false;
    }
    if (6 < compression_level) {
      DRACO_LOGE(
          "KdTreeAttributesDecoder: compression level %i not supported.\n",
          compression_level);
      return false;
    }
    uint32_t num_points;
    if (!in_buffer->Decode(&num_points)) {
      return false;
    }
    for (auto attribute_index = 0;
         static_cast<uint32_t>(attribute_index) < attribute_count;
         attribute_index += 1) {
      const int att_id = GetAttributeId(attribute_index);
      PointAttribute *const attr =
          GetDecoder()->point_cloud()->attribute(att_id);
      attr->Reset(num_points);
      attr->SetIdentityMapping();
    }

    PointAttributeVectorOutputIterator<uint32_t> out_it(atts);
    // The compression level selects the template instantiation of the core
    // decoder, so each level needs its own case.
    switch (compression_level) {
      case 0: {
        DynamicIntegerPointsKdTreeDecoder<0> decoder(total_dimensionality);
        if (!decoder.DecodePoints(in_buffer, out_it)) {
          return false;
        }
        break;
      }
      case 1: {
        DynamicIntegerPointsKdTreeDecoder<1> decoder(total_dimensionality);
        if (!decoder.DecodePoints(in_buffer, out_it)) {
          return false;
        }
        break;
      }
      case 2: {
        DynamicIntegerPointsKdTreeDecoder<2> decoder(total_dimensionality);
        if (!decoder.DecodePoints(in_buffer, out_it)) {
          return false;
        }
        break;
      }
      case 3: {
        DynamicIntegerPointsKdTreeDecoder<3> decoder(total_dimensionality);
        if (!decoder.DecodePoints(in_buffer, out_it)) {
          return false;
        }
        break;
      }
      case 4: {
        DynamicIntegerPointsKdTreeDecoder<4> decoder(total_dimensionality);
        if (!decoder.DecodePoints(in_buffer, out_it)) {
          return false;
        }
        break;
      }
      case 5: {
        DynamicIntegerPointsKdTreeDecoder<5> decoder(total_dimensionality);
        if (!decoder.DecodePoints(in_buffer, out_it)) {
          return false;
        }
        break;
      }
      case 6: {
        DynamicIntegerPointsKdTreeDecoder<6> decoder(total_dimensionality);
        if (!decoder.DecodePoints(in_buffer, out_it)) {
          return false;
        }
        break;
      }
      default:
        return false;
    }
  } else {
    // Invalid method.
    return false;
  }
  return true;
#else
  return false;
#endif
}
// Converts the decoded unsigned values of |att| back to the original signed
// type by adding back the per-component minimums (from |min_signed_values_|)
// that were subtracted during encoding. Returns false if a decoded value
// would overflow int32_t.
template <typename SignedDataTypeT>
bool KdTreeAttributesDecoder::TransformAttributeBackToSignedType(
    PointAttribute *att, int num_processed_signed_components) {
  typedef typename std::make_unsigned<SignedDataTypeT>::type UnsignedType;
  std::vector<UnsignedType> unsigned_val(att->num_components());
  std::vector<SignedDataTypeT> signed_val(att->num_components());

  for (AttributeValueIndex avi(0); avi < static_cast<uint32_t>(att->size());
       ++avi) {
    att->GetValue(avi, &unsigned_val[0]);
    for (int c = 0; c < att->num_components(); ++c) {
      // Up-cast |unsigned_val| to int32_t to ensure we don't overflow it for
      // smaller data types. But first check that the up-casting does not cause
      // signed integer overflow.
      if (unsigned_val[c] > std::numeric_limits<int32_t>::max()) {
        return false;
      }
      signed_val[c] = static_cast<SignedDataTypeT>(
          static_cast<int32_t>(unsigned_val[c]) +
          min_signed_values_[num_processed_signed_components + c]);
    }
    att->SetAttributeValue(avi, &signed_val[0]);
  }
  return true;
}
// Converts decoded portable attributes back to their original format: signed
// integer attributes get their per-component minimums added back, and
// DT_FLOAT32 attributes are dequantized from their temporary integer storage.
bool KdTreeAttributesDecoder::TransformAttributesToOriginalFormat() {
  if (quantized_portable_attributes_.empty() && min_signed_values_.empty()) {
    // Nothing was transformed during decoding.
    return true;
  }
  int num_processed_quantized_attributes = 0;
  int num_processed_signed_components = 0;
  // Dequantize attributes that needed it.
  for (int i = 0; i < GetNumAttributes(); ++i) {
    const int att_id = GetAttributeId(i);
    PointAttribute *const att = GetDecoder()->point_cloud()->attribute(att_id);
    if (att->data_type() == DT_INT32 || att->data_type() == DT_INT16 ||
        att->data_type() == DT_INT8) {
      // Values are stored as unsigned in the attribute, make them signed
      // again. (The helper allocates its own conversion buffers.)
      if (att->data_type() == DT_INT32) {
        if (!TransformAttributeBackToSignedType<int32_t>(
                att, num_processed_signed_components)) {
          return false;
        }
      } else if (att->data_type() == DT_INT16) {
        if (!TransformAttributeBackToSignedType<int16_t>(
                att, num_processed_signed_components)) {
          return false;
        }
      } else if (att->data_type() == DT_INT8) {
        if (!TransformAttributeBackToSignedType<int8_t>(
                att, num_processed_signed_components)) {
          return false;
        }
      }
      num_processed_signed_components += att->num_components();
    } else if (att->data_type() == DT_FLOAT32) {
      // TODO(ostava): This code should be probably moved out to attribute
      // transform and shared with the SequentialQuantizationAttributeDecoder.
      const PointAttribute *const src_att =
          quantized_portable_attributes_[num_processed_quantized_attributes]
              .get();
      const AttributeQuantizationTransform &transform =
          attribute_quantization_transforms_
              [num_processed_quantized_attributes];
      num_processed_quantized_attributes++;

      if (GetDecoder()->options()->GetAttributeBool(
              att->attribute_type(), "skip_attribute_transform", false)) {
        // Attribute transform should not be performed. In this case, we replace
        // the output geometry attribute with the portable attribute.
        // TODO(ostava): We can potentially avoid this copy by introducing a new
        // mechanism that would allow to use the final attributes as portable
        // attributes for predictors that may need them.
        att->CopyFrom(*src_att);
        continue;
      }

      // Convert all quantized values back to floats.
      const int32_t max_quantized_value =
          (1u << static_cast<uint32_t>(transform.quantization_bits())) - 1;
      const int num_components = att->num_components();
      const int entry_size = sizeof(float) * num_components;
      const std::unique_ptr<float[]> att_val(new float[num_components]);
      int quant_val_id = 0;
      int out_byte_pos = 0;
      Dequantizer dequantizer;
      if (!dequantizer.Init(transform.range(), max_quantized_value)) {
        return false;
      }
      const uint32_t *const portable_attribute_data =
          reinterpret_cast<const uint32_t *>(
              src_att->GetAddress(AttributeValueIndex(0)));
      for (uint32_t i = 0; i < src_att->size(); ++i) {
        for (int c = 0; c < num_components; ++c) {
          float value = dequantizer.DequantizeFloat(
              portable_attribute_data[quant_val_id++]);
          value = value + transform.min_value(c);
          att_val[c] = value;
        }
        // Store the floating point value into the attribute buffer.
        att->buffer()->Write(out_byte_pos, att_val.get(), entry_size);
        out_byte_pos += entry_size;
      }
    }
  }
  return true;
}
} // namespace draco

View File

@ -0,0 +1,50 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_COMPRESSION_ATTRIBUTES_KD_TREE_ATTRIBUTES_DECODER_H_
#define DRACO_COMPRESSION_ATTRIBUTES_KD_TREE_ATTRIBUTES_DECODER_H_
#include "draco/attributes/attribute_quantization_transform.h"
#include "draco/compression/attributes/attributes_decoder.h"
namespace draco {
// Decodes attributes encoded with the KdTreeAttributesEncoder.
class KdTreeAttributesDecoder : public AttributesDecoder {
 public:
  KdTreeAttributesDecoder();

 protected:
  // Decodes the integer (portable) form of all attributes using the dynamic
  // integer kd-tree decoder (bitstream >= 2.3).
  bool DecodePortableAttributes(DecoderBuffer *in_buffer) override;
  // Reads per-attribute side data (quantization parameters, minimum signed
  // values); for old bitstreams this performs the whole decode.
  bool DecodeDataNeededByPortableTransforms(DecoderBuffer *in_buffer) override;
  // Dequantizes float attributes and restores signed integer attributes.
  bool TransformAttributesToOriginalFormat() override;

 private:
  // Runs the kd-tree decoder instantiated for compression level |level_t|.
  template <int level_t, typename OutIteratorT>
  bool DecodePoints(int total_dimensionality, int num_expected_points,
                    DecoderBuffer *in_buffer, OutIteratorT *out_iterator);
  // Adds stored per-component minimums back to the decoded unsigned values.
  template <typename SignedDataTypeT>
  bool TransformAttributeBackToSignedType(PointAttribute *att,
                                          int num_processed_signed_components);
  // One transform per decoded DT_FLOAT32 attribute, in attribute order.
  std::vector<AttributeQuantizationTransform>
      attribute_quantization_transforms_;
  // Per-component minimums used to re-sign decoded integer attributes.
  std::vector<int32_t> min_signed_values_;
  // Temporary uint32 attributes holding quantized data for DT_FLOAT32
  // attributes until they are dequantized.
  std::vector<std::unique_ptr<PointAttribute>> quantized_portable_attributes_;
};
} // namespace draco
#endif // DRACO_COMPRESSION_ATTRIBUTES_KD_TREE_ATTRIBUTES_DECODER_H_

View File

@ -0,0 +1,305 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "draco/compression/attributes/kd_tree_attributes_encoder.h"
#include "draco/compression/attributes/kd_tree_attributes_shared.h"
#include "draco/compression/attributes/point_d_vector.h"
#include "draco/compression/point_cloud/algorithms/dynamic_integer_points_kd_tree_encoder.h"
#include "draco/compression/point_cloud/algorithms/float_points_tree_encoder.h"
#include "draco/compression/point_cloud/point_cloud_encoder.h"
#include "draco/core/varint_encoding.h"
namespace draco {
// num_components_ is computed later in TransformAttributesToPortableFormat().
KdTreeAttributesEncoder::KdTreeAttributesEncoder() : num_components_(0) {}

// Creates an encoder responsible for the single attribute |att_id|.
KdTreeAttributesEncoder::KdTreeAttributesEncoder(int att_id)
    : AttributesEncoder(att_id), num_components_(0) {}
// Prepares attributes for kd-tree encoding: float attributes are quantized
// into temporary portable attributes, and per-component minimums are
// collected for signed integer attributes so their values can later be
// shifted into unsigned range.
bool KdTreeAttributesEncoder::TransformAttributesToPortableFormat() {
  // Convert any of the input attributes into a format that can be processed by
  // the kd tree encoder (quantization of floating attributes for now).
  const size_t num_points = encoder()->point_cloud()->num_points();
  int num_components = 0;
  // Total dimensionality across all encoded attributes.
  for (uint32_t i = 0; i < num_attributes(); ++i) {
    const int att_id = GetAttributeId(i);
    const PointAttribute *const att =
        encoder()->point_cloud()->attribute(att_id);
    num_components += att->num_components();
  }
  num_components_ = num_components;

  // Go over all attributes and quantize them if needed.
  for (uint32_t i = 0; i < num_attributes(); ++i) {
    const int att_id = GetAttributeId(i);
    const PointAttribute *const att =
        encoder()->point_cloud()->attribute(att_id);
    if (att->data_type() == DT_FLOAT32) {
      // Quantization path.
      AttributeQuantizationTransform attribute_quantization_transform;
      const int quantization_bits = encoder()->options()->GetAttributeInt(
          att_id, "quantization_bits", -1);
      if (quantization_bits < 1) {
        return false;
      }
      if (encoder()->options()->IsAttributeOptionSet(att_id,
                                                     "quantization_origin") &&
          encoder()->options()->IsAttributeOptionSet(att_id,
                                                     "quantization_range")) {
        // Quantization settings are explicitly specified in the provided
        // options.
        std::vector<float> quantization_origin(att->num_components());
        encoder()->options()->GetAttributeVector(att_id, "quantization_origin",
                                                 att->num_components(),
                                                 &quantization_origin[0]);
        const float range = encoder()->options()->GetAttributeFloat(
            att_id, "quantization_range", 1.f);
        attribute_quantization_transform.SetParameters(
            quantization_bits, quantization_origin.data(),
            att->num_components(), range);
      } else {
        // Compute quantization settings from the attribute values.
        if (!attribute_quantization_transform.ComputeParameters(
                *att, quantization_bits)) {
          return false;
        }
      }
      attribute_quantization_transforms_.push_back(
          attribute_quantization_transform);
      // Store the quantized attribute in an array that will be used when we do
      // the actual encoding of the data.
      auto portable_att =
          attribute_quantization_transform.InitTransformedAttribute(*att,
                                                                    num_points);
      attribute_quantization_transform.TransformAttribute(*att, {},
                                                          portable_att.get());
      quantized_portable_attributes_.push_back(std::move(portable_att));
    } else if (att->data_type() == DT_INT32 || att->data_type() == DT_INT16 ||
               att->data_type() == DT_INT8) {
      // For signed types, find the minimum value for each component. These
      // values are going to be used to transform the attribute values to
      // unsigned integers that can be processed by the core kd tree algorithm.
      std::vector<int32_t> min_value(att->num_components(),
                                     std::numeric_limits<int32_t>::max());
      std::vector<int32_t> act_value(att->num_components());
      for (AttributeValueIndex avi(0); avi < static_cast<uint32_t>(att->size());
           ++avi) {
        att->ConvertValue<int32_t>(avi, &act_value[0]);
        for (int c = 0; c < att->num_components(); ++c) {
          if (min_value[c] > act_value[c]) {
            min_value[c] = act_value[c];
          }
        }
      }
      for (int c = 0; c < att->num_components(); ++c) {
        min_signed_values_.push_back(min_value[c]);
      }
    }
  }
  return true;
}
// Writes the side data a decoder needs to undo the portable transforms:
// quantization parameters for float attributes followed by varint-encoded
// per-component minimums for signed integer attributes.
bool KdTreeAttributesEncoder::EncodeDataNeededByPortableTransforms(
    EncoderBuffer *out_buffer) {
  // Store quantization settings for all attributes that need it.
  // Use size_t indices to avoid signed/unsigned comparisons with size().
  for (size_t i = 0; i < attribute_quantization_transforms_.size(); ++i) {
    attribute_quantization_transforms_[i].EncodeParameters(out_buffer);
  }

  // Encode data needed for transforming signed integers to unsigned ones.
  for (size_t i = 0; i < min_signed_values_.size(); ++i) {
    EncodeVarint<int32_t>(min_signed_values_[i], out_buffer);
  }
  return true;
}
// Encodes all portable attributes with the dynamic integer kd-tree encoder.
// Values are first gathered into a PointDVector holding one row of
// |num_components_| uint32 values per point.
bool KdTreeAttributesEncoder::EncodePortableAttributes(
    EncoderBuffer *out_buffer) {
  // Encode the data using the kd tree encoder algorithm. The data is first
  // copied to a PointDVector that provides all the API expected by the core
  // encoding algorithm.

  // We limit the maximum value of compression_level to 6 as we don't currently
  // have viable algorithms for higher compression levels.
  uint8_t compression_level =
      std::min(10 - encoder()->options()->GetSpeed(), 6);
  DRACO_DCHECK_LE(compression_level, 6);

  if (compression_level == 6 && num_components_ > 15) {
    // Don't use compression level for CL >= 6. Axis selection is currently
    // encoded using 4 bits.
    compression_level = 5;
  }
  out_buffer->Encode(compression_level);

  // Init PointDVector. The number of dimensions is equal to the total number
  // of dimensions across all attributes.
  const int num_points = encoder()->point_cloud()->num_points();
  PointDVector<uint32_t> point_vector(num_points, num_components_);

  int num_processed_components = 0;
  int num_processed_quantized_attributes = 0;
  int num_processed_signed_components = 0;
  // Copy data to the point vector.
  for (uint32_t i = 0; i < num_attributes(); ++i) {
    const int att_id = GetAttributeId(i);
    const PointAttribute *const att =
        encoder()->point_cloud()->attribute(att_id);
    const PointAttribute *source_att = nullptr;
    if (att->data_type() == DT_UINT32 || att->data_type() == DT_UINT16 ||
        att->data_type() == DT_UINT8 || att->data_type() == DT_INT32 ||
        att->data_type() == DT_INT16 || att->data_type() == DT_INT8) {
      // Use the original attribute.
      source_att = att;
    } else if (att->data_type() == DT_FLOAT32) {
      // Use the portable (quantized) attribute instead.
      source_att =
          quantized_portable_attributes_[num_processed_quantized_attributes]
              .get();
      num_processed_quantized_attributes++;
    } else {
      // Unsupported data type.
      return false;
    }
    if (source_att == nullptr) {
      return false;
    }

    // Copy source_att to the vector.
    if (source_att->data_type() == DT_UINT32) {
      // If the data type is the same as the one used by the point vector, we
      // can directly copy individual elements.
      for (PointIndex pi(0); pi < num_points; ++pi) {
        const AttributeValueIndex avi = source_att->mapped_index(pi);
        const uint8_t *const att_value_address = source_att->GetAddress(avi);
        point_vector.CopyAttribute(source_att->num_components(),
                                   num_processed_components, pi.value(),
                                   att_value_address);
      }
    } else if (source_att->data_type() == DT_INT32 ||
               source_att->data_type() == DT_INT16 ||
               source_att->data_type() == DT_INT8) {
      // Signed values need to be converted to unsigned before they are stored
      // in the point vector.
      std::vector<int32_t> signed_point(source_att->num_components());
      std::vector<uint32_t> unsigned_point(source_att->num_components());
      for (PointIndex pi(0); pi < num_points; ++pi) {
        const AttributeValueIndex avi = source_att->mapped_index(pi);
        source_att->ConvertValue<int32_t>(avi, &signed_point[0]);
        for (int c = 0; c < source_att->num_components(); ++c) {
          // Shift into unsigned range using the stored per-component minimum.
          unsigned_point[c] =
              signed_point[c] -
              min_signed_values_[num_processed_signed_components + c];
        }
        point_vector.CopyAttribute(source_att->num_components(),
                                   num_processed_components, pi.value(),
                                   &unsigned_point[0]);
      }
      num_processed_signed_components += source_att->num_components();
    } else {
      // If the data type of the attribute is different, we have to convert the
      // value before we put it to the point vector.
      std::vector<uint32_t> point(source_att->num_components());
      for (PointIndex pi(0); pi < num_points; ++pi) {
        const AttributeValueIndex avi = source_att->mapped_index(pi);
        source_att->ConvertValue<uint32_t>(avi, &point[0]);
        point_vector.CopyAttribute(source_att->num_components(),
                                   num_processed_components, pi.value(),
                                   point.data());
      }
    }
    num_processed_components += source_att->num_components();
  }

  // Compute the maximum bit length needed for the kd tree encoding.
  int num_bits = 0;
  const uint32_t *data = point_vector[0];
  for (int i = 0; i < num_points * num_components_; ++i) {
    if (data[i] > 0) {
      const int msb = MostSignificantBit(data[i]) + 1;
      if (msb > num_bits) {
        num_bits = msb;
      }
    }
  }

  // The compression level selects the template instantiation of the core
  // encoder, so each level needs its own case.
  switch (compression_level) {
    case 6: {
      DynamicIntegerPointsKdTreeEncoder<6> points_encoder(num_components_);
      if (!points_encoder.EncodePoints(point_vector.begin(), point_vector.end(),
                                       num_bits, out_buffer)) {
        return false;
      }
      break;
    }
    case 5: {
      DynamicIntegerPointsKdTreeEncoder<5> points_encoder(num_components_);
      if (!points_encoder.EncodePoints(point_vector.begin(), point_vector.end(),
                                       num_bits, out_buffer)) {
        return false;
      }
      break;
    }
    case 4: {
      DynamicIntegerPointsKdTreeEncoder<4> points_encoder(num_components_);
      if (!points_encoder.EncodePoints(point_vector.begin(), point_vector.end(),
                                       num_bits, out_buffer)) {
        return false;
      }
      break;
    }
    case 3: {
      DynamicIntegerPointsKdTreeEncoder<3> points_encoder(num_components_);
      if (!points_encoder.EncodePoints(point_vector.begin(), point_vector.end(),
                                       num_bits, out_buffer)) {
        return false;
      }
      break;
    }
    case 2: {
      DynamicIntegerPointsKdTreeEncoder<2> points_encoder(num_components_);
      if (!points_encoder.EncodePoints(point_vector.begin(), point_vector.end(),
                                       num_bits, out_buffer)) {
        return false;
      }
      break;
    }
    case 1: {
      DynamicIntegerPointsKdTreeEncoder<1> points_encoder(num_components_);
      if (!points_encoder.EncodePoints(point_vector.begin(), point_vector.end(),
                                       num_bits, out_buffer)) {
        return false;
      }
      break;
    }
    case 0: {
      DynamicIntegerPointsKdTreeEncoder<0> points_encoder(num_components_);
      if (!points_encoder.EncodePoints(point_vector.begin(), point_vector.end(),
                                       num_bits, out_buffer)) {
        return false;
      }
      break;
    }
    // Compression level and/or encoding speed seem wrong.
    default:
      return false;
  }
  return true;
}
} // namespace draco

View File

@ -0,0 +1,51 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_COMPRESSION_ATTRIBUTES_KD_TREE_ATTRIBUTES_ENCODER_H_
#define DRACO_COMPRESSION_ATTRIBUTES_KD_TREE_ATTRIBUTES_ENCODER_H_
#include "draco/attributes/attribute_quantization_transform.h"
#include "draco/compression/attributes/attributes_encoder.h"
#include "draco/compression/config/compression_shared.h"
namespace draco {
// Encodes all attributes of a given PointCloud using one of the available
// Kd-tree compression methods.
// See compression/point_cloud/point_cloud_kd_tree_encoder.h for more details.
class KdTreeAttributesEncoder : public AttributesEncoder {
 public:
  KdTreeAttributesEncoder();
  // Creates an encoder responsible for the single attribute |att_id|.
  explicit KdTreeAttributesEncoder(int att_id);

  uint8_t GetUniqueId() const override { return KD_TREE_ATTRIBUTE_ENCODER; }

 protected:
  // Quantizes float attributes and collects per-component minimums of signed
  // integer attributes.
  bool TransformAttributesToPortableFormat() override;
  // Encodes all attribute data with the dynamic integer kd-tree encoder.
  bool EncodePortableAttributes(EncoderBuffer *out_buffer) override;
  // Writes quantization parameters and minimum signed values to the buffer.
  bool EncodeDataNeededByPortableTransforms(EncoderBuffer *out_buffer) override;

 private:
  // One transform per quantized DT_FLOAT32 attribute, in attribute order.
  std::vector<AttributeQuantizationTransform>
      attribute_quantization_transforms_;
  // Min signed values are used to transform signed integers into unsigned ones
  // (by subtracting the min signed value for each component).
  std::vector<int32_t> min_signed_values_;
  // Quantized (uint32) copies of DT_FLOAT32 attributes awaiting encoding.
  std::vector<std::unique_ptr<PointAttribute>> quantized_portable_attributes_;
  // Total number of components across all encoded attributes.
  int num_components_;
};
} // namespace draco
#endif // DRACO_COMPRESSION_ATTRIBUTES_KD_TREE_ATTRIBUTES_ENCODER_H_

View File

@ -0,0 +1,28 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_COMPRESSION_ATTRIBUTES_KD_TREE_ATTRIBUTES_SHARED_H_
#define DRACO_COMPRESSION_ATTRIBUTES_KD_TREE_ATTRIBUTES_SHARED_H_
namespace draco {
// Defines types of kD-tree compression
enum KdTreeAttributesEncodingMethod {
  // Attribute values are quantized before being handed to the integer
  // kd-tree coder.
  kKdTreeQuantizationEncoding = 0,
  // Attribute values are already integers and are encoded directly.
  kKdTreeIntegerEncoding
};
} // namespace draco
#endif // DRACO_COMPRESSION_ATTRIBUTES_KD_TREE_ATTRIBUTES_SHARED_H_

View File

@ -0,0 +1,51 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_COMPRESSION_ATTRIBUTES_LINEAR_SEQUENCER_H_
#define DRACO_COMPRESSION_ATTRIBUTES_LINEAR_SEQUENCER_H_
#include "draco/compression/attributes/points_sequencer.h"
namespace draco {
// A simple sequencer that generates a linear sequence [0, num_points - 1].
// I.e., the order of the points is preserved for the input data.
class LinearSequencer : public PointsSequencer {
public:
explicit LinearSequencer(int32_t num_points) : num_points_(num_points) {}
bool UpdatePointToAttributeIndexMapping(PointAttribute *attribute) override {
attribute->SetIdentityMapping();
return true;
}
protected:
bool GenerateSequenceInternal() override {
if (num_points_ < 0) {
return false;
}
out_point_ids()->resize(num_points_);
for (int i = 0; i < num_points_; ++i) {
out_point_ids()->at(i) = PointIndex(i);
}
return true;
}
private:
int32_t num_points_;
};
} // namespace draco
#endif // DRACO_COMPRESSION_ATTRIBUTES_LINEAR_SEQUENCER_H_

View File

@ -0,0 +1,58 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_COMPRESSION_ATTRIBUTES_MESH_ATTRIBUTE_INDICES_ENCODING_DATA_H_
#define DRACO_COMPRESSION_ATTRIBUTES_MESH_ATTRIBUTE_INDICES_ENCODING_DATA_H_
#include <inttypes.h>
#include <vector>
#include "draco/attributes/geometry_indices.h"
namespace draco {
// Data used for encoding and decoding of mesh attributes.
struct MeshAttributeIndicesEncodingData {
  MeshAttributeIndicesEncodingData() : num_values(0) {}

  // Sizes the per-vertex map and reserves the corner map for |num_vertices|
  // entries. NOTE(review): resize() value-initializes new int32_t entries to
  // 0, while the member comment below says -1 marks "not yet encoded" —
  // presumably callers overwrite entries before reading them; confirm at the
  // call sites.
  void Init(int num_vertices) {
    vertex_to_encoded_attribute_value_index_map.resize(num_vertices);
    // We expect to store one value for each vertex.
    encoded_attribute_value_index_to_corner_map.reserve(num_vertices);
  }

  // Array for storing the corner ids in the order their associated attribute
  // entries were encoded/decoded. For every encoded attribute value entry we
  // store exactly one corner. I.e., this is the mapping between an encoded
  // attribute entry ids and corner ids. This map is needed for example by
  // prediction schemes. Note that not all corners are included in this map,
  // e.g., if multiple corners share the same attribute value, only one of these
  // corners will be usually included.
  std::vector<CornerIndex> encoded_attribute_value_index_to_corner_map;

  // Map for storing encoding order of attribute entries for each vertex.
  // i.e. Mapping between vertices and their corresponding attribute entry ids
  // that are going to be used by the decoder.
  // -1 if an attribute entry hasn't been encoded/decoded yet.
  std::vector<int32_t> vertex_to_encoded_attribute_value_index_map;

  // Total number of encoded/decoded attribute entries.
  int num_values;
};
} // namespace draco
#endif // DRACO_COMPRESSION_ATTRIBUTES_MESH_ATTRIBUTE_INDICES_ENCODING_DATA_H_

View File

@ -0,0 +1,372 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Utilities for converting unit vectors to octahedral coordinates and back.
// For more details about octahedral coordinates, see for example Cigolle
// et al.'14 “A Survey of Efficient Representations for Independent Unit
// Vectors”.
//
// In short this is motivated by an octahedron inscribed into a sphere. The
// direction of the normal vector can be defined by a point on the octahedron.
// On the right hemisphere (x > 0) this point is projected onto the x = 0 plane,
// that is, the right side of the octahedron forms a diamond like shape. The
// left side of the octahedron is also projected onto the x = 0 plane, however,
// in this case we flap the triangles of the diamond outward. Afterwards we
// shift the resulting square such that all values are positive.
//
// Important values in this file:
// * q: number of quantization bits
// * max_quantized_value: the max value representable with q bits (odd)
// * max_value: max value of the diamond = max_quantized_value - 1 (even)
// * center_value: center of the diamond after shift
//
// Note that the parameter space is somewhat periodic, e.g. (0, 0) ==
// (max_value, max_value), which is also why the diamond is one smaller than the
// maximal representable value in order to have an odd range of values.
#ifndef DRACO_COMPRESSION_ATTRIBUTES_NORMAL_COMPRESSION_UTILS_H_
#define DRACO_COMPRESSION_ATTRIBUTES_NORMAL_COMPRESSION_UTILS_H_
#include <inttypes.h>
#include <algorithm>
#include <cmath>
#include "draco/core/macros.h"
namespace draco {
// Helper for converting between unit vectors and (possibly quantized)
// octahedral <s, t> coordinates. See the file-level comment for the meaning
// of max_quantized_value / max_value / center_value.
class OctahedronToolBox {
 public:
  // Members start at -1 / 1.f sentinels; the box is unusable until
  // SetQuantizationBits() succeeds (see IsInitialized()).
  OctahedronToolBox()
      : quantization_bits_(-1),
        max_quantized_value_(-1),
        max_value_(-1),
        dequantization_scale_(1.f),
        center_value_(-1) {}

  // Configures the toolbox for |q| quantization bits. Only 2..30 bits are
  // accepted; returns false otherwise. Note max_quantized_value_ is odd and
  // max_value_ is even, per the file-level comment.
  bool SetQuantizationBits(int32_t q) {
    if (q < 2 || q > 30) {
      return false;
    }
    quantization_bits_ = q;
    max_quantized_value_ = (1u << quantization_bits_) - 1;
    max_value_ = max_quantized_value_ - 1;
    dequantization_scale_ = 2.f / max_value_;
    center_value_ = max_value_ / 2;
    return true;
  }
  bool IsInitialized() const { return quantization_bits_ != -1; }

  // Convert all edge points in the top left and bottom right quadrants to
  // their corresponding position in the bottom left and top right quadrants.
  // Convert all corner edge points to the top right corner.
  inline void CanonicalizeOctahedralCoords(int32_t s, int32_t t, int32_t *out_s,
                                           int32_t *out_t) const {
    if ((s == 0 && t == 0) || (s == 0 && t == max_value_) ||
        (s == max_value_ && t == 0)) {
      s = max_value_;
      t = max_value_;
    } else if (s == 0 && t > center_value_) {
      t = center_value_ - (t - center_value_);
    } else if (s == max_value_ && t < center_value_) {
      t = center_value_ + (center_value_ - t);
    } else if (t == max_value_ && s < center_value_) {
      s = center_value_ + (center_value_ - s);
    } else if (t == 0 && s > center_value_) {
      s = center_value_ - (s - center_value_);
    }
    *out_s = s;
    *out_t = t;
  }

  // Converts an integer vector to octahedral coordinates.
  // Precondition: |int_vec| abs sum must equal center value.
  inline void IntegerVectorToQuantizedOctahedralCoords(const int32_t *int_vec,
                                                       int32_t *out_s,
                                                       int32_t *out_t) const {
    DRACO_DCHECK_EQ(
        std::abs(int_vec[0]) + std::abs(int_vec[1]) + std::abs(int_vec[2]),
        center_value_);
    int32_t s, t;
    if (int_vec[0] >= 0) {
      // Right hemisphere.
      s = (int_vec[1] + center_value_);
      t = (int_vec[2] + center_value_);
    } else {
      // Left hemisphere: flap the diamond triangles outward (see file-level
      // comment), which mirrors the coordinates along the diagonal edges.
      if (int_vec[1] < 0) {
        s = std::abs(int_vec[2]);
      } else {
        s = (max_value_ - std::abs(int_vec[2]));
      }
      if (int_vec[2] < 0) {
        t = std::abs(int_vec[1]);
      } else {
        t = (max_value_ - std::abs(int_vec[1]));
      }
    }
    CanonicalizeOctahedralCoords(s, t, out_s, out_t);
  }

  // Projects a floating-point vector onto the octahedron, snaps it to the
  // integer grid (keeping the abs-sum invariant exactly), and converts the
  // result to quantized octahedral coordinates.
  template <class T>
  void FloatVectorToQuantizedOctahedralCoords(const T *vector, int32_t *out_s,
                                              int32_t *out_t) const {
    const double abs_sum = std::abs(static_cast<double>(vector[0])) +
                           std::abs(static_cast<double>(vector[1])) +
                           std::abs(static_cast<double>(vector[2]));
    // Adjust values such that abs sum equals 1.
    double scaled_vector[3];
    if (abs_sum > 1e-6) {
      // Scale needed to project the vector to the surface of an octahedron.
      const double scale = 1.0 / abs_sum;
      scaled_vector[0] = vector[0] * scale;
      scaled_vector[1] = vector[1] * scale;
      scaled_vector[2] = vector[2] * scale;
    } else {
      // Near-zero input: fall back to the +x axis.
      scaled_vector[0] = 1.0;
      scaled_vector[1] = 0;
      scaled_vector[2] = 0;
    }
    // Scale vector such that the sum equals the center value.
    int32_t int_vec[3];
    int_vec[0] =
        static_cast<int32_t>(floor(scaled_vector[0] * center_value_ + 0.5));
    int_vec[1] =
        static_cast<int32_t>(floor(scaled_vector[1] * center_value_ + 0.5));
    // Make sure the sum is exactly the center value.
    int_vec[2] = center_value_ - std::abs(int_vec[0]) - std::abs(int_vec[1]);
    if (int_vec[2] < 0) {
      // If the sum of first two coordinates is too large, we need to decrease
      // the length of one of the coordinates.
      if (int_vec[1] > 0) {
        int_vec[1] += int_vec[2];
      } else {
        int_vec[1] -= int_vec[2];
      }
      int_vec[2] = 0;
    }
    // Take care of the sign.
    if (scaled_vector[2] < 0) {
      int_vec[2] *= -1;
    }
    IntegerVectorToQuantizedOctahedralCoords(int_vec, out_s, out_t);
  }

  // Normalize |vec| such that its abs sum is equal to the center value;
  template <class T>
  void CanonicalizeIntegerVector(T *vec) const {
    static_assert(std::is_integral<T>::value, "T must be an integral type.");
    static_assert(std::is_signed<T>::value, "T must be a signed type.");
    // Sum in 64 bits to avoid overflow for large component values.
    const int64_t abs_sum = static_cast<int64_t>(std::abs(vec[0])) +
                            static_cast<int64_t>(std::abs(vec[1])) +
                            static_cast<int64_t>(std::abs(vec[2]));
    if (abs_sum == 0) {
      vec[0] = center_value_;  // vec[1] == v[2] == 0
    } else {
      vec[0] =
          (static_cast<int64_t>(vec[0]) * static_cast<int64_t>(center_value_)) /
          abs_sum;
      vec[1] =
          (static_cast<int64_t>(vec[1]) * static_cast<int64_t>(center_value_)) /
          abs_sum;
      // Derive vec[2] so the abs-sum invariant holds exactly despite the
      // integer division truncation above.
      if (vec[2] >= 0) {
        vec[2] = center_value_ - std::abs(vec[0]) - std::abs(vec[1]);
      } else {
        vec[2] = -(center_value_ - std::abs(vec[0]) - std::abs(vec[1]));
      }
    }
  }

  // Dequantizes <in_s, in_t> to the <-1, 1> parameter range and decodes the
  // corresponding unit vector into |out_vector| (3 floats).
  inline void QuantizedOctahedralCoordsToUnitVector(int32_t in_s, int32_t in_t,
                                                    float *out_vector) const {
    OctahedralCoordsToUnitVector(in_s * dequantization_scale_ - 1.f,
                                 in_t * dequantization_scale_ - 1.f,
                                 out_vector);
  }

  // |s| and |t| are expected to be signed values.
  inline bool IsInDiamond(const int32_t &s, const int32_t &t) const {
    // Expect center already at origin.
    DRACO_DCHECK_LE(s, center_value_);
    DRACO_DCHECK_LE(t, center_value_);
    DRACO_DCHECK_GE(s, -center_value_);
    DRACO_DCHECK_GE(t, -center_value_);
    // Sum in unsigned space so |s| + |t| cannot overflow int32 for bad data.
    // (center_value_ is non-negative after SetQuantizationBits(), so the
    // mixed signed/unsigned comparison below is safe.)
    const uint32_t st =
        static_cast<uint32_t>(std::abs(s)) + static_cast<uint32_t>(std::abs(t));
    return st <= center_value_;
  }

  // Maps points outside the diamond to their wrapped position inside it
  // (and vice versa), mirroring around the nearest corner of the square.
  void InvertDiamond(int32_t *s, int32_t *t) const {
    // Expect center already at origin.
    DRACO_DCHECK_LE(*s, center_value_);
    DRACO_DCHECK_LE(*t, center_value_);
    DRACO_DCHECK_GE(*s, -center_value_);
    DRACO_DCHECK_GE(*t, -center_value_);
    int32_t sign_s = 0;
    int32_t sign_t = 0;
    if (*s >= 0 && *t >= 0) {
      sign_s = 1;
      sign_t = 1;
    } else if (*s <= 0 && *t <= 0) {
      sign_s = -1;
      sign_t = -1;
    } else {
      sign_s = (*s > 0) ? 1 : -1;
      sign_t = (*t > 0) ? 1 : -1;
    }
    // Perform the addition and subtraction using unsigned integers to avoid
    // signed integer overflows for bad data. Note that the result will be
    // unchanged for non-overflowing cases.
    const uint32_t corner_point_s = sign_s * center_value_;
    const uint32_t corner_point_t = sign_t * center_value_;
    uint32_t us = *s;
    uint32_t ut = *t;
    us = us + us - corner_point_s;
    ut = ut + ut - corner_point_t;
    if (sign_s * sign_t >= 0) {
      uint32_t temp = us;
      us = -ut;
      ut = -temp;
    } else {
      std::swap(us, ut);
    }
    us = us + corner_point_s;
    ut = ut + corner_point_t;
    *s = us;
    *t = ut;
    *s /= 2;
    *t /= 2;
  }

  // Negates the direction and re-wraps the result back into the valid range.
  void InvertDirection(int32_t *s, int32_t *t) const {
    // Expect center already at origin.
    DRACO_DCHECK_LE(*s, center_value_);
    DRACO_DCHECK_LE(*t, center_value_);
    DRACO_DCHECK_GE(*s, -center_value_);
    DRACO_DCHECK_GE(*t, -center_value_);
    *s *= -1;
    *t *= -1;
    this->InvertDiamond(s, t);
  }

  // For correction values. Wraps |x| into [-center_value, center_value],
  // exploiting the periodicity of the parameter space.
  int32_t ModMax(int32_t x) const {
    if (x > this->center_value()) {
      return x - this->max_quantized_value();
    }
    if (x < -this->center_value()) {
      return x + this->max_quantized_value();
    }
    return x;
  }

  // For correction values. Maps a signed correction into a non-negative
  // representative (inverse direction of ModMax for negative inputs).
  int32_t MakePositive(int32_t x) const {
    DRACO_DCHECK_LE(x, this->center_value() * 2);
    if (x < 0) {
      return x + this->max_quantized_value();
    }
    return x;
  }

  int32_t quantization_bits() const { return quantization_bits_; }
  int32_t max_quantized_value() const { return max_quantized_value_; }
  int32_t max_value() const { return max_value_; }
  int32_t center_value() const { return center_value_; }

 private:
  inline void OctahedralCoordsToUnitVector(float in_s_scaled, float in_t_scaled,
                                           float *out_vector) const {
    // Background about the encoding:
    // A normal is encoded in a normalized space <s, t> depicted below. The
    // encoding corresponds to an octahedron that is unwrapped to a 2D plane.
    // During encoding, a normal is projected to the surface of the octahedron
    // and the projection is then unwrapped to the 2D plane. Decoding is the
    // reverse of this process.
    // All points in the central diamond are located on triangles on the
    // right "hemisphere" of the octahedron while all points outside of the
    // diamond are on the left hemisphere (basically, they would have to be
    // wrapped along the diagonal edges to form the octahedron). The central
    // point corresponds to the right most vertex of the octahedron and all
    // corners of the plane correspond to the left most vertex of the
    // octahedron.
    //
    //     t
    //     ^ *-----*-----*
    //     | |    /|\    |
    //     | |   / | \   |
    //     | |  /  |  \  |
    //     | | /   |   \ |
    //     | *-----*-----*
    //     | | \   |   / |
    //     | |  \  |  /  |
    //     | |   \ | /   |
    //     | |    \|/    |
    //     | *-----*-----*  --> s
    //
    // Note that the input |in_s_scaled| and |in_t_scaled| are already scaled to
    // <-1, 1> range. This way, the central point is at coordinate (0, 0).
    float y = in_s_scaled;
    float z = in_t_scaled;
    // Remaining coordinate can be computed by projecting the (y, z) values onto
    // the surface of the octahedron.
    const float x = 1.f - std::abs(y) - std::abs(z);
    // |x| is essentially a signed distance from the diagonal edges of the
    // diamond shown on the figure above. It is positive for all points in the
    // diamond (right hemisphere) and negative for all points outside the
    // diamond (left hemisphere). For all points on the left hemisphere we need
    // to update their (y, z) coordinates to account for the wrapping along
    // the edges of the diamond.
    float x_offset = -x;
    x_offset = x_offset < 0 ? 0 : x_offset;
    // This will do nothing for the points on the right hemisphere but it will
    // mirror the (y, z) location along the nearest diagonal edge of the
    // diamond.
    y += y < 0 ? x_offset : -x_offset;
    z += z < 0 ? x_offset : -x_offset;
    // Normalize the computed vector.
    const float norm_squared = x * x + y * y + z * z;
    if (norm_squared < 1e-6) {
      out_vector[0] = 0;
      out_vector[1] = 0;
      out_vector[2] = 0;
    } else {
      const float d = 1.0f / std::sqrt(norm_squared);
      out_vector[0] = x * d;
      out_vector[1] = y * d;
      out_vector[2] = z * d;
    }
  }

  int32_t quantization_bits_;
  int32_t max_quantized_value_;
  int32_t max_value_;
  float dequantization_scale_;
  int32_t center_value_;
};
} // namespace draco
#endif // DRACO_COMPRESSION_ATTRIBUTES_NORMAL_COMPRESSION_UTILS_H_

View File

@ -0,0 +1,287 @@
// Copyright 2018 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_COMPRESSION_ATTRIBUTES_POINT_D_VECTOR_H_
#define DRACO_COMPRESSION_ATTRIBUTES_POINT_D_VECTOR_H_
#include <cstddef>
#include <cstring>
#include <iterator>
#include <memory>
#include <vector>
#include "draco/core/macros.h"
namespace draco {
// The main class of this file is PointDVector providing an interface similar to
// std::vector<PointD> for arbitrary number of dimensions (without a template
// argument). PointDVectorIterator is a random access iterator, which allows for
// compatibility with existing algorithms. PseudoPointD provides for a view on
// the individual items in a contiguous block of memory, which is compatible
// with the swap function and is returned by a dereference of
// PointDVectorIterator. Swap functions provide for compatibility/specialization
// that allows these classes to work with currently utilized STL functions.
// A non-owning view over one point's components in a contiguous buffer.
// This class allows for swap functionality from the RandomIterator.
// It seems problematic to bring this inside PointDVector due to templating.
template <typename internal_t>
class PseudoPointD {
 public:
  PseudoPointD(internal_t *mem, internal_t dimension)
      : data_(mem), num_dims_(dimension) {}

  PseudoPointD(const PseudoPointD &other)
      : data_(other.data_), num_dims_(other.num_dims_) {}

  // Element-wise swap of the *referenced* memory, not of the views.
  void swap(PseudoPointD &other) noexcept {
    for (internal_t d = 0; d < num_dims_; ++d) {
      std::swap(data_[d], other.data_[d]);
    }
  }

  const internal_t &operator[](const size_t &n) const {
    DRACO_DCHECK_LT(n, num_dims_);
    return data_[n];
  }
  internal_t &operator[](const size_t &n) {
    DRACO_DCHECK_LT(n, num_dims_);
    return data_[n];
  }

  // Two views are equal when every referenced component compares equal.
  bool operator==(const PseudoPointD &other) const {
    for (auto d = 0; d < num_dims_; ++d) {
      if (data_[d] != other.data_[d]) {
        return false;
      }
    }
    return true;
  }
  bool operator!=(const PseudoPointD &other) const {
    return !(*this == other);
  }

 private:
  internal_t *const data_;
  const internal_t num_dims_;
};
// It seems problematic to bring this inside PointDVector due to templating.
// ADL swap overloads so that std algorithms (e.g. std::iter_swap inside
// std::partition, per the note in PointDVectorIterator) can swap the proxy
// objects produced by dereferencing PointDVectorIterator. The rvalue overload
// is required because operator* returns a temporary proxy.
template <typename internal_t>
void swap(draco::PseudoPointD<internal_t> &&a,
          draco::PseudoPointD<internal_t> &&b) noexcept {
  a.swap(b);
};
template <typename internal_t>
void swap(draco::PseudoPointD<internal_t> &a,
          draco::PseudoPointD<internal_t> &b) noexcept {
  a.swap(b);
};
// Stores n_items points of arbitrary (runtime) dimensionality contiguously,
// with a vector-like interface and a random access iterator.
template <typename internal_t>
class PointDVector {
 public:
  PointDVector(const uint32_t n_items, const uint32_t dimensionality)
      : n_items_(n_items),
        dimensionality_(dimensionality),
        item_size_bytes_(dimensionality * sizeof(internal_t)),
        data_(n_items * dimensionality),
        data0_(data_.data()) {}
  // random access iterator
  class PointDVectorIterator {
    friend class PointDVector;

   public:
    // Iterator traits expected by std libraries.
    using iterator_category = std::random_access_iterator_tag;
    using value_type = size_t;
    // NOTE(review): LegacyRandomAccessIterator expects a *signed*
    // difference_type (std::ptrdiff_t). size_t makes negative distances wrap;
    // confirm against the std algorithms this iterator is used with.
    using difference_type = size_t;
    using pointer = PointDVector *;
    using reference = PointDVector &;

    // std::iter_swap is called inside of std::partition and needs this
    // specialized support
    PseudoPointD<internal_t> operator*() const {
      return PseudoPointD<internal_t>(vec_->data0_ + item_ * dimensionality_,
                                      dimensionality_);
    }
    const PointDVectorIterator &operator++() {
      item_ += 1;
      return *this;
    }
    const PointDVectorIterator &operator--() {
      item_ -= 1;
      return *this;
    }
    PointDVectorIterator operator++(int32_t) {
      PointDVectorIterator copy(*this);
      item_ += 1;
      return copy;
    }
    PointDVectorIterator operator--(int32_t) {
      PointDVectorIterator copy(*this);
      item_ -= 1;
      return copy;
    }
    // Copies only the item index; vec_ is a const pointer and both iterators
    // are assumed to refer to the same vector.
    PointDVectorIterator &operator=(const PointDVectorIterator &other) {
      this->item_ = other.item_;
      return *this;
    }
    // Comparisons are purely on the item index (same assumption as above).
    bool operator==(const PointDVectorIterator &ref) const {
      return item_ == ref.item_;
    }
    bool operator!=(const PointDVectorIterator &ref) const {
      return item_ != ref.item_;
    }
    bool operator<(const PointDVectorIterator &ref) const {
      return item_ < ref.item_;
    }
    bool operator>(const PointDVectorIterator &ref) const {
      return item_ > ref.item_;
    }
    bool operator<=(const PointDVectorIterator &ref) const {
      return item_ <= ref.item_;
    }
    bool operator>=(const PointDVectorIterator &ref) const {
      return item_ >= ref.item_;
    }
    PointDVectorIterator operator+(const int32_t &add) const {
      PointDVectorIterator copy(vec_, item_ + add);
      return copy;
    }
    PointDVectorIterator &operator+=(const int32_t &add) {
      item_ += add;
      return *this;
    }
    PointDVectorIterator operator-(const int32_t &sub) const {
      PointDVectorIterator copy(vec_, item_ - sub);
      return copy;
    }
    size_t operator-(const PointDVectorIterator &sub) const {
      return (item_ - sub.item_);
    }
    PointDVectorIterator &operator-=(const int32_t &sub) {
      item_ -= sub;
      return *this;
    }
    // Returns a raw pointer to the components of item (item_ + n).
    internal_t *operator[](const size_t &n) const {
      return vec_->data0_ + (item_ + n) * dimensionality_;
    }

   protected:
    explicit PointDVectorIterator(PointDVector *vec, size_t start_item)
        : item_(start_item), vec_(vec), dimensionality_(vec->dimensionality_) {}

   private:
    size_t item_;  // this counts the item that should be referenced.
    PointDVector *const vec_;        // the thing that we're iterating on
    const uint32_t dimensionality_;  // local copy from vec_
  };

  PointDVectorIterator begin() { return PointDVectorIterator(this, 0); }
  PointDVectorIterator end() { return PointDVectorIterator(this, n_items_); }

  // operator[] allows for unprotected user-side usage of operator[] on the
  // return value AS IF it were a natively indexable type like Point3*
  internal_t *operator[](const uint32_t index) {
    DRACO_DCHECK_LT(index, n_items_);
    return data0_ + index * dimensionality_;
  }
  const internal_t *operator[](const uint32_t index) const {
    DRACO_DCHECK_LT(index, n_items_);
    return data0_ + index * dimensionality_;
  }

  uint32_t size() const { return n_items_; }
  size_t GetBufferSize() const { return data_.size(); }

  // copy a single contiguous 'item' from one PointDVector into this one.
  void CopyItem(const PointDVector &source, const internal_t source_index,
                const internal_t destination_index) {
    DRACO_DCHECK(&source != this ||
                 (&source == this && source_index != destination_index));
    DRACO_DCHECK_LT(destination_index, n_items_);
    DRACO_DCHECK_LT(source_index, source.n_items_);
    // DRACO_DCHECK_EQ(source.n_items_, n_items_);  // not technically necessary
    DRACO_DCHECK_EQ(source.dimensionality_, dimensionality_);
    const internal_t *ref = source[source_index];
    internal_t *const dest = this->operator[](destination_index);
    std::memcpy(dest, ref, item_size_bytes_);
  }

  // Copy data directly off of an attribute buffer interleaved into internal
  // memory.
  void CopyAttribute(
      // The dimensionality of the attribute being integrated
      const internal_t attribute_dimensionality,
      // The offset in dimensions to insert this attribute.
      const internal_t offset_dimensionality, const internal_t index,
      // The direct pointer to the data
      const void *const attribute_item_data) {
    // chunk copy
    const size_t copy_size = sizeof(internal_t) * attribute_dimensionality;
    // a multiply and add can be optimized away with an iterator
    std::memcpy(data0_ + index * dimensionality_ + offset_dimensionality,
                attribute_item_data, copy_size);
  }
  // Copy data off of a contiguous buffer interleaved into internal memory
  void CopyAttribute(
      // The dimensionality of the attribute being integrated
      const internal_t attribute_dimensionality,
      // The offset in dimensions to insert this attribute.
      const internal_t offset_dimensionality,
      const internal_t *const attribute_mem) {
    // NOTE(review): when attribute_dimensionality == dimensionality_ the
    // right-hand side is 0, so this LT-check would fail even though the
    // degenerate branch below handles exactly that case — presumably
    // DRACO_DCHECK_LE was intended; confirm upstream before exercising the
    // equal-dimensionality path in debug builds.
    DRACO_DCHECK_LT(offset_dimensionality,
                    dimensionality_ - attribute_dimensionality);
    // degenerate case block copy the whole buffer.
    if (dimensionality_ == attribute_dimensionality) {
      DRACO_DCHECK_EQ(offset_dimensionality, 0);
      const size_t copy_size =
          sizeof(internal_t) * attribute_dimensionality * n_items_;
      std::memcpy(data0_, attribute_mem, copy_size);
    } else {  // chunk copy
      const size_t copy_size = sizeof(internal_t) * attribute_dimensionality;
      internal_t *internal_data;
      const internal_t *attribute_data;
      internal_t item;
      for (internal_data = data0_ + offset_dimensionality,
          attribute_data = attribute_mem, item = 0;
           item < n_items_; internal_data += dimensionality_,
          attribute_data += attribute_dimensionality, item += 1) {
        std::memcpy(internal_data, attribute_data, copy_size);
      }
    }
  }

 private:
  // internal parameters.
  const uint32_t n_items_;
  const uint32_t dimensionality_;  // The dimension of the points in the buffer
  const uint32_t item_size_bytes_;
  std::vector<internal_t> data_;  // contiguously stored data. Never resized.
  internal_t *const data0_;       // raw pointer to base data.
};
} // namespace draco
#endif // DRACO_COMPRESSION_ATTRIBUTES_POINT_D_VECTOR_H_

View File

@ -0,0 +1,360 @@
// Copyright 2018 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "draco/compression/attributes/point_d_vector.h"
#include "draco/compression/point_cloud/algorithms/point_cloud_types.h"
#include "draco/core/draco_test_base.h"
namespace draco {
class PointDVectorTest : public ::testing::Test {
protected:
template <typename PT>
void TestIntegrity() {}
template <typename PT>
void TestSize() {
for (uint32_t n_items = 0; n_items <= 10; ++n_items) {
for (uint32_t dimensionality = 1; dimensionality <= 10;
++dimensionality) {
draco::PointDVector<PT> var(n_items, dimensionality);
ASSERT_EQ(n_items, var.size());
ASSERT_EQ(n_items * dimensionality, var.GetBufferSize());
}
}
}
  // Builds attribute data in which every component of item i equals i,
  // bulk-copies it into a PointDVector at varying dimension offsets, and
  // verifies each component landed where expected.
  // NOTE(review): the offset loop bound is strict (<), so the
  // att_dimensionality == dimensionality combination is never exercised here.
  template <typename PT>
  void TestContentsContiguous() {
    for (uint32_t n_items = 1; n_items <= 1000; n_items *= 10) {
      for (uint32_t dimensionality = 1; dimensionality < 10;
           dimensionality += 2) {
        for (uint32_t att_dimensionality = 1;
             att_dimensionality <= dimensionality; att_dimensionality += 2) {
          for (uint32_t offset_dimensionality = 0;
               offset_dimensionality < dimensionality - att_dimensionality;
               ++offset_dimensionality) {
            PointDVector<PT> var(n_items, dimensionality);
            std::vector<PT> att(n_items * att_dimensionality);
            // Fill: component j of item i is i.
            for (PT val = 0; val < n_items; val += 1) {
              for (PT att_dim = 0; att_dim < att_dimensionality; att_dim += 1) {
                att[val * att_dimensionality + att_dim] = val;
              }
            }
            const PT *const attribute_data = att.data();
            var.CopyAttribute(att_dimensionality, offset_dimensionality,
                              attribute_data);
            // Every copied component must read back its item index.
            for (PT val = 0; val < n_items; val += 1) {
              for (PT att_dim = 0; att_dim < att_dimensionality; att_dim += 1) {
                ASSERT_EQ(var[val][offset_dimensionality + att_dim], val);
              }
            }
          }
        }
      }
    }
  }
  // Same data pattern as TestContentsContiguous, but copies the attribute
  // into the PointDVector one item at a time via the indexed CopyAttribute
  // overload, then verifies each component.
  // NOTE(review): as above, att_dimensionality == dimensionality is never
  // exercised because the offset loop bound is strict (<).
  template <typename PT>
  void TestContentsDiscrete() {
    for (uint32_t n_items = 1; n_items <= 1000; n_items *= 10) {
      for (uint32_t dimensionality = 1; dimensionality < 10;
           dimensionality += 2) {
        for (uint32_t att_dimensionality = 1;
             att_dimensionality <= dimensionality; att_dimensionality += 2) {
          for (uint32_t offset_dimensionality = 0;
               offset_dimensionality < dimensionality - att_dimensionality;
               ++offset_dimensionality) {
            PointDVector<PT> var(n_items, dimensionality);
            std::vector<PT> att(n_items * att_dimensionality);
            // Fill: component j of item i is i.
            for (PT val = 0; val < n_items; val += 1) {
              for (PT att_dim = 0; att_dim < att_dimensionality; att_dim += 1) {
                att[val * att_dimensionality + att_dim] = val;
              }
            }
            const PT *const attribute_data = att.data();
            // Per-item copy instead of one bulk copy.
            for (PT item = 0; item < n_items; item += 1) {
              var.CopyAttribute(att_dimensionality, offset_dimensionality, item,
                                attribute_data + item * att_dimensionality);
            }
            for (PT val = 0; val < n_items; val += 1) {
              for (PT att_dim = 0; att_dim < att_dimensionality; att_dim += 1) {
                ASSERT_EQ(var[val][offset_dimensionality + att_dim], val);
              }
            }
          }
        }
      }
    }
  }
  // Bulk-copies attribute data into |var|, mirrors it item-by-item into
  // |dest| via CopyItem, and verifies both vectors hold the same values.
  template <typename PT>
  void TestContentsCopy() {
    for (uint32_t n_items = 1; n_items <= 1000; n_items *= 10) {
      for (uint32_t dimensionality = 1; dimensionality < 10;
           dimensionality += 2) {
        for (uint32_t att_dimensionality = 1;
             att_dimensionality <= dimensionality; att_dimensionality += 2) {
          for (uint32_t offset_dimensionality = 0;
               offset_dimensionality < dimensionality - att_dimensionality;
               ++offset_dimensionality) {
            PointDVector<PT> var(n_items, dimensionality);
            PointDVector<PT> dest(n_items, dimensionality);
            std::vector<PT> att(n_items * att_dimensionality);
            // Fill: component j of item i is i.
            for (PT val = 0; val < n_items; val += 1) {
              for (PT att_dim = 0; att_dim < att_dimensionality; att_dim += 1) {
                att[val * att_dimensionality + att_dim] = val;
              }
            }
            const PT *const attribute_data = att.data();
            var.CopyAttribute(att_dimensionality, offset_dimensionality,
                              attribute_data);
            // Mirror every item into the destination vector.
            for (PT item = 0; item < n_items; item += 1) {
              dest.CopyItem(var, item, item);
            }
            for (PT val = 0; val < n_items; val += 1) {
              for (PT att_dim = 0; att_dim < att_dimensionality; att_dim += 1) {
                ASSERT_EQ(var[val][offset_dimensionality + att_dim], val);
                ASSERT_EQ(dest[val][offset_dimensionality + att_dim], val);
              }
            }
          }
        }
      }
    }
  }
  // Like TestContentsCopy, but additionally walks |var| and |dest| in
  // lockstep via their iterators, comparing both the dereferenced
  // PseudoPointD proxies and the individual components.
  template <typename PT>
  void TestIterator() {
    for (uint32_t n_items = 1; n_items <= 1000; n_items *= 10) {
      for (uint32_t dimensionality = 1; dimensionality < 10;
           dimensionality += 2) {
        for (uint32_t att_dimensionality = 1;
             att_dimensionality <= dimensionality; att_dimensionality += 2) {
          for (uint32_t offset_dimensionality = 0;
               offset_dimensionality < dimensionality - att_dimensionality;
               ++offset_dimensionality) {
            PointDVector<PT> var(n_items, dimensionality);
            PointDVector<PT> dest(n_items, dimensionality);
            std::vector<PT> att(n_items * att_dimensionality);
            // Fill: component j of item i is i.
            for (PT val = 0; val < n_items; val += 1) {
              for (PT att_dim = 0; att_dim < att_dimensionality; att_dim += 1) {
                att[val * att_dimensionality + att_dim] = val;
              }
            }
            const PT *const attribute_data = att.data();
            var.CopyAttribute(att_dimensionality, offset_dimensionality,
                              attribute_data);
            for (PT item = 0; item < n_items; item += 1) {
              dest.CopyItem(var, item, item);
            }
            auto V0 = var.begin();
            auto VE = var.end();
            auto D0 = dest.begin();
            auto DE = dest.end();
            while (V0 != VE && D0 != DE) {
              ASSERT_EQ(*D0, *V0);  // compare PseudoPointD
              // verify elemental values
              for (auto index = 0; index < dimensionality; index += 1) {
                ASSERT_EQ((*D0)[index], (*V0)[index]);
              }
              ++V0;
              ++D0;
            }
            for (PT val = 0; val < n_items; val += 1) {
              for (PT att_dim = 0; att_dim < att_dimensionality; att_dim += 1) {
                ASSERT_EQ(var[val][offset_dimensionality + att_dim], val);
                ASSERT_EQ(dest[val][offset_dimensionality + att_dim], val);
              }
            }
          }
        }
      }
    }
  }
  // Intended to compare iteration over a 3-dimensional PointDVector against a
  // reference vector of draco::Point3ui.
  // NOTE(review): with dimensionality == att_dimensionality == 3, the offset
  // loop bound below is (3 - 3) == 0, so the loop body NEVER executes and
  // this test is effectively a no-op — confirm whether the bound was meant to
  // be <= (note the contiguous CopyAttribute overload DCHECKs offset <
  // dims - att_dims, so simply widening the bound would trip that check in
  // debug builds).
  template <typename PT>
  void TestPoint3Iterator() {
    for (uint32_t n_items = 1; n_items <= 1000; n_items *= 10) {
      const uint32_t dimensionality = 3;
      // for (uint32_t dimensionality = 1; dimensionality < 10;
      // dimensionality += 2) {
      const uint32_t att_dimensionality = 3;
      // for (uint32_t att_dimensionality = 1;
      //      att_dimensionality <= dimensionality; att_dimensionality += 2) {
      for (uint32_t offset_dimensionality = 0;
           offset_dimensionality < dimensionality - att_dimensionality;
           ++offset_dimensionality) {
        PointDVector<PT> var(n_items, dimensionality);
        PointDVector<PT> dest(n_items, dimensionality);
        std::vector<PT> att(n_items * att_dimensionality);
        std::vector<draco::Point3ui> att3(n_items);
        // Fill the flat attribute and the Point3ui reference identically.
        for (PT val = 0; val < n_items; val += 1) {
          att3[val][0] = val;
          att3[val][1] = val;
          att3[val][2] = val;
          for (PT att_dim = 0; att_dim < att_dimensionality; att_dim += 1) {
            att[val * att_dimensionality + att_dim] = val;
          }
        }
        const PT *const attribute_data = att.data();
        var.CopyAttribute(att_dimensionality, offset_dimensionality,
                          attribute_data);
        for (PT item = 0; item < n_items; item += 1) {
          dest.CopyItem(var, item, item);
        }
        auto aV0 = att3.begin();
        auto aVE = att3.end();
        auto V0 = var.begin();
        auto VE = var.end();
        auto D0 = dest.begin();
        auto DE = dest.end();
        while (aV0 != aVE && V0 != VE && D0 != DE) {
          ASSERT_EQ(*D0, *V0);  // compare PseudoPointD
          // verify elemental values
          for (auto index = 0; index < dimensionality; index += 1) {
            ASSERT_EQ((*D0)[index], (*V0)[index]);
            ASSERT_EQ((*D0)[index], (*aV0)[index]);
            ASSERT_EQ((*aV0)[index], (*V0)[index]);
          }
          ++aV0;
          ++V0;
          ++D0;
        }
        for (PT val = 0; val < n_items; val += 1) {
          for (PT att_dim = 0; att_dim < att_dimensionality; att_dim += 1) {
            ASSERT_EQ(var[val][offset_dimensionality + att_dim], val);
            ASSERT_EQ(dest[val][offset_dimensionality + att_dim], val);
          }
        }
      }
    }
  }
void TestPseudoPointDSwap() {
draco::Point3ui val = {0, 1, 2};
draco::Point3ui dest = {10, 11, 12};
draco::PseudoPointD<uint32_t> val_src1(&val[0], 3);
draco::PseudoPointD<uint32_t> dest_src1(&dest[0], 3);
ASSERT_EQ(val_src1[0], 0);
ASSERT_EQ(val_src1[1], 1);
ASSERT_EQ(val_src1[2], 2);
ASSERT_EQ(dest_src1[0], 10);
ASSERT_EQ(dest_src1[1], 11);
ASSERT_EQ(dest_src1[2], 12);
ASSERT_NE(val_src1, dest_src1);
swap(val_src1, dest_src1);
ASSERT_EQ(dest_src1[0], 0);
ASSERT_EQ(dest_src1[1], 1);
ASSERT_EQ(dest_src1[2], 2);
ASSERT_EQ(val_src1[0], 10);
ASSERT_EQ(val_src1[1], 11);
ASSERT_EQ(val_src1[2], 12);
ASSERT_NE(val_src1, dest_src1);
}
void TestPseudoPointDEquality() {
draco::Point3ui val = {0, 1, 2};
draco::Point3ui dest = {0, 1, 2};
draco::PseudoPointD<uint32_t> val_src1(&val[0], 3);
draco::PseudoPointD<uint32_t> val_src2(&val[0], 3);
draco::PseudoPointD<uint32_t> dest_src1(&dest[0], 3);
draco::PseudoPointD<uint32_t> dest_src2(&dest[0], 3);
ASSERT_EQ(val_src1, val_src1);
ASSERT_EQ(val_src1, val_src2);
ASSERT_EQ(dest_src1, val_src1);
ASSERT_EQ(dest_src1, val_src2);
ASSERT_EQ(val_src2, val_src1);
ASSERT_EQ(val_src2, val_src2);
ASSERT_EQ(dest_src2, val_src1);
ASSERT_EQ(dest_src2, val_src2);
for (auto i = 0; i < 3; i++) {
ASSERT_EQ(val_src1[i], val_src1[i]);
ASSERT_EQ(val_src1[i], val_src2[i]);
ASSERT_EQ(dest_src1[i], val_src1[i]);
ASSERT_EQ(dest_src1[i], val_src2[i]);
ASSERT_EQ(val_src2[i], val_src1[i]);
ASSERT_EQ(val_src2[i], val_src2[i]);
ASSERT_EQ(dest_src2[i], val_src1[i]);
ASSERT_EQ(dest_src2[i], val_src2[i]);
}
}
void TestPseudoPointDInequality() {
draco::Point3ui val = {0, 1, 2};
draco::Point3ui dest = {1, 2, 3};
draco::PseudoPointD<uint32_t> val_src1(&val[0], 3);
draco::PseudoPointD<uint32_t> val_src2(&val[0], 3);
draco::PseudoPointD<uint32_t> dest_src1(&dest[0], 3);
draco::PseudoPointD<uint32_t> dest_src2(&dest[0], 3);
ASSERT_EQ(val_src1, val_src1);
ASSERT_EQ(val_src1, val_src2);
ASSERT_NE(dest_src1, val_src1);
ASSERT_NE(dest_src1, val_src2);
ASSERT_EQ(val_src2, val_src1);
ASSERT_EQ(val_src2, val_src2);
ASSERT_NE(dest_src2, val_src1);
ASSERT_NE(dest_src2, val_src2);
for (auto i = 0; i < 3; i++) {
ASSERT_EQ(val_src1[i], val_src1[i]);
ASSERT_EQ(val_src1[i], val_src2[i]);
ASSERT_NE(dest_src1[i], val_src1[i]);
ASSERT_NE(dest_src1[i], val_src2[i]);
ASSERT_EQ(val_src2[i], val_src1[i]);
ASSERT_EQ(val_src2[i], val_src2[i]);
ASSERT_NE(dest_src2[i], val_src1[i]);
ASSERT_NE(dest_src2[i], val_src2[i]);
}
}
};
// Runs every PointDVector test variant for uint32_t elements.
TEST_F(PointDVectorTest, VectorTest) {
  TestSize<uint32_t>();
  TestContentsDiscrete<uint32_t>();
  TestContentsContiguous<uint32_t>();
  TestContentsCopy<uint32_t>();
  TestIterator<uint32_t>();
  TestPoint3Iterator<uint32_t>();
}
// Runs the PseudoPointD view tests: swap, equality and inequality semantics.
TEST_F(PointDVectorTest, PseudoPointDTest) {
  TestPseudoPointDSwap();
  TestPseudoPointDEquality();
  TestPseudoPointDInequality();
}
} // namespace draco

View File

@ -0,0 +1,63 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_COMPRESSION_ATTRIBUTES_POINTS_SEQUENCER_H_
#define DRACO_COMPRESSION_ATTRIBUTES_POINTS_SEQUENCER_H_
#include <vector>
#include "draco/attributes/point_attribute.h"
namespace draco {
// Class for generating a sequence of point ids that can be used to encode
// or decode attribute values in a specific order.
// See sequential_attribute_encoders/decoders_controller.h for more details.
// Base class for producing the order (sequence of point ids) in which
// attribute values are encoded or decoded.
// See sequential_attribute_encoders/decoders_controller.h for more details.
class PointsSequencer {
 public:
  PointsSequencer() = default;
  virtual ~PointsSequencer() = default;

  // Generates the point id sequence into |out_point_ids|. Returns false on
  // failure.
  bool GenerateSequence(std::vector<PointIndex> *out_point_ids) {
    out_point_ids_ = out_point_ids;
    return GenerateSequenceInternal();
  }

  // Appends |point_id| to the sequence that is currently being generated.
  void AddPointId(PointIndex point_id) { out_point_ids_->push_back(point_id); }

  // Sets the correct mapping between point ids and value ids, i.e., the
  // inverse of |out_point_ids|. In general, |out_point_ids_| alone does not
  // carry enough information to compute the inverse map, because not all
  // point ids are necessarily contained within it.
  // Must be implemented for sequencers that are used by attribute decoders.
  virtual bool UpdatePointToAttributeIndexMapping(PointAttribute * /* attr */) {
    return false;
  }

 protected:
  // Derived classes fill |out_point_ids_| (via out_point_ids()/AddPointId())
  // with a valid sequence of point ids here.
  virtual bool GenerateSequenceInternal() = 0;
  std::vector<PointIndex> *out_point_ids() const { return out_point_ids_; }

 private:
  // Target vector for the generated sequence. Not owned; supplied by the
  // caller of GenerateSequence().
  std::vector<PointIndex> *out_point_ids_ = nullptr;
};
} // namespace draco
#endif // DRACO_COMPRESSION_ATTRIBUTES_POINTS_SEQUENCER_H_

View File

@ -0,0 +1,236 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_CONSTRAINED_MULTI_PARALLELOGRAM_DECODER_H_
#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_CONSTRAINED_MULTI_PARALLELOGRAM_DECODER_H_
#include <algorithm>
#include <cmath>
#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram_shared.h"
#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_decoder.h"
#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_shared.h"
#include "draco/compression/bit_coders/rans_bit_decoder.h"
#include "draco/core/math_utils.h"
#include "draco/core/varint_decoding.h"
#include "draco/draco_features.h"
namespace draco {
// Decoder for predictions encoded with the constrained multi-parallelogram
// encoder. See the corresponding encoder for more details about the prediction
// method.
template <typename DataTypeT, class TransformT, class MeshDataT>
class MeshPredictionSchemeConstrainedMultiParallelogramDecoder
    : public MeshPredictionSchemeDecoder<DataTypeT, TransformT, MeshDataT> {
 public:
  using CorrType =
      typename PredictionSchemeDecoder<DataTypeT, TransformT>::CorrType;
  using CornerTable = typename MeshDataT::CornerTable;

  explicit MeshPredictionSchemeConstrainedMultiParallelogramDecoder(
      const PointAttribute *attribute)
      : MeshPredictionSchemeDecoder<DataTypeT, TransformT, MeshDataT>(
            attribute),
        selected_mode_(Mode::OPTIMAL_MULTI_PARALLELOGRAM) {}
  MeshPredictionSchemeConstrainedMultiParallelogramDecoder(
      const PointAttribute *attribute, const TransformT &transform,
      const MeshDataT &mesh_data)
      : MeshPredictionSchemeDecoder<DataTypeT, TransformT, MeshDataT>(
            attribute, transform, mesh_data),
        selected_mode_(Mode::OPTIMAL_MULTI_PARALLELOGRAM) {}

  // Reconstructs the original attribute values from the decoded corrections
  // in |in_corr|, writing the result into |out_data|.
  bool ComputeOriginalValues(const CorrType *in_corr, DataTypeT *out_data,
                             int size, int num_components,
                             const PointIndex *entry_to_point_id_map) override;
  // Decodes the per-context crease-edge flags consumed by
  // ComputeOriginalValues().
  bool DecodePredictionData(DecoderBuffer *buffer) override;
  PredictionSchemeMethod GetPredictionMethod() const override {
    return MESH_PREDICTION_CONSTRAINED_MULTI_PARALLELOGRAM;
  }
  bool IsInitialized() const override {
    return this->mesh_data().IsInitialized();
  }

 private:
  typedef constrained_multi_parallelogram::Mode Mode;
  static constexpr int kMaxNumParallelograms =
      constrained_multi_parallelogram::kMaxNumParallelograms;
  // Crease edges are used to store whether any given edge should be used for
  // parallelogram prediction or not. New values are added in the order in
  // which the edges are processed. For better compression, the flags are
  // stored in separate contexts based on the number of available
  // parallelograms at a given vertex.
  std::vector<bool> is_crease_edge_[kMaxNumParallelograms];
  Mode selected_mode_;
};
template <typename DataTypeT, class TransformT, class MeshDataT>
bool MeshPredictionSchemeConstrainedMultiParallelogramDecoder<
    DataTypeT, TransformT, MeshDataT>::
    ComputeOriginalValues(const CorrType *in_corr, DataTypeT *out_data,
                          int /* size */, int num_components,
                          const PointIndex * /* entry_to_point_id_map */) {
  this->transform().Init(num_components);
  // Predicted values for all simple parallelograms encountered at any given
  // vertex.
  std::vector<DataTypeT> pred_vals[kMaxNumParallelograms];
  for (int i = 0; i < kMaxNumParallelograms; ++i) {
    pred_vals[i].resize(num_components, 0);
  }
  // Restore the first entry directly from its correction; its prediction is
  // the zero vector (|pred_vals[0]| is still all zeros here).
  this->transform().ComputeOriginalValue(pred_vals[0].data(), in_corr,
                                         out_data);
  const CornerTable *const table = this->mesh_data().corner_table();
  const std::vector<int32_t> *const vertex_to_data_map =
      this->mesh_data().vertex_to_data_map();
  // Current position in the |is_crease_edge_| array for each context.
  std::vector<int> is_crease_edge_pos(kMaxNumParallelograms, 0);
  // Used to store predicted value for multi-parallelogram prediction.
  std::vector<DataTypeT> multi_pred_vals(num_components);
  const int corner_map_size =
      static_cast<int>(this->mesh_data().data_to_corner_map()->size());
  for (int p = 1; p < corner_map_size; ++p) {
    const CornerIndex start_corner_id =
        this->mesh_data().data_to_corner_map()->at(p);
    CornerIndex corner_id(start_corner_id);
    int num_parallelograms = 0;
    bool first_pass = true;
    // Gather up to |kMaxNumParallelograms| simple parallelogram predictions
    // from the corners around the vertex of entry |p|.
    while (corner_id != kInvalidCornerIndex) {
      if (ComputeParallelogramPrediction(
              p, corner_id, table, *vertex_to_data_map, out_data,
              num_components, &(pred_vals[num_parallelograms][0]))) {
        // Parallelogram prediction applied and stored in
        // |pred_vals[num_parallelograms]|
        ++num_parallelograms;
        // Stop processing when we reach the maximum number of allowed
        // parallelograms.
        if (num_parallelograms == kMaxNumParallelograms) {
          break;
        }
      }
      // Proceed to the next corner attached to the vertex. First swing left
      // and if we reach a boundary, swing right from the start corner.
      if (first_pass) {
        corner_id = table->SwingLeft(corner_id);
      } else {
        corner_id = table->SwingRight(corner_id);
      }
      if (corner_id == start_corner_id) {
        break;
      }
      if (corner_id == kInvalidCornerIndex && first_pass) {
        first_pass = false;
        corner_id = table->SwingRight(start_corner_id);
      }
    }
    // Check which of the available parallelograms are actually used and
    // compute the final predicted value.
    int num_used_parallelograms = 0;
    if (num_parallelograms > 0) {
      for (int i = 0; i < num_components; ++i) {
        multi_pred_vals[i] = 0;
      }
      // Check which parallelograms are actually used.
      for (int i = 0; i < num_parallelograms; ++i) {
        // The flag context is selected by the number of available
        // parallelograms — this mirrors the encoder's context choice.
        const int context = num_parallelograms - 1;
        const int pos = is_crease_edge_pos[context]++;
        // Corrupted/truncated input: more flags consumed than were decoded
        // for this context.
        if (is_crease_edge_[context].size() <= pos) {
          return false;
        }
        const bool is_crease = is_crease_edge_[context][pos];
        if (!is_crease) {
          ++num_used_parallelograms;
          for (int j = 0; j < num_components; ++j) {
            multi_pred_vals[j] =
                AddAsUnsigned(multi_pred_vals[j], pred_vals[i][j]);
          }
        }
      }
    }
    const int dst_offset = p * num_components;
    if (num_used_parallelograms == 0) {
      // No parallelogram was valid.
      // We use the last decoded point as a reference.
      const int src_offset = (p - 1) * num_components;
      this->transform().ComputeOriginalValue(
          out_data + src_offset, in_corr + dst_offset, out_data + dst_offset);
    } else {
      // Compute the correction from the predicted value (average of the used
      // parallelogram predictions).
      for (int c = 0; c < num_components; ++c) {
        multi_pred_vals[c] /= num_used_parallelograms;
      }
      this->transform().ComputeOriginalValue(
          multi_pred_vals.data(), in_corr + dst_offset, out_data + dst_offset);
    }
  }
  return true;
}
template <typename DataTypeT, class TransformT, class MeshDataT>
bool MeshPredictionSchemeConstrainedMultiParallelogramDecoder<
    DataTypeT, TransformT, MeshDataT>::DecodePredictionData(DecoderBuffer
                                                                *buffer) {
#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED
  if (buffer->bitstream_version() < DRACO_BITSTREAM_VERSION(2, 2)) {
    // Decode prediction mode.
    uint8_t mode;
    if (!buffer->Decode(&mode)) {
      return false;
    }
    if (mode != Mode::OPTIMAL_MULTI_PARALLELOGRAM) {
      // Unsupported mode.
      return false;
    }
  }
#endif
  // Decode selected edges using separate rans bit decoder for each context.
  for (int i = 0; i < kMaxNumParallelograms; ++i) {
    uint32_t num_flags;
    if (!DecodeVarint<uint32_t>(&num_flags, buffer)) {
      return false;
    }
    // Sanity check against corrupted input: there cannot be more flags than
    // corners in the mesh.
    if (num_flags > this->mesh_data().corner_table()->num_corners()) {
      return false;
    }
    if (num_flags > 0) {
      is_crease_edge_[i].resize(num_flags);
      RAnsBitDecoder decoder;
      if (!decoder.StartDecoding(buffer)) {
        return false;
      }
      for (uint32_t j = 0; j < num_flags; ++j) {
        is_crease_edge_[i][j] = decoder.DecodeNextBit();
      }
      decoder.EndDecoding();
    }
  }
  // Delegate decoding of the transform data to the base class.
  return MeshPredictionSchemeDecoder<DataTypeT, TransformT,
                                     MeshDataT>::DecodePredictionData(buffer);
}
} // namespace draco
#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_CONSTRAINED_MULTI_PARALLELOGRAM_DECODER_H_

View File

@ -0,0 +1,413 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_CONSTRAINED_MULTI_PARALLELOGRAM_ENCODER_H_
#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_CONSTRAINED_MULTI_PARALLELOGRAM_ENCODER_H_
#include <algorithm>
#include <cmath>
#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram_shared.h"
#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_encoder.h"
#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_shared.h"
#include "draco/compression/bit_coders/rans_bit_encoder.h"
#include "draco/compression/entropy/shannon_entropy.h"
#include "draco/core/varint_encoding.h"
namespace draco {
// Compared to standard multi-parallelogram, constrained multi-parallelogram can
// explicitly select which of the available parallelograms are going to be used
// for the prediction by marking crease edges between two triangles. This
// requires storing extra data, but it allows the predictor to avoid using
// parallelograms that would lead to poor predictions. For improved efficiency,
// our current implementation limits the maximum number of used parallelograms
// to four, which covers >95% of the cases (on average, there are only two
// parallelograms available for any given vertex).
// All bits of the explicitly chosen configuration are stored together in a
// single context chosen by the total number of parallelograms available to
// choose from.
template <typename DataTypeT, class TransformT, class MeshDataT>
class MeshPredictionSchemeConstrainedMultiParallelogramEncoder
    : public MeshPredictionSchemeEncoder<DataTypeT, TransformT, MeshDataT> {
 public:
  using CorrType =
      typename PredictionSchemeEncoder<DataTypeT, TransformT>::CorrType;
  using CornerTable = typename MeshDataT::CornerTable;

  explicit MeshPredictionSchemeConstrainedMultiParallelogramEncoder(
      const PointAttribute *attribute)
      : MeshPredictionSchemeEncoder<DataTypeT, TransformT, MeshDataT>(
            attribute),
        selected_mode_(Mode::OPTIMAL_MULTI_PARALLELOGRAM) {}
  MeshPredictionSchemeConstrainedMultiParallelogramEncoder(
      const PointAttribute *attribute, const TransformT &transform,
      const MeshDataT &mesh_data)
      : MeshPredictionSchemeEncoder<DataTypeT, TransformT, MeshDataT>(
            attribute, transform, mesh_data),
        selected_mode_(Mode::OPTIMAL_MULTI_PARALLELOGRAM) {}

  // Computes per-entry corrections from |in_data| into |out_corr|, choosing
  // for every vertex the subset of available parallelograms that minimizes
  // the estimated encoded size.
  bool ComputeCorrectionValues(
      const DataTypeT *in_data, CorrType *out_corr, int size,
      int num_components, const PointIndex *entry_to_point_id_map) override;
  // Encodes the crease-edge flags gathered during ComputeCorrectionValues().
  bool EncodePredictionData(EncoderBuffer *buffer) override;
  PredictionSchemeMethod GetPredictionMethod() const override {
    return MESH_PREDICTION_CONSTRAINED_MULTI_PARALLELOGRAM;
  }
  bool IsInitialized() const override {
    return this->mesh_data().IsInitialized();
  }

 private:
  // Function used to compute number of bits needed to store overhead of the
  // predictor. In this case, we consider overhead to be all bits that mark
  // whether a parallelogram should be used for prediction or not. The input
  // to this method is the total number of parallelograms that were evaluated
  // so far (total_parallelogram), and the number of parallelograms we decided
  // to use for prediction (total_used_parallelograms).
  // Returns number of bits required to store the overhead.
  int64_t ComputeOverheadBits(int64_t total_used_parallelograms,
                              int64_t total_parallelogram) const {
    // For now we assume RAns coding for the bits where the total required
    // size is directly correlated to the binary entropy of the input stream.
    // TODO(ostava): This should be generalized in case we use other binary
    // coding scheme.
    const double entropy = ComputeBinaryShannonEntropy(
        static_cast<uint32_t>(total_parallelogram),
        static_cast<uint32_t>(total_used_parallelograms));
    // Round up to the nearest full bit.
    return static_cast<int64_t>(
        ceil(static_cast<double>(total_parallelogram) * entropy));
  }

  // Struct that contains data used for measuring the error of each available
  // parallelogram configuration.
  struct Error {
    Error() : num_bits(0), residual_error(0) {}
    // Primary metric: number of bits required to store the data as a result
    // of the selected prediction configuration.
    int num_bits;
    // Secondary metric: absolute difference of residuals for the given
    // configuration.
    int residual_error;
    // Lexicographic ordering: fewer bits wins; residual error breaks ties.
    bool operator<(const Error &e) const {
      if (num_bits < e.num_bits) {
        return true;
      }
      if (num_bits > e.num_bits) {
        return false;
      }
      return residual_error < e.residual_error;
    }
  };

  // Computes error for predicting |predicted_val| instead of |actual_val|.
  // Error is computed as the number of bits needed to encode the difference
  // between the values.
  Error ComputeError(const DataTypeT *predicted_val,
                     const DataTypeT *actual_val, int *out_residuals,
                     int num_components) {
    Error error;
    for (int i = 0; i < num_components; ++i) {
      const int dif = (predicted_val[i] - actual_val[i]);
      error.residual_error += std::abs(dif);
      out_residuals[i] = dif;
      // Entropy needs unsigned symbols, so convert the signed difference to
      // an unsigned symbol.
      entropy_symbols_[i] = ConvertSignedIntToSymbol(dif);
    }
    // Generate entropy data for case that this configuration was used.
    // Note that the entropy stream is NOT updated in this case.
    const auto entropy_data =
        entropy_tracker_.Peek(entropy_symbols_.data(), num_components);
    error.num_bits = entropy_tracker_.GetNumberOfDataBits(entropy_data) +
                     entropy_tracker_.GetNumberOfRAnsTableBits(entropy_data);
    return error;
  }

  typedef constrained_multi_parallelogram::Mode Mode;
  static constexpr int kMaxNumParallelograms =
      constrained_multi_parallelogram::kMaxNumParallelograms;
  // Crease edges are used to store whether any given edge should be used for
  // parallelogram prediction or not. New values are added in the order in
  // which the edges are processed. For better compression, the flags are
  // stored in separate contexts based on the number of available
  // parallelograms at a given vertex.
  // TODO(draco-eng) reconsider std::vector<bool> (performance/space).
  std::vector<bool> is_crease_edge_[kMaxNumParallelograms];
  Mode selected_mode_;
  ShannonEntropyTracker entropy_tracker_;
  // Temporary storage for symbols that are fed into the |entropy_stream|.
  // Always contains only |num_components| entries.
  std::vector<uint32_t> entropy_symbols_;
};
template <typename DataTypeT, class TransformT, class MeshDataT>
bool MeshPredictionSchemeConstrainedMultiParallelogramEncoder<
    DataTypeT, TransformT, MeshDataT>::
    ComputeCorrectionValues(const DataTypeT *in_data, CorrType *out_corr,
                            int size, int num_components,
                            const PointIndex * /* entry_to_point_id_map */) {
  this->transform().Init(in_data, size, num_components);
  const CornerTable *const table = this->mesh_data().corner_table();
  const std::vector<int32_t> *const vertex_to_data_map =
      this->mesh_data().vertex_to_data_map();
  // Predicted values for all simple parallelograms encountered at any given
  // vertex.
  std::vector<DataTypeT> pred_vals[kMaxNumParallelograms];
  for (int i = 0; i < kMaxNumParallelograms; ++i) {
    pred_vals[i].resize(num_components);
  }
  // Used to store predicted value for various multi-parallelogram predictions
  // (combinations of simple parallelogram predictions).
  std::vector<DataTypeT> multi_pred_vals(num_components);
  entropy_symbols_.resize(num_components);
  // Struct for holding data about prediction configuration for different sets
  // of used parallelograms.
  struct PredictionConfiguration {
    PredictionConfiguration()
        : error(), configuration(0), num_used_parallelograms(0) {}
    Error error;
    uint8_t configuration;  // Bitfield, 1 use parallelogram, 0 don't use it.
    int num_used_parallelograms;
    std::vector<DataTypeT> predicted_value;
    std::vector<int32_t> residuals;
  };
  // Bit-field used for computing permutations of excluded edges
  // (parallelograms).
  bool excluded_parallelograms[kMaxNumParallelograms];
  // Data about the number of used parallelogram and total number of available
  // parallelogram for each context. Used to compute overhead needed for
  // storing the parallelogram choices made by the encoder.
  int64_t total_used_parallelograms[kMaxNumParallelograms] = {0};
  int64_t total_parallelograms[kMaxNumParallelograms] = {0};
  std::vector<int> current_residuals(num_components);
  // We start processing the vertices from the end because this prediction
  // uses data from previous entries that could be overwritten when an entry
  // is processed.
  for (int p =
           static_cast<int>(this->mesh_data().data_to_corner_map()->size()) - 1;
       p > 0; --p) {
    const CornerIndex start_corner_id =
        this->mesh_data().data_to_corner_map()->at(p);
    // Go over all corners attached to the vertex and compute the predicted
    // value from the parallelograms defined by their opposite faces.
    CornerIndex corner_id(start_corner_id);
    int num_parallelograms = 0;
    bool first_pass = true;
    while (corner_id != kInvalidCornerIndex) {
      if (ComputeParallelogramPrediction(
              p, corner_id, table, *vertex_to_data_map, in_data, num_components,
              &(pred_vals[num_parallelograms][0]))) {
        // Parallelogram prediction applied and stored in
        // |pred_vals[num_parallelograms]|
        ++num_parallelograms;
        // Stop processing when we reach the maximum number of allowed
        // parallelograms.
        if (num_parallelograms == kMaxNumParallelograms) {
          break;
        }
      }
      // Proceed to the next corner attached to the vertex. First swing left
      // and if we reach a boundary, swing right from the start corner.
      if (first_pass) {
        corner_id = table->SwingLeft(corner_id);
      } else {
        corner_id = table->SwingRight(corner_id);
      }
      if (corner_id == start_corner_id) {
        break;
      }
      if (corner_id == kInvalidCornerIndex && first_pass) {
        first_pass = false;
        corner_id = table->SwingRight(start_corner_id);
      }
    }
    // Offset to the target (destination) vertex.
    const int dst_offset = p * num_components;
    Error error;
    // Compute all prediction errors for all possible configurations of
    // available parallelograms.
    // Variable for holding the best configuration that has been found so far.
    PredictionConfiguration best_prediction;
    // Compute delta coding error (configuration when no parallelogram is
    // selected).
    const int src_offset = (p - 1) * num_components;
    error = ComputeError(in_data + src_offset, in_data + dst_offset,
                         &current_residuals[0], num_components);
    // Account for the overhead of signaling the crease-edge flags in the
    // context of |num_parallelograms| available parallelograms.
    if (num_parallelograms > 0) {
      total_parallelograms[num_parallelograms - 1] += num_parallelograms;
      const int64_t new_overhead_bits =
          ComputeOverheadBits(total_used_parallelograms[num_parallelograms - 1],
                              total_parallelograms[num_parallelograms - 1]);
      error.num_bits += new_overhead_bits;
    }
    // Seed the best configuration with plain delta coding.
    best_prediction.error = error;
    best_prediction.configuration = 0;
    best_prediction.num_used_parallelograms = 0;
    best_prediction.predicted_value.assign(
        in_data + src_offset, in_data + src_offset + num_components);
    best_prediction.residuals.assign(current_residuals.begin(),
                                     current_residuals.end());
    // Compute prediction error for different cases of used parallelograms.
    for (int num_used_parallelograms = 1;
         num_used_parallelograms <= num_parallelograms;
         ++num_used_parallelograms) {
      // Mark all parallelograms as excluded.
      std::fill(excluded_parallelograms,
                excluded_parallelograms + num_parallelograms, true);
      // TODO(draco-eng) maybe this should be another std::fill.
      // Mark the first |num_used_parallelograms| as not excluded.
      for (int j = 0; j < num_used_parallelograms; ++j) {
        excluded_parallelograms[j] = false;
      }
      // Permute over the excluded edges and compute error for each
      // configuration (permutation of excluded parallelograms).
      do {
        // Reset the multi-parallelogram predicted values.
        for (int j = 0; j < num_components; ++j) {
          multi_pred_vals[j] = 0;
        }
        uint8_t configuration = 0;
        for (int j = 0; j < num_parallelograms; ++j) {
          if (excluded_parallelograms[j]) {
            continue;
          }
          for (int c = 0; c < num_components; ++c) {
            multi_pred_vals[c] += pred_vals[j][c];
          }
          // Set jth bit of the configuration.
          configuration |= (1 << j);
        }
        // Average the included parallelogram predictions.
        for (int j = 0; j < num_components; ++j) {
          multi_pred_vals[j] /= num_used_parallelograms;
        }
        error = ComputeError(multi_pred_vals.data(), in_data + dst_offset,
                             &current_residuals[0], num_components);
        const int64_t new_overhead_bits = ComputeOverheadBits(
            total_used_parallelograms[num_parallelograms - 1] +
                num_used_parallelograms,
            total_parallelograms[num_parallelograms - 1]);
        // Add overhead bits to the total error.
        error.num_bits += new_overhead_bits;
        if (error < best_prediction.error) {
          best_prediction.error = error;
          best_prediction.configuration = configuration;
          best_prediction.num_used_parallelograms = num_used_parallelograms;
          best_prediction.predicted_value.assign(multi_pred_vals.begin(),
                                                 multi_pred_vals.end());
          best_prediction.residuals.assign(current_residuals.begin(),
                                           current_residuals.end());
        }
      } while (
          std::next_permutation(excluded_parallelograms,
                                excluded_parallelograms + num_parallelograms));
    }
    if (num_parallelograms > 0) {
      total_used_parallelograms[num_parallelograms - 1] +=
          best_prediction.num_used_parallelograms;
    }
    // Update the entropy stream by adding selected residuals as symbols to
    // the stream.
    for (int i = 0; i < num_components; ++i) {
      entropy_symbols_[i] =
          ConvertSignedIntToSymbol(best_prediction.residuals[i]);
    }
    entropy_tracker_.Push(entropy_symbols_.data(), num_components);
    // Record the crease-edge flags implied by the winning configuration.
    for (int i = 0; i < num_parallelograms; ++i) {
      if ((best_prediction.configuration & (1 << i)) == 0) {
        // Parallelogram not used, mark the edge as crease.
        is_crease_edge_[num_parallelograms - 1].push_back(true);
      } else {
        // Parallelogram used. Add it to the predicted value and mark the
        // edge as not a crease.
        is_crease_edge_[num_parallelograms - 1].push_back(false);
      }
    }
    this->transform().ComputeCorrection(in_data + dst_offset,
                                        best_prediction.predicted_value.data(),
                                        out_corr + dst_offset);
  }
  // First element is always fixed because it cannot be predicted.
  for (int i = 0; i < num_components; ++i) {
    pred_vals[0][i] = static_cast<DataTypeT>(0);
  }
  this->transform().ComputeCorrection(in_data, pred_vals[0].data(), out_corr);
  return true;
}
template <typename DataTypeT, class TransformT, class MeshDataT>
bool MeshPredictionSchemeConstrainedMultiParallelogramEncoder<
    DataTypeT, TransformT, MeshDataT>::EncodePredictionData(EncoderBuffer
                                                                *buffer) {
  // Encode selected edges using separate rans bit coder for each context.
  for (int i = 0; i < kMaxNumParallelograms; ++i) {
    // |i| is the context based on the number of available parallelograms,
    // which is always equal to |i + 1|.
    const int num_used_parallelograms = i + 1;
    // Emit the flag count for this context, then the rans-coded flags
    // themselves (only when the context is non-empty).
    EncodeVarint<uint32_t>(is_crease_edge_[i].size(), buffer);
    if (is_crease_edge_[i].size()) {
      RAnsBitEncoder encoder;
      encoder.StartEncoding();
      // Encode the crease edge flags in the reverse vertex order that is
      // needed by the decoder. Note that for the currently supported mode,
      // each vertex has exactly |num_used_parallelograms| edges that need to
      // be encoded.
      for (int j = static_cast<int>(is_crease_edge_[i].size()) -
                   num_used_parallelograms;
           j >= 0; j -= num_used_parallelograms) {
        // Go over all edges of the current vertex.
        for (int k = 0; k < num_used_parallelograms; ++k) {
          encoder.EncodeBit(is_crease_edge_[i][j + k]);
        }
      }
      encoder.EndEncoding(buffer);
    }
  }
  // Delegate encoding of the transform data to the base class.
  return MeshPredictionSchemeEncoder<DataTypeT, TransformT,
                                     MeshDataT>::EncodePredictionData(buffer);
}
} // namespace draco
#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_CONSTRAINED_MULTI_PARALLELOGRAM_ENCODER_H_

View File

@ -0,0 +1,34 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_CONSTRAINED_MULTI_PARALLELOGRAM_SHARED_H_
#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_CONSTRAINED_MULTI_PARALLELOGRAM_SHARED_H_
namespace draco {
// Data shared between constrained multi-parallelogram encoder and decoder.
namespace constrained_multi_parallelogram {
// Prediction modes of the scheme. The mode byte is only present in
// bitstreams older than version 2.2; newer streams always use the optimal
// mode.
enum Mode {
  // Selects the optimal multi-parallelogram from up to 4 available
  // parallelograms.
  OPTIMAL_MULTI_PARALLELOGRAM = 0,
};
// Maximum number of parallelograms that can participate in a single
// prediction; also the number of crease-edge flag contexts.
static constexpr int kMaxNumParallelograms = 4;
}  // namespace constrained_multi_parallelogram
}  // namespace draco
#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_CONSTRAINED_MULTI_PARALLELOGRAM_SHARED_H_

View File

@ -0,0 +1,72 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_DATA_H_
#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_DATA_H_
#include "draco/mesh/corner_table.h"
#include "draco/mesh/mesh.h"
namespace draco {

// Holds non-owning pointers to the mesh connectivity data and to the mappings
// describing how that connectivity was encoded/decoded. Mesh prediction
// schemes consult this object while processing attribute values.
template <class CornerTableT>
class MeshPredictionSchemeData {
 public:
  using CornerTable = CornerTableT;

  MeshPredictionSchemeData() = default;

  // Stores the connectivity pointers. All pointees must outlive this object;
  // no ownership is taken.
  void Set(const Mesh *mesh, const CornerTable *table,
           const std::vector<CornerIndex> *data_to_corner_map,
           const std::vector<int32_t> *vertex_to_data_map) {
    mesh_ = mesh;
    corner_table_ = table;
    data_to_corner_map_ = data_to_corner_map;
    vertex_to_data_map_ = vertex_to_data_map;
  }

  const Mesh *mesh() const { return mesh_; }
  const CornerTable *corner_table() const { return corner_table_; }
  const std::vector<int32_t> *vertex_to_data_map() const {
    return vertex_to_data_map_;
  }
  const std::vector<CornerIndex> *data_to_corner_map() const {
    return data_to_corner_map_;
  }

  // True once Set() has provided all four pointers.
  bool IsInitialized() const {
    return mesh_ != nullptr && corner_table_ != nullptr &&
           vertex_to_data_map_ != nullptr && data_to_corner_map_ != nullptr;
  }

 private:
  const Mesh *mesh_ = nullptr;
  const CornerTable *corner_table_ = nullptr;
  // Mapping between vertices and their encoding order, i.e., when an
  // attribute entry on a given vertex was encoded.
  const std::vector<int32_t> *vertex_to_data_map_ = nullptr;
  // Which corner was processed when a given attribute entry was
  // encoded or decoded.
  const std::vector<CornerIndex> *data_to_corner_map_ = nullptr;
};

}  // namespace draco
#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_DATA_H_

View File

@ -0,0 +1,46 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_DECODER_H_
#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_DECODER_H_
#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_data.h"
#include "draco/compression/attributes/prediction_schemes/prediction_scheme_decoder.h"
namespace draco {

// Common base for mesh prediction scheme decoders that rely on mesh
// connectivity. |MeshDataT| can be any class that provides the same
// interface as the PredictionSchemeMeshData class.
template <typename DataTypeT, class TransformT, class MeshDataT>
class MeshPredictionSchemeDecoder
    : public PredictionSchemeDecoder<DataTypeT, TransformT> {
 public:
  using MeshData = MeshDataT;

  // |mesh_data| is copied into this decoder; |attribute| and |transform| are
  // forwarded to the generic PredictionSchemeDecoder base.
  MeshPredictionSchemeDecoder(const PointAttribute *attribute,
                              const TransformT &transform,
                              const MeshDataT &mesh_data)
      : PredictionSchemeDecoder<DataTypeT, TransformT>(attribute, transform),
        connectivity_(mesh_data) {}

 protected:
  // Read-only accessor used by derived prediction schemes.
  const MeshData &mesh_data() const { return connectivity_; }

 private:
  MeshData connectivity_;
};

}  // namespace draco
#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_DECODER_H_

View File

@ -0,0 +1,46 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_ENCODER_H_
#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_ENCODER_H_
#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_data.h"
#include "draco/compression/attributes/prediction_schemes/prediction_scheme_encoder.h"
namespace draco {

// Common base for mesh prediction scheme encoders that rely on mesh
// connectivity. |MeshDataT| can be any class that provides the same
// interface as the PredictionSchemeMeshData class.
template <typename DataTypeT, class TransformT, class MeshDataT>
class MeshPredictionSchemeEncoder
    : public PredictionSchemeEncoder<DataTypeT, TransformT> {
 public:
  using MeshData = MeshDataT;

  // |mesh_data| is copied into this encoder; |attribute| and |transform| are
  // forwarded to the generic PredictionSchemeEncoder base.
  MeshPredictionSchemeEncoder(const PointAttribute *attribute,
                              const TransformT &transform,
                              const MeshDataT &mesh_data)
      : PredictionSchemeEncoder<DataTypeT, TransformT>(attribute, transform),
        connectivity_(mesh_data) {}

 protected:
  // Read-only accessor used by derived prediction schemes.
  const MeshData &mesh_data() const { return connectivity_; }

 private:
  MeshData connectivity_;
};

}  // namespace draco
#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_ENCODER_H_

View File

@ -0,0 +1,176 @@
// Copyright 2017 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_GEOMETRIC_NORMAL_DECODER_H_
#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_GEOMETRIC_NORMAL_DECODER_H_
#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_decoder.h"
#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_predictor_area.h"
#include "draco/compression/bit_coders/rans_bit_decoder.h"
#include "draco/draco_features.h"
namespace draco {
// Decoder counterpart of the geometric normal prediction scheme.
// See MeshPredictionSchemeGeometricNormalEncoder for documentation.
template <typename DataTypeT, class TransformT, class MeshDataT>
class MeshPredictionSchemeGeometricNormalDecoder
    : public MeshPredictionSchemeDecoder<DataTypeT, TransformT, MeshDataT> {
 public:
  using CorrType = typename MeshPredictionSchemeDecoder<DataTypeT, TransformT,
                                                        MeshDataT>::CorrType;

  MeshPredictionSchemeGeometricNormalDecoder(const PointAttribute *attribute,
                                             const TransformT &transform,
                                             const MeshDataT &mesh_data)
      : MeshPredictionSchemeDecoder<DataTypeT, TransformT, MeshDataT>(
            attribute, transform, mesh_data),
        predictor_(mesh_data) {}

  bool ComputeOriginalValues(const CorrType *in_corr, DataTypeT *out_data,
                             int size, int num_components,
                             const PointIndex *entry_to_point_id_map) override;

  bool DecodePredictionData(DecoderBuffer *buffer) override;

  PredictionSchemeMethod GetPredictionMethod() const override {
    return MESH_PREDICTION_GEOMETRIC_NORMAL;
  }

  // Ready only when the predictor, the mesh data, and the octahedron toolbox
  // have all been initialized.
  bool IsInitialized() const override {
    return predictor_.IsInitialized() && this->mesh_data().IsInitialized() &&
           octahedron_tool_box_.IsInitialized();
  }

  // Normals are predicted from geometry, so the position attribute is the
  // single parent attribute of this scheme.
  int GetNumParentAttributes() const override { return 1; }

  GeometryAttribute::Type GetParentAttributeType(int i) const override {
    DRACO_DCHECK_EQ(i, 0);
    (void)i;
    return GeometryAttribute::POSITION;
  }

  bool SetParentAttribute(const PointAttribute *att) override {
    // Only three-component position attributes are supported.
    if (att->attribute_type() != GeometryAttribute::POSITION ||
        att->num_components() != 3) {
      return false;
    }
    predictor_.SetPositionAttribute(*att);
    return true;
  }

  void SetQuantizationBits(int q) {
    octahedron_tool_box_.SetQuantizationBits(q);
  }

 private:
  MeshPredictionSchemeGeometricNormalDecoder() {}

  MeshPredictionSchemeGeometricNormalPredictorArea<DataTypeT, TransformT,
                                                   MeshDataT>
      predictor_;
  OctahedronToolBox octahedron_tool_box_;
  RAnsBitDecoder flip_normal_bit_decoder_;
};
// Reconstructs the original values (quantized octahedral normals, i.e., the
// portable attribute) from the decoded corrections in |in_corr|, writing two
// components per entry into |out_data|. |entry_to_point_id_map| maps attribute
// entries to mesh points for the geometric predictor. Always returns true.
template <typename DataTypeT, class TransformT, class MeshDataT>
bool MeshPredictionSchemeGeometricNormalDecoder<
    DataTypeT, TransformT,
    MeshDataT>::ComputeOriginalValues(const CorrType *in_corr,
                                      DataTypeT *out_data, int /* size */,
                                      int num_components,
                                      const PointIndex *entry_to_point_id_map) {
  this->SetQuantizationBits(this->transform().quantization_bits());
  predictor_.SetEntryToPointIdMap(entry_to_point_id_map);
  DRACO_DCHECK(this->IsInitialized());
  // Expecting in_data in octahedral coordinates, i.e., portable attribute.
  DRACO_DCHECK_EQ(num_components, 2);
  const int corner_map_size =
      static_cast<int>(this->mesh_data().data_to_corner_map()->size());
  VectorD<int32_t, 3> pred_normal_3d;
  int32_t pred_normal_oct[2];
  for (int data_id = 0; data_id < corner_map_size; ++data_id) {
    const CornerIndex corner_id =
        this->mesh_data().data_to_corner_map()->at(data_id);
    // Predict the 3D normal from the geometry around |corner_id|.
    predictor_.ComputePredictedValue(corner_id, pred_normal_3d.data());
    // Compute predicted octahedral coordinates.
    octahedron_tool_box_.CanonicalizeIntegerVector(pred_normal_3d.data());
    DRACO_DCHECK_EQ(pred_normal_3d.AbsSum(),
                    octahedron_tool_box_.center_value());
    // The encoder wrote one flip bit per entry; these bits must be consumed
    // here in exactly the same order they were encoded.
    if (flip_normal_bit_decoder_.DecodeNextBit()) {
      pred_normal_3d = -pred_normal_3d;
    }
    octahedron_tool_box_.IntegerVectorToQuantizedOctahedralCoords(
        pred_normal_3d.data(), pred_normal_oct, pred_normal_oct + 1);
    // Two octahedral components per attribute entry.
    const int data_offset = data_id * 2;
    this->transform().ComputeOriginalValue(
        pred_normal_oct, in_corr + data_offset, out_data + data_offset);
  }
  flip_normal_bit_decoder_.EndDecoding();
  return true;
}
// Reads the per-scheme data that precedes the corrections in |buffer|:
// the transform data, a legacy prediction-mode byte (pre-2.2 bitstreams
// only), and the start of the normal-flip bit sequence. The decode order
// here defines the wire format and must match the encoder.
template <typename DataTypeT, class TransformT, class MeshDataT>
bool MeshPredictionSchemeGeometricNormalDecoder<
    DataTypeT, TransformT, MeshDataT>::DecodePredictionData(DecoderBuffer
                                                                *buffer) {
  // Get data needed for transform
  if (!this->transform().DecodeTransformData(buffer)) {
    return false;
  }
#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED
  // Bitstreams older than version 2.2 stored an explicit prediction mode.
  if (buffer->bitstream_version() < DRACO_BITSTREAM_VERSION(2, 2)) {
    uint8_t prediction_mode;
    if (!buffer->Decode(&prediction_mode)) {
      return false;
    }
    if (prediction_mode > TRIANGLE_AREA) {
      // Invalid prediction mode.
      return false;
    }
    if (!predictor_.SetNormalPredictionMode(
            NormalPredictionMode(prediction_mode))) {
      return false;
    }
  }
#endif
  // Init normal flips.
  if (!flip_normal_bit_decoder_.StartDecoding(buffer)) {
    return false;
  }
  return true;
}
} // namespace draco
#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_GEOMETRIC_NORMAL_DECODER_H_

Some files were not shown because too many files have changed in this diff Show More