Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
48 commits
Select commit Hold shift + click to select a range
ce06e53
also buffer write output, with same size as for read buffer
datdenkikniet Aug 2, 2023
d43d138
Add counting option to converting, so that all cubes are counted
datdenkikniet Aug 1, 2023
e6fe238
Fix bar & progress for validate
datdenkikniet Aug 4, 2023
eeb6488
(ab)use LEB128 for fixed-width header so we can write the count without
datdenkikniet Aug 4, 2023
a37c83c
Converting no longer needs a counting option, as it is always done
datdenkikniet Aug 5, 2023
7e05d7a
Merge pull request #39 from datdenkikniet/more-buffers
bertie2 Aug 8, 2023
15977da
Memory mapped file API (mapped_file library)
JATothrim Jul 28, 2023
cd076b5
- fixup region::remap() mremap case not saving the correct size.
JATothrim Jul 30, 2023
fc6b410
fixup missing const in struct_region and array_region
JATothrim Jul 30, 2023
b0ff53e
libmappedfile: implement oversized mapped region
JATothrim Jul 31, 2023
adba818
libmappedfile: Provide writeAt() readAt() API
JATothrim Aug 2, 2023
747cca1
libmappedfile: Misc changes
JATothrim Aug 2, 2023
9b8f463
libmapped_file: Make region moveable
JATothrim Aug 9, 2023
774760e
libmappedfile: Implement region::window()
JATothrim Aug 10, 2023
1a5c426
libmapped_file: Tune the memory mapping a bit
JATothrim Aug 10, 2023
574876b
libmappedfile: comment fixups
JATothrim Aug 11, 2023
3d197a1
libmappedfile: Locking and discard work
JATothrim Aug 12, 2023
cf4aeef
OpenCubes/next repository for C++ implementation.
JATothrim Aug 17, 2023
f2b1f8c
Do const safety pass
JATothrim Jul 30, 2023
2a39964
Close the `friend class Workset` trick.
JATothrim Jul 30, 2023
f8a5671
Update newCache to use libmappedfile
JATothrim Jul 30, 2023
f62780e
fixup tests not compiling.
JATothrim Aug 13, 2023
c760944
Make DEBUG_PRINT less noisy
JATothrim Aug 2, 2023
064b973
Hack Cube struct into 8-bytes
JATothrim Aug 10, 2023
ca14b55
Hashy const qualifiers.
JATothrim Aug 11, 2023
74c0dc3
cubes: Refactor thread scheduling
JATothrim Aug 11, 2023
9ea1c7e
CacheWriter class
JATothrim Aug 13, 2023
d850fdb
CacheWriter: Parallel serialization
JATothrim Aug 14, 2023
5e0d245
Remove include/cache.hpp src/cache.cpp
JATothrim Aug 14, 2023
95c6a07
CacheWriter: Fix-up synchronization
JATothrim Aug 14, 2023
9f315e1
Merge branch 'feature/newCachev2' into next
JATothrim Aug 17, 2023
e5a7bce
Update Readme.md to reflect the state of the C++ implementation.
JATothrim Aug 17, 2023
8a9a7b8
Merge branch 'readmeUpdate' into next
JATothrim Aug 17, 2023
9658905
Add build time configure options.
JATothrim Jul 19, 2023
b12bb50
Provide configure option for the Cube struct compaction.
JATothrim Aug 21, 2023
3753655
Hashy refactor
JATothrim Aug 15, 2023
cda5b3a
Hashy refactor: SubsubHashy
JATothrim Aug 15, 2023
e57e436
Hashy refactor: SubHashy
JATothrim Aug 15, 2023
67ae09a
Hashy refactor: Hashy class
JATothrim Aug 15, 2023
b79f161
libmappedfile: Provide standalone I/O operations for file
JATothrim Aug 19, 2023
64278c8
Hashy CubeSwapper
JATothrim Aug 16, 2023
ea70329
CubeSwapper: I/O optimizations
JATothrim Aug 19, 2023
2f86284
CubeStorage: Memory map 2 MiB area at end of the file.
JATothrim Aug 25, 2023
e8b3d69
Merge branch 'feature/cubeSwapSet' into next
JATothrim Aug 25, 2023
55e15f9
Fix-up asserts and debug build.
JATothrim Aug 25, 2023
713b063
CacheReader: Abstract CubeIterator interface
JATothrim Aug 25, 2023
37d51e5
CacheReader: Implement cache file reading one Cube at time
JATothrim Aug 26, 2023
d7a9661
Merge branch 'feature/cacheReader' into next
JATothrim Aug 26, 2023
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
33 changes: 31 additions & 2 deletions cpp/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -3,11 +3,36 @@ project(cubes CXX)

# default to release build because speed matters.
if(NOT CMAKE_BUILD_TYPE)
set(CMAKE_BUILD_TYPE "Release")
set(CMAKE_BUILD_TYPE "Release" CACHE STRING "CMAKE_BUILD_TYPE: Release, Debug or RelWithDebInfo" FORCE)
endif()

if(NOT BUILD_CUBES_MAX_N)
set(BUILD_CUBES_MAX_N 20 CACHE STRING "Limit of maximum N Polycubes to be computed")
endif()

if(NOT CUBES_PACK_CUBE_XYZ_ADDR)
set(CUBES_PACK_CUBE_XYZ_ADDR ON CACHE BOOL "Pack Cube struct XYZ memory address into 56-bit field.")
endif()

# Try extract current HEAD commit-id in git
find_package(Git)
if(GIT_FOUND)
execute_process(
COMMAND ${GIT_EXECUTABLE} rev-list -n1 HEAD
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
OUTPUT_STRIP_TRAILING_WHITESPACE
RESULT_VARIABLE RESULT
OUTPUT_VARIABLE CONFIG_GIT_VERSION)
message(STATUS "Set ${CONFIG_GIT_VERSION} to build version info")
endif()

# generate config.hpp header in build directory.
set(CONFIG_IS_READONLY "Warning: this file is overwritten during build. Do not edit.")
configure_file("config.hpp.in" "config.hpp")

include_directories("include")
include_directories("libraries")
include_directories("${PROJECT_BINARY_DIR}")

macro(ConfigureTarget Target)
# Enable C++17
Expand Down Expand Up @@ -38,18 +63,22 @@ macro(ConfigureTarget Target)
)
endmacro()

add_library(mapped_file STATIC "libraries/mapped_file.cpp")
ConfigureTarget(mapped_file)

# Source files
add_library(CubeObjs OBJECT
"src/cubes.cpp"
"src/cache.cpp"
"src/rotations.cpp"
"src/newCache.cpp"
"src/cubeSwapSet.cpp"
)
ConfigureTarget(CubeObjs)

# Build main program
add_executable(${PROJECT_NAME} "program.cpp" $<TARGET_OBJECTS:CubeObjs>)
target_link_libraries(${PROJECT_NAME} pthread)
target_link_libraries(${PROJECT_NAME} mapped_file)
ConfigureTarget(${PROJECT_NAME})

# Optionally build tests
Expand Down
52 changes: 48 additions & 4 deletions cpp/Readme.md
Original file line number Diff line number Diff line change
@@ -1,27 +1,71 @@
# C++ implementation of opencubes
- uses list representation of coordinates with ones
- hash function for a coordinate is a simple concatenation of bytes
- can split problem into threads, but performance can be improoved
- can split problem into threads, but performance can be improved

## usage:
```bash
./cubes -n N
```
options:
### options:
```
-n --cube_size
the size of polycube to generate up to
This parameter is required.

-t --threads
the number of threads to use while generating
This parameter is optional. The default value is '1'.

-c --use_cache
whether to load cache files
whether to load cache files.
The last N-1 run must have used -w parameter and that process
must have completed without errors. The cache file
must be present under the cache folder. (-f parameter)
This parameter is optional. The default value is '0'.

-w --write_cache
wheather to save cache files
whether to save cache files
This parameter is optional. The default value is '0'.

-s --split_cache
whether to save separated cache files per output shape.
requires -w parameter to take effect.
No combined cache file is saved when -s is present.
This parameter is optional. The default value is '0'.

-u --use_split_cache
whether to load separated cache files per output shape.
The last N-1 run must have used -s parameter and that process
must have completed without errors. The split cache file(s)
must be present under the cache folder. (-f parameter)
This parameter is optional. The default value is '0'.

-f --cache_file_folder
where to store cache files.
This parameter is optional. The default value is './cache/'.
```

### split cache usage:
Starting with N=9 and beyond it makes sense to use the disk cache system.
To generate starting cache run:
```bash
./cubes -n 9 -w -s
```

The above saves the results into the cache folder (specified with -f parameter)
as split cache files. Next N=10 run can continue processing from where the last N=9 process stopped:
```bash
./cubes -n 10 -w -s -u
```
The split cache file mode attempts to minimize memory usage.
All following runs can use the above command, incrementing N by one each time.

If required you can merge the split cache files
back into single file at last run by dropping the `-s` parameter.
Merging the split cache this way however uses vastly more memory.
(Tool should be developed to export/merge the split cache files as standard cube format file)

## building (cmake)
To build a release version (with optimisations, default)
```bash
Expand Down
18 changes: 18 additions & 0 deletions cpp/config.hpp.in
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
#pragma once
// Template processed by CMake configure_file() into <build dir>/config.hpp;
// the @VAR@ placeholders below are substituted at configure time.
#ifndef OPENCUBES_CONFIG_HPP
#define OPENCUBES_CONFIG_HPP

// @CONFIG_IS_READONLY@

// Version info embedded into the build
// CONFIG_VERSION: git HEAD commit id (empty string if git was not found).
#define CONFIG_VERSION "@CONFIG_GIT_VERSION@"
#define CONFIG_BUILDTYPE "@CMAKE_BUILD_TYPE@"
#define CONFIG_COMPILERID "@CMAKE_CXX_COMPILER_ID@ @CMAKE_CXX_COMPILER_VERSION@"

// Enable Cube struct pointer compaction
// (cmakedefine01 expands to 0 or 1 so it is always safe in #if checks)
#cmakedefine01 CUBES_PACK_CUBE_XYZ_ADDR

// Maximum Polycubes N that may be computed
#define CUBES_MAX_N @BUILD_CUBES_MAX_N@

#endif
35 changes: 0 additions & 35 deletions cpp/include/cache.hpp

This file was deleted.

99 changes: 78 additions & 21 deletions cpp/include/cube.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -3,11 +3,13 @@
#define OPENCUBES_CUBE_HPP

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <memory>
#include <unordered_set>
#include <vector>

#include "config.hpp"
#include "utils.hpp"

struct XYZ {
Expand Down Expand Up @@ -45,20 +47,69 @@ using XYZSet = std::unordered_set<XYZ, HashXYZ, std::equal_to<XYZ>>;

struct Cube {
private:
struct {
// cube memory is stored two ways:
// normal, new'd buffer: is_shared == false
// shared, external memory: is_shared == true
#if CUBES_PACK_CUBE_XYZ_ADDR == 1
struct bits_t {
uint64_t is_shared : 1;
uint64_t size : 7; // MAX 127
uint64_t addr : 56; // low 56-bits of memory address.
};
static_assert(sizeof(bits_t) == sizeof(void *));
#else
struct bits_t {
uint64_t addr;
uint8_t is_shared : 1;
uint8_t size : 7; // MAX 127
} bits;
XYZ *array = nullptr;
};
#endif
// fields
bits_t fields;
// extract the pointer from bits_t
static XYZ *get(bits_t key) {
// pointer bit-hacking:
uint64_t addr = key.addr;
#if CUBES_PACK_CUBE_XYZ_ADDR == 1
// todo: on x86-64 depending if 5-level-paging is enabled
// either 47-bit or 56-bit should be replicated to the high
// part of the address. Don't know how to do this check yet,
// so the high 8-bits is left zeroed.
// If we get segfaults dereferencing get(fields)
// then CUBES_PACK_CUBE_XYZ_ADDR must be disabled.
#endif
return reinterpret_cast<XYZ *>(addr);
}

static_assert(sizeof(bits) == sizeof(uint8_t));
static bits_t put(bool is_shared, int size, XYZ *addr) {
#if CUBES_PACK_CUBE_XYZ_ADDR == 1
// pack the memory address into 56-bits
// on x86-64 it is not used by the hardware (yet).
// This hack actually saves 8 bytes because previously
// the uint8_t caused padding to 16 bytes.
uint64_t tmp = reinterpret_cast<uint64_t>((void *)addr);
assert((tmp & ~0xffffffffffffff) == 0 && "BUG: CUBES_PACK_CUBE_XYZ_ADDR should be disabled");
tmp &= 0xffffffffffffff;
bits_t bits;
bits.addr = tmp;
bits.is_shared = is_shared;
bits.size = size;
return bits;
#else
bits_t bits;
bits.addr = reinterpret_cast<uint64_t>((void *)addr);
bits.is_shared = is_shared;
bits.size = size;
return bits;
#endif
}

public:
// Empty cube
Cube() : bits{0, 0} {}
Cube() : fields{put(0, 0, nullptr)} {}

// Cube with N capacity
explicit Cube(uint8_t N) : bits{0, N}, array(new XYZ[bits.size]) {}
explicit Cube(uint8_t N) : fields{put(0, N, new XYZ[N])} {}

// Construct from pieces
Cube(std::initializer_list<XYZ> il) : Cube(il.size()) { std::copy(il.begin(), il.end(), begin()); }
Expand All @@ -69,20 +120,23 @@ struct Cube {
// Construct from external source.
// Cube shares this the memory until modified.
// Caller guarantees the memory given will live longer than *this
Cube(XYZ *start, uint8_t n) : bits{1, n}, array(start) {}
Cube(const XYZ *start, uint8_t n) : fields{put(1, n, const_cast<XYZ *>(start))} {}

// Copy ctor.
Cube(const Cube &copy) : Cube(copy.size()) { std::copy(copy.begin(), copy.end(), begin()); }

~Cube() {
bits_t bits = fields;
if (!bits.is_shared) {
delete[] array;
delete[] get(bits);
}
}
friend void swap(Cube &a, Cube &b) {
using std::swap;
swap(a.array, b.array);
swap(a.bits, b.bits);
bits_t abits = a.fields;
bits_t bbits = b.fields;
a.fields = bbits;
b.fields = abits;
}

Cube(Cube &&mv) : Cube() { swap(*this, mv); }
Expand All @@ -98,19 +152,11 @@ struct Cube {
return *this;
}

size_t size() const { return bits.size; }
size_t size() const { return fields.size; }

XYZ *data() {
if (bits.is_shared) {
// lift to RAM: this should never happen really.
Cube tmp(array, bits.size);
swap(*this, tmp);
std::printf("Bad use of Cube\n");
}
return array;
}
XYZ *data() { return get(fields); }

const XYZ *data() const { return array; }
const XYZ *data() const { return get(fields); }

XYZ *begin() { return data(); }

Expand Down Expand Up @@ -138,8 +184,19 @@ struct Cube {
void print() const {
for (auto &p : *this) std::printf(" (%2d %2d %2d)\n\r", p.x(), p.y(), p.z());
}

/**
* Copy cube data into destination buffer.
*/
void copyout(int num, XYZ *dest) const {
assert(num <= (signed)size());
std::copy_n(begin(), num, dest);
}
};

#if CUBES_PACK_CUBE_XYZ_ADDR == 1
static_assert(sizeof(Cube) == 8, "Unexpected sizeof(Cube) for Cube");
#endif
static_assert(std::is_move_assignable_v<Cube>, "Cube must be moveable");
static_assert(std::is_swappable_v<Cube>, "Cube must swappable");

Expand Down
Loading