
Commit

feat(rpmalloc): update to 1.4.0
Spasi committed Aug 17, 2019
1 parent a5d9047 commit f066809
Showing 16 changed files with 2,056 additions and 767 deletions.
2 changes: 1 addition & 1 deletion README.md
@@ -179,7 +179,7 @@ improve the chances of a quick and useful response.
|[Meow hash](https://github.com/cmuratori/meow_hash)|An extremely fast non-cryptographic hash.|
|[ODBC](https://docs.microsoft.com/en-us/sql/odbc/microsoft-open-database-connectivity-odbc)|A C programming language interface that makes it possible for applications to access data from a variety of database management systems (DBMSs).|
|[Remotery](https://github.com/Celtoys/Remotery)|A realtime CPU/GPU profiler hosted in a single C file with a viewer that runs in a web browser.|
|[rpmalloc](https://github.com/rampantpixels/rpmalloc)|A public domain cross platform lock free thread caching 16-byte aligned memory allocator implemented in C.|
|[rpmalloc](https://github.com/mjansson/rpmalloc)|A public domain cross platform lock free thread caching 16-byte aligned memory allocator implemented in C.|
|[xxHash](https://github.com/Cyan4973/xxHash)|An Extremely fast Hash algorithm, running at RAM speed limits.|
|[Zstandard](http://facebook.github.io/zstd/) (zstd)|A fast lossless compression algorithm, targeting real-time compression scenarios at zlib-level and better compression ratios.|

2 changes: 1 addition & 1 deletion doc/notes/3.1.3.md
@@ -6,7 +6,7 @@ This build includes the following changes:

#### Bindings

- Added [rpmalloc](https://github.com/rampantpixels/rpmalloc) bindings.
- Added [rpmalloc](https://github.com/mjansson/rpmalloc) bindings.
* Use `-Dorg.lwjgl.system.allocator=rpmalloc` to make it the default memory allocator.
- Added new extensions to OpenCL, EGL, OpenGL and OpenGL ES.
- Assimp: Updated to 4.0.1 (up from 3.3.1)
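To make rpmalloc the default allocator as the note above describes, the `org.lwjgl.system.allocator` property can be set on the JVM command line, or programmatically before the first LWJGL allocation. A minimal sketch, assuming `Configuration.MEMORY_ALLOCATOR` is the programmatic counterpart of that system property:

```java
// JVM flag:
//   java -Dorg.lwjgl.system.allocator=rpmalloc -jar app.jar

// Programmatic equivalent; must run before MemoryUtil is used for the first time.
import org.lwjgl.system.Configuration;

public class AllocatorSetup {
    public static void main(String[] args) {
        Configuration.MEMORY_ALLOCATOR.set("rpmalloc");
        // ... start the application; MemoryUtil.memAlloc & co. now route through rpmalloc.
    }
}
```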
1 change: 1 addition & 0 deletions doc/notes/3.2.3.md
@@ -12,6 +12,7 @@ This build includes the following changes:
- par: Updated `par_shapes` to latest version.
* Added `par_shapes_create_cone`.
- par: Added [par_streamlines](https://prideout.net/blog/par_streamlines) bindings.
- rpmalloc: Updated to 1.4.0 (up from 1.3.2)
- stb
* Updated `stb_image` to 2.23 (up from 2.22)
* Updated `stb_truetype` to 1.22 (up from 1.21)
2 changes: 1 addition & 1 deletion doc/notes/full.md
@@ -422,7 +422,7 @@ This build includes the following changes:

#### Bindings

- Added [rpmalloc](https://github.com/rampantpixels/rpmalloc) bindings.
- Added [rpmalloc](https://github.com/mjansson/rpmalloc) bindings.
* Use `-Dorg.lwjgl.system.allocator=rpmalloc` to make it the default memory allocator.
- Added new extensions to OpenCL, EGL, OpenGL and OpenGL ES.
- Assimp: Updated to 4.0.1 (up from 3.3.1)
@@ -520,7 +520,7 @@ enum class Module(
"rpmalloc",
"org.lwjgl.system.rpmalloc",
"""
Contains bindings to the ${url("https://github.com/rampantpixels/rpmalloc", "rpmalloc")} library. rpmalloc is a public domain cross platform lock free
Contains bindings to the ${url("https://github.com/mjansson/rpmalloc", "rpmalloc")} library. rpmalloc is a public domain cross platform lock free
thread caching 16-byte aligned memory allocator implemented in C.
""",
library = JNILibrary.create("LibRPmalloc"),
31 changes: 1 addition & 30 deletions modules/lwjgl/rpmalloc/rpmalloc_license.txt
@@ -21,33 +21,4 @@ OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.

For more information, please refer to <http://unlicense.org>


For a commercial license including support SLA contact Rampant Pixels at
[email protected]

Or, if you so choose, you can also use this software under the MIT license.


The MIT License (MIT)

Copyright (c) 2017 Rampant Pixels AB

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
For more information, please refer to <http://unlicense.org>
@@ -6,6 +6,7 @@
#include "common_tools.h"
DISABLE_WARNINGS()
//#define ENABLE_STATISTICS 1
#define RPMALLOC_CONFIGURABLE 1
#include "rpmalloc.c"
ENABLE_WARNINGS()

@@ -34,11 +34,15 @@
* the size of memory pages.
*
* <p>The page size MUST be a power of two in {@code [512,16384]} range (2<sup>9</sup> to 2<sup>14</sup>) unless 0 - set to 0 to use system page size. All
* memory mapping requests to {@code memory_map} will be made with size set to a multiple of the page size.</p></li>
* memory mapping requests to {@code memory_map} will be made with size set to a multiple of the page size.</p>
*
* <p>Used if {@code RPMALLOC_CONFIGURABLE} is defined to 1, otherwise system page size is used.</p></li>
* <li>{@code span_size} &ndash;
* size of a span of memory blocks.
*
* <p>MUST be a power of two, and in {@code [4096,262144]} range (unless 0 - set to 0 to use the default span size).</p></li>
* <p>MUST be a power of two, and in {@code [4096,262144]} range (unless 0 - set to 0 to use the default span size).</p>
*
* <p>Used if {@code RPMALLOC_CONFIGURABLE} is defined to 1.</p></li>
* <li>{@code span_map_count} &ndash;
* number of spans to map at each request to map new virtual memory blocks.
*
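Because the natives now define `RPMALLOC_CONFIGURABLE=1` before including `rpmalloc.c` (see the wrapper change above), `page_size` and `span_size` are honored at initialization. A minimal sketch, assuming the binding exposes `rpmalloc_initialize_config` and an `RPMallocConfig` struct with setters mirroring `rpmalloc_config_t`:

```java
import org.lwjgl.system.rpmalloc.RPMallocConfig;

import static org.lwjgl.system.rpmalloc.RPmalloc.*;

public class RPmallocConfigSketch {
    public static void main(String[] args) {
        RPMallocConfig config = RPMallocConfig.calloc(); // zeroed: unset fields fall back to rpmalloc defaults
        try {
            config
                .page_size(4096)       // power of two in [512, 16384], or 0 for the system page size
                .span_size(64 * 1024)  // power of two in [4096, 262144], or 0 for the default span size
                .span_map_count(32);   // spans mapped per virtual-memory request, or 0 for the default
            rpmalloc_initialize_config(config);

            // ... allocate with rpmalloc/rpfree ...

            rpmalloc_finalize();
        } finally {
            config.free();
        }
    }
}
```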
@@ -17,7 +17,7 @@
import static org.lwjgl.system.MemoryUtil.*;

/**
* Native bindings to the <a target="_blank" href="https://github.com/rampantpixels/rpmalloc">rpmalloc</a> library. rpmalloc is a public domain cross platform lock free
* Native bindings to the <a target="_blank" href="https://github.com/mjansson/rpmalloc">rpmalloc</a> library. rpmalloc is a public domain cross platform lock free
* thread caching 16-byte aligned memory allocator implemented in C.
*
* <p>You are required to call these functions from your own code in order to initialize and finalize the allocator in your process and threads:</p>
@@ -32,6 +32,8 @@
* <p>Then simply use the {@link #rpmalloc malloc}/{@link #rpfree free} and the other malloc style replacement functions. Remember all allocations are 16-byte aligned, so no need to
 * call the explicit {@link #rpmemalign memalign}/{@link #rpaligned_alloc aligned_alloc}/{@link #rpposix_memalign posix_memalign} functions unless you need greater alignment; they are simply wrappers to make it
* easier to replace in existing code.</p>
*
* <p>The rpmalloc build in LWJGL is configured with {@code RPMALLOC_CONFIGURABLE=1} and {@code ENABLE_STATISTICS=0}.</p>
*/
public class RPmalloc {

@@ -247,7 +249,12 @@ public static ByteBuffer rprealloc(@Nullable @NativeType("void *") ByteBuffer pt
/** Unsafe version of: {@link #rpaligned_realloc aligned_realloc} */
public static native long nrpaligned_realloc(long ptr, long alignment, long size, long oldsize, int flags);

/** Reallocates the given block to at least the given size and alignment, with optional control flags (see {@link #RPMALLOC_NO_PRESERVE MALLOC_NO_PRESERVE}) */
/**
* Reallocates the given block to at least the given size and alignment, with optional control flags (see {@link #RPMALLOC_NO_PRESERVE MALLOC_NO_PRESERVE}).
*
* <p>Alignment must be a power of two and a multiple of {@code sizeof(void*)}, and should ideally be less than memory page size. A caveat of rpmalloc
* internals is that this must also be strictly less than the span size (default {@code 64KiB}).</p>
*/
@Nullable
@NativeType("void *")
public static ByteBuffer rpaligned_realloc(@Nullable @NativeType("void *") ByteBuffer ptr, @NativeType("size_t") long alignment, @NativeType("size_t") long size, @NativeType("unsigned int") int flags) {
@@ -260,7 +267,12 @@ public static ByteBuffer rpaligned_realloc(@Nullable @NativeType("void *") ByteB
/** Unsafe version of: {@link #rpaligned_alloc aligned_alloc} */
public static native long nrpaligned_alloc(long alignment, long size);

/** Allocates a memory block of at least the given size and alignment. */
/**
* Allocates a memory block of at least the given size and alignment.
*
* <p>Alignment must be a power of two and a multiple of {@code sizeof(void*)}, and should ideally be less than memory page size. A caveat of rpmalloc
* internals is that this must also be strictly less than the span size (default {@code 64KiB}).</p>
*/
@Nullable
@NativeType("void *")
public static ByteBuffer rpaligned_alloc(@NativeType("size_t") long alignment, @NativeType("size_t") long size) {
@@ -273,7 +285,12 @@ public static ByteBuffer rpaligned_alloc(@NativeType("size_t") long alignment, @
/** Unsafe version of: {@link #rpmemalign memalign} */
public static native long nrpmemalign(long alignment, long size);

/** Allocates a memory block of at least the given size and alignment. */
/**
* Allocates a memory block of at least the given size and alignment.
*
* <p>Alignment must be a power of two and a multiple of {@code sizeof(void*)}, and should ideally be less than memory page size. A caveat of rpmalloc
* internals is that this must also be strictly less than the span size (default {@code 64KiB}).</p>
*/
@Nullable
@NativeType("void *")
public static ByteBuffer rpmemalign(@NativeType("size_t") long alignment, @NativeType("size_t") long size) {
@@ -286,7 +303,12 @@ public static ByteBuffer rpmemalign(@NativeType("size_t") long alignment, @Nativ
/** Unsafe version of: {@link #rpposix_memalign posix_memalign} */
public static native int nrpposix_memalign(long memptr, long alignment, long size);

/** Allocates a memory block of at least the given size and alignment. */
/**
* Allocates a memory block of at least the given size and alignment.
*
* <p>Alignment must be a power of two and a multiple of {@code sizeof(void*)}, and should ideally be less than memory page size. A caveat of rpmalloc
* internals is that this must also be strictly less than the span size (default {@code 64KiB}).</p>
*/
public static int rpposix_memalign(@NativeType("void **") PointerBuffer memptr, @NativeType("size_t") long alignment, @NativeType("size_t") long size) {
if (CHECKS) {
check(memptr, 1);
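Putting the lifecycle and alignment rules above together, here is a minimal usage sketch of process/thread initialization plus one aligned allocation. Method names follow the binding shown in this diff; treat the exact signatures as assumptions.

```java
import java.nio.ByteBuffer;

import static org.lwjgl.system.rpmalloc.RPmalloc.*;

public class RPmallocUsageSketch {
    public static void main(String[] args) {
        rpmalloc_initialize();        // once per process
        rpmalloc_thread_initialize(); // once per thread that allocates (here: the main thread)
        try {
            // Plain allocations are already 16-byte aligned.
            ByteBuffer block = rpmalloc(256);
            rpfree(block);

            // Stronger alignment: power of two, multiple of sizeof(void*),
            // and strictly less than the span size (64KiB by default).
            ByteBuffer page = rpaligned_alloc(4096, 1024);
            rpfree(page);
        } finally {
            rpmalloc_thread_finalize(); // before the thread exits
            rpmalloc_finalize();        // before the process exits
        }
    }
}
```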
@@ -19,18 +19,28 @@
* <h3>Member documentation</h3>
*
* <ul>
* <li>{@code mapped} &ndash; Current amount of virtual memory mapped (only if {@code ENABLE_STATISTICS=1})</li>
* <li>{@code cached} &ndash; Current amount of memory in global caches for small and medium sizes (&lt;64KiB)</li>
* <li>{@code mapped_total} &ndash; Total amount of memory mapped (only if {@code ENABLE_STATISTICS=1})</li>
* <li>{@code unmapped_total} &ndash; Total amount of memory unmapped (only if {@code ENABLE_STATISTICS=1})</li>
* <li>{@code mapped} &ndash; Current amount of virtual memory mapped, all of which might not have been committed (only if {@code ENABLE_STATISTICS=1})</li>
* <li>{@code mapped_peak} &ndash; Peak amount of virtual memory mapped, all of which might not have been committed (only if {@code ENABLE_STATISTICS=1})</li>
* <li>{@code cached} &ndash; Current amount of memory in global caches for small and medium sizes (&lt;32KiB)</li>
* <li>{@code huge_alloc} &ndash;
 * Current amount of memory allocated in huge allocations, i.e. larger than {@code LARGE_SIZE_LIMIT}, which is 2MiB by default (only if
* {@code ENABLE_STATISTICS=1})</li>
* <li>{@code huge_alloc_peak} &ndash;
 * Peak amount of memory allocated in huge allocations, i.e. larger than {@code LARGE_SIZE_LIMIT}, which is 2MiB by default (only if
* {@code ENABLE_STATISTICS=1})</li>
* <li>{@code mapped_total} &ndash; Total amount of memory mapped since initialization (only if {@code ENABLE_STATISTICS=1})</li>
* <li>{@code unmapped_total} &ndash; Total amount of memory unmapped since initialization (only if {@code ENABLE_STATISTICS=1})</li>
* </ul>
*
* <h3>Layout</h3>
*
* <pre><code>
* struct rpmalloc_global_statistics_t {
* size_t mapped;
* size_t mapped_peak;
* size_t cached;
* size_t huge_alloc;
* size_t huge_alloc_peak;
* size_t mapped_total;
* size_t unmapped_total;
* }</code></pre>
@@ -47,12 +57,18 @@ public class RPmallocGlobalStatistics extends Struct implements NativeResource {
/** The struct member offsets. */
public static final int
MAPPED,
MAPPED_PEAK,
CACHED,
HUGE_ALLOC,
HUGE_ALLOC_PEAK,
MAPPED_TOTAL,
UNMAPPED_TOTAL;

static {
Layout layout = __struct(
__member(POINTER_SIZE),
__member(POINTER_SIZE),
__member(POINTER_SIZE),
__member(POINTER_SIZE),
__member(POINTER_SIZE),
__member(POINTER_SIZE),
@@ -63,9 +79,12 @@ public class RPmallocGlobalStatistics extends Struct implements NativeResource {
ALIGNOF = layout.getAlignment();

MAPPED = layout.offsetof(0);
CACHED = layout.offsetof(1);
MAPPED_TOTAL = layout.offsetof(2);
UNMAPPED_TOTAL = layout.offsetof(3);
MAPPED_PEAK = layout.offsetof(1);
CACHED = layout.offsetof(2);
HUGE_ALLOC = layout.offsetof(3);
HUGE_ALLOC_PEAK = layout.offsetof(4);
MAPPED_TOTAL = layout.offsetof(5);
UNMAPPED_TOTAL = layout.offsetof(6);
}

/**
@@ -84,9 +103,18 @@ public RPmallocGlobalStatistics(ByteBuffer container) {
/** Returns the value of the {@code mapped} field. */
@NativeType("size_t")
public long mapped() { return nmapped(address()); }
/** Returns the value of the {@code mapped_peak} field. */
@NativeType("size_t")
public long mapped_peak() { return nmapped_peak(address()); }
/** Returns the value of the {@code cached} field. */
@NativeType("size_t")
public long cached() { return ncached(address()); }
/** Returns the value of the {@code huge_alloc} field. */
@NativeType("size_t")
public long huge_alloc() { return nhuge_alloc(address()); }
/** Returns the value of the {@code huge_alloc_peak} field. */
@NativeType("size_t")
public long huge_alloc_peak() { return nhuge_alloc_peak(address()); }
/** Returns the value of the {@code mapped_total} field. */
@NativeType("size_t")
public long mapped_total() { return nmapped_total(address()); }
@@ -239,8 +267,14 @@ public static RPmallocGlobalStatistics.Buffer callocStack(int capacity, MemorySt

/** Unsafe version of {@link #mapped}. */
public static long nmapped(long struct) { return memGetAddress(struct + RPmallocGlobalStatistics.MAPPED); }
/** Unsafe version of {@link #mapped_peak}. */
public static long nmapped_peak(long struct) { return memGetAddress(struct + RPmallocGlobalStatistics.MAPPED_PEAK); }
/** Unsafe version of {@link #cached}. */
public static long ncached(long struct) { return memGetAddress(struct + RPmallocGlobalStatistics.CACHED); }
/** Unsafe version of {@link #huge_alloc}. */
public static long nhuge_alloc(long struct) { return memGetAddress(struct + RPmallocGlobalStatistics.HUGE_ALLOC); }
/** Unsafe version of {@link #huge_alloc_peak}. */
public static long nhuge_alloc_peak(long struct) { return memGetAddress(struct + RPmallocGlobalStatistics.HUGE_ALLOC_PEAK); }
/** Unsafe version of {@link #mapped_total}. */
public static long nmapped_total(long struct) { return memGetAddress(struct + RPmallocGlobalStatistics.MAPPED_TOTAL); }
/** Unsafe version of {@link #unmapped_total}. */
@@ -287,9 +321,18 @@ protected RPmallocGlobalStatistics getElementFactory() {
/** Returns the value of the {@code mapped} field. */
@NativeType("size_t")
public long mapped() { return RPmallocGlobalStatistics.nmapped(address()); }
/** Returns the value of the {@code mapped_peak} field. */
@NativeType("size_t")
public long mapped_peak() { return RPmallocGlobalStatistics.nmapped_peak(address()); }
/** Returns the value of the {@code cached} field. */
@NativeType("size_t")
public long cached() { return RPmallocGlobalStatistics.ncached(address()); }
/** Returns the value of the {@code huge_alloc} field. */
@NativeType("size_t")
public long huge_alloc() { return RPmallocGlobalStatistics.nhuge_alloc(address()); }
/** Returns the value of the {@code huge_alloc_peak} field. */
@NativeType("size_t")
public long huge_alloc_peak() { return RPmallocGlobalStatistics.nhuge_alloc_peak(address()); }
/** Returns the value of the {@code mapped_total} field. */
@NativeType("size_t")
public long mapped_total() { return RPmallocGlobalStatistics.nmapped_total(address()); }
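The new and existing fields can be read through `rpmalloc_global_statistics`. A minimal sketch, assuming the binding exposes that function taking this struct; note that the fields documented as "only if ENABLE_STATISTICS=1" stay zero in the LWJGL build, which is configured with `ENABLE_STATISTICS=0`.

```java
import org.lwjgl.system.MemoryStack;
import org.lwjgl.system.rpmalloc.RPmallocGlobalStatistics;

import static org.lwjgl.system.rpmalloc.RPmalloc.*;

public class RPmallocStatsSketch {
    public static void main(String[] args) {
        rpmalloc_initialize();
        rpmalloc_thread_initialize();
        try (MemoryStack stack = MemoryStack.stackPush()) {
            RPmallocGlobalStatistics stats = RPmallocGlobalStatistics.callocStack(stack);
            rpmalloc_global_statistics(stats);

            // 'cached' is tracked unconditionally; the mapped/huge_alloc counters
            // require natives built with ENABLE_STATISTICS=1.
            System.out.println("cached:          " + stats.cached());
            System.out.println("mapped:          " + stats.mapped());
            System.out.println("huge_alloc_peak: " + stats.huge_alloc_peak());
        } finally {
            rpmalloc_thread_finalize();
            rpmalloc_finalize();
        }
    }
}
```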