feat(lz4): update to 1.9.3
Spasi committed Mar 25, 2021
1 parent 9f096f6 commit f489247
Showing 21 changed files with 928 additions and 701 deletions.
1 change: 1 addition & 0 deletions doc/notes/3.3.0.md
@@ -24,6 +24,7 @@ This build includes the following changes:
    * Added standard cursors for diagonal and omnidirectional resize/move and operation-not-allowed.
    * Added OSMesa native access functions.
- lmdb: Updated to 0.9.28 (up from 0.9.24)
- lz4: Updated to 1.9.3 (up from 1.9.2)
- Nuklear: Updated to 4.04.1 (up from 4.01.0)
- OpenAL Soft: Updated to 1.21.1 (up from 1.19.1)
    * Added `AL_SOFT_bformat_ex` extension.
@@ -773,7 +773,7 @@ $indentation}"""
if (mallocable || members.any { m ->
m.nativeType.let {
(it.mapping === PointerMapping.DATA_POINTER && it is PointerType<*> && (it.elementType !is StructType || m is StructMemberArray)) ||
(it.mapping === PrimitiveMapping.POINTER && m is StructMemberArray && it !is StructType)
(m is StructMemberArray && m.arrayType.elementType.isPointer && m.arrayType.elementType !is StructType)
}
})
println("import org.lwjgl.*;")
42 changes: 26 additions & 16 deletions modules/lwjgl/lz4/src/generated/java/org/lwjgl/util/lz4/LZ4.java
@@ -78,7 +78,7 @@ public class LZ4 {
public static final int
LZ4_VERSION_MAJOR = 1,
LZ4_VERSION_MINOR = 9,
LZ4_VERSION_RELEASE = 2;
LZ4_VERSION_RELEASE = 3;

/** Version number. */
public static final int LZ4_VERSION_NUMBER = (LZ4_VERSION_MAJOR *100*100 + LZ4_VERSION_MINOR *100 + LZ4_VERSION_RELEASE);
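As a quick sanity check of the formula above (not part of the diff): 1 * 100 * 100 + 9 * 100 + 3 == 10903 for this 1.9.3 update. A minimal sketch comparing the generated constant against the runtime value, assuming LZ4_versionNumber() is the binding of lz4's function of the same name:

// Worked example for the version formula: 10000 * major + 100 * minor + release.
int expected = 1 * 100 * 100 + 9 * 100 + 3; // 10903 == LZ4 1.9.3
assert LZ4.LZ4_VERSION_NUMBER == expected;
// Assumed binding: LZ4_versionNumber() reports the version of the loaded native library.
assert LZ4.LZ4_versionNumber() == LZ4.LZ4_VERSION_NUMBER;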
@@ -103,9 +103,9 @@ public class LZ4 {

public static final int LZ4_HASH_SIZE_U32 = (1 << LZ4_HASHLOG);

public static final int LZ4_STREAMSIZE_U64 = (1 << (LZ4_MEMORY_USAGE-3)) + 4 + (Pointer.POINTER_SIZE == 16 ? 4 : 0);
public static final int LZ4_STREAMSIZE = 16416;

public static final int LZ4_STREAMSIZE = (LZ4_STREAMSIZE_U64 * Long.BYTES);
public static final int LZ4_STREAMSIZE_VOIDP = LZ4_STREAMSIZE / Pointer.POINTER_SIZE;

public static final int LZ4_STREAMDECODESIZE_U64 = 4 + (Pointer.POINTER_SIZE == 16 ? 2 : 0);

@@ -223,7 +223,8 @@ public static int LZ4_COMPRESSBOUND(int isize) {
*
* <p>The larger the acceleration value, the faster the algorithm, but also the lesser the compression. It's a trade-off. It can be fine tuned, with each
* successive value providing roughly +~3% to speed. An acceleration value of "1" is the same as regular {@link #LZ4_compress_default compress_default}. Values &le; 0 will be
* replaced by {@code ACCELERATION_DEFAULT} (currently == 1, see lz4.c).</p>
* replaced by {@code LZ4_ACCELERATION_DEFAULT} (currently == 1, see lz4.c). Values &gt; {@code LZ4_ACCELERATION_MAX} will be replaced by
* {@code LZ4_ACCELERATION_MAX} (currently {@code == 65537}, see lz4.c).</p>
*/
public static int LZ4_compress_fast(@NativeType("char const *") ByteBuffer src, @NativeType("char *") ByteBuffer dst, int acceleration) {
return nLZ4_compress_fast(memAddress(src), memAddress(dst), src.remaining(), dst.remaining(), acceleration);
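A minimal sketch of the acceleration trade-off described above, using the ByteBuffer overload shown here together with LZ4_COMPRESSBOUND(); the buffer size, test data and acceleration value are illustrative, and the class name is hypothetical:

import static org.lwjgl.system.MemoryUtil.*;
import static org.lwjgl.util.lz4.LZ4.*;

import java.nio.ByteBuffer;

public final class Lz4FastSketch {
    public static void main(String[] args) {
        ByteBuffer src = memAlloc(64 * 1024);
        for (int i = 0; i < src.capacity(); i++) {
            src.put(i, (byte)(i & 0x0F)); // repetitive, highly compressible test data
        }

        // Worst-case compressed size for src.remaining() input bytes.
        ByteBuffer dst = memAlloc(LZ4_COMPRESSBOUND(src.remaining()));

        // acceleration == 1 behaves like LZ4_compress_default(); larger values trade
        // compression ratio for speed, out-of-range values are clamped (see javadoc above).
        int compressedSize = LZ4_compress_fast(src, dst, 8);
        if (compressedSize <= 0) {
            throw new IllegalStateException("compression failed"); // dst too small
        }
        dst.limit(compressedSize); // compressed bytes are now in [0, compressedSize)

        memFree(dst);
        memFree(src);
    }
}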
@@ -283,19 +284,28 @@ public static int LZ4_compress_destSize(@NativeType("char const *") ByteBuffer s

/**
* Decompresses an LZ4 compressed block, of size {@code srcSize} at position {@code src}, into destination buffer {@code dst} of size {@code dstCapacity}.
* Up to {@code targetOutputSize} bytes will be decoded. The function stops decoding on reaching this objective, which can boost performance when only the
* beginning of a block is required.
*
* <p>Note: this function features 2 parameters, {@code targetOutputSize} and {@code dstCapacity}, and expects {@code targetOutputSize &le; dstCapacity}. It
* effectively stops decoding on reaching {@code targetOutputSize}, so {@code dstCapacity} is kind of redundant. This is because in a previous version of
* this function, decoding operation would not "break" a sequence in the middle. As a consequence, there was no guarantee that decoding would stop at
* exactly {@code targetOutputSize}, it could write more bytes, though only up to {@code dstCapacity}. Some "margin" used to be required for this
* operation to work properly. This is no longer necessary. The function nonetheless keeps its signature, in an effort to not break API.</p>
*
* <p>Up to {@code targetOutputSize} bytes will be decoded. The function stops decoding on reaching this objective. This can be useful to boost performance
* whenever only the beginning of a block is required.</p>
*
* <p>Notes:</p>
*
* <ol>
* <li>result can be &lt; {@code targetOutputSize}, if compressed block contains less data.</li>
* <li>{@code targetOutputSize} must be &le; {@code dstCapacity}</li>
* <li>this function effectively stops decoding on reaching {@code targetOutputSize}, so {@code dstCapacity} is kind of redundant. This is because in
* older versions of this function, decoding operation would still write complete sequences. Therefore, there was no guarantee that it would stop
* writing at exactly {@code targetOutputSize}, it could write more bytes, though only up to {@code dstCapacity}. Some "margin" used to be required
* for this operation to work properly. Thankfully, this is no longer necessary. The function nonetheless keeps the same signature, in an effort to
* preserve API compatibility.</li>
* <li>if {@code srcSize} is the exact size of the block, then {@code targetOutputSize} can be any value, including larger than the block's decompressed
* size. The function will, at most, generate block's decompressed size.</li>
* <li>if {@code srcSize} is <em>larger</em> than block's compressed size, then {@code targetOutputSize} <b>MUST</b> be &le; block's decompressed size.
* Otherwise, <em>silent corruption will occur</em>.</li>
* </ol>
*
* @return the number of bytes decoded in {@code dst} (necessarily &le; {@code dstCapacity}). If source stream is detected malformed, function returns a negative
* result.
*
* <p>Note: can be &lt; {@code targetOutputSize}, if compressed block contains less data.</p>
* @return the number of bytes decoded in {@code dst} (necessarily &le; {@code targetOutputSize}). If source stream is detected malformed, function returns a
* negative result.
*/
public static int LZ4_decompress_safe_partial(@NativeType("char const *") ByteBuffer src, @NativeType("char *") ByteBuffer dst, int targetOutputSize) {
return nLZ4_decompress_safe_partial(memAddress(src), memAddress(dst), src.remaining(), targetOutputSize, dst.remaining());
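A hedged sketch of the partial-decode pattern described in the notes above, reusing the static imports from the earlier sketch; compressedBlock is assumed to hold exactly one LZ4 block:

// Decode only the first targetOutputSize bytes of a block (dstCapacity >= targetOutputSize).
static ByteBuffer decodePrefix(ByteBuffer compressedBlock, int targetOutputSize) {
    ByteBuffer dst = memAlloc(targetOutputSize); // dst.remaining() is the dstCapacity
    int decoded = LZ4_decompress_safe_partial(compressedBlock, dst, targetOutputSize);
    if (decoded < 0) {
        memFree(dst);
        throw new IllegalStateException("malformed LZ4 block"); // negative result: malformed source
    }
    dst.limit(decoded); // may be < targetOutputSize if the block holds less data
    return dst;
}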
@@ -325,15 +325,17 @@ public static long LZ4F_compressBegin(@NativeType("LZ4F_cctx *") long cctx, @Nat
public static native long nLZ4F_compressBound(long srcSize, long prefsPtr);

/**
* Provides minimum {@code dstCapacity} required to guarantee compression success given a {@code srcSize} and preferences, covering worst case scenario.
* Provides minimum {@code dstCapacity} required to guarantee success of {@link #LZ4F_compressUpdate compressUpdate}, given a {@code srcSize} and preferences, for a worst case
* scenario.
*
* <p>Estimation is valid for either {@link #LZ4F_compressUpdate compressUpdate}, {@link #LZ4F_flush flush} or {@link #LZ4F_compressEnd compressEnd}. Estimation includes the possibility that internal buffer might already
* be filled by up to {@code (blockSize-1)} bytes. It also includes frame footer {@code (ending + checksum)}, which would have to be generated by
* {@link #LZ4F_compressEnd compressEnd}. Estimation doesn't include frame header, as it was already generated by {@link #LZ4F_compressBegin compressBegin}.</p>
* <p>When {@code srcSize==0}, {@code LZ4F_compressBound()} provides an upper bound for {@link #LZ4F_flush flush} and {@link #LZ4F_compressEnd compressEnd} instead. Note that the result is only
* valid for a single invocation of {@code LZ4F_compressUpdate()}. When invoking {@code LZ4F_compressUpdate()} multiple times, if the output buffer is
* gradually filled up instead of emptied and re-used from its start, one must check if there is enough remaining capacity before each invocation, using
* {@code LZ4F_compressBound()}.</p>
*
* <p>Result is always the same for a {@code srcSize} and {@code prefsPtr}, so it can be trusted to size reusable buffers.</p>
*
* <p>When {@code srcSize==0}, {@code LZ4F_compressBound()} provides an upper bound for {@link #LZ4F_flush flush} and {@link #LZ4F_compressEnd compressEnd} operations.</p>
* <p>Result is always the same for a {@code srcSize} and {@code prefsPtr}. If automatic flushing is not enabled, includes the possibility that internal
* buffer might already be filled by up to {@code (blockSize-1)} bytes. It also includes frame footer {@code (ending + checksum)}, since it might be
* generated by {@code LZ4F_compressEnd()}. The result doesn't include frame header, as it was already generated by {@code LZ4F_compressBegin()}.</p>
*
* @param prefsPtr optional: when {@code NULL} is provided, preferences will be set to cover worst case scenario
*/
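To make the buffer-sizing rule above concrete, a sketch sizing a reusable output buffer for repeated LZ4F_compressUpdate() calls. It assumes the public LZ4F_compressBound(long, LZ4FPreferences) wrapper that accompanies the native method shown here; only nLZ4F_compressBound() appears in this diff:

// With NULL preferences the bound covers the worst case, so one buffer of this size is
// enough for any single compressUpdate()/flush()/compressEnd() call with srcSize <= CHUNK_SIZE.
static final long CHUNK_SIZE = 64 * 1024;

static ByteBuffer allocateCompressBuffer() {
    long bound = LZ4F_compressBound(CHUNK_SIZE, null); // assumed wrapper; NULL prefs = worst case
    return memAlloc((int)bound);
}

// Per the note above, the result depends only on srcSize and the preferences, so the buffer can
// be reused, provided it is emptied (position reset) before each invocation.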
@@ -508,8 +510,10 @@ public static long LZ4F_getFrameInfo(@NativeType("LZ4F_dctx *") long dctx, @Nati
public static native long nLZ4F_decompress(long dctx, long dstBuffer, long dstSizePtr, long srcBuffer, long srcSizePtr, long dOptPtr);

/**
* Call this function repetitively to regenerate compressed data from {@code srcBuffer}. The function will read up to {@code *srcSizePtr}
* bytes from {@code srcBuffer}, and decompress data into {@code dstBuffer}, of capacity {@code *dstSizePtr}.
* Call this function repetitively to regenerate data compressed in {@code srcBuffer}.
*
* <p>The function requires a valid {@code dctx} state. It will read up to {@code *srcSizePtr} bytes from {@code srcBuffer}, and decompress data into
* {@code dstBuffer}, of capacity {@code *dstSizePtr}.</p>
*
* <p>The nb of bytes consumed from {@code srcBuffer} will be written into {@code *srcSizePtr} (necessarily &le; original value). The number of bytes
* decompressed into {@code dstBuffer} will be written into {@code *dstSizePtr} (necessarily &le; original value).</p>
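A sketch of the repetitive-call pattern described above, driven through the nLZ4F_decompress() entry point shown in this diff (static imports from this class and org.lwjgl.system.MemoryUtil are assumed). The dctx is assumed to come from LZ4F_createDecompressionContext(), and error checking with LZ4F_isError() is omitted for brevity:

// Streaming decode loop: feed src in, drain dst out, until the frame reports completion.
// The size pointers are in/out: capacities on input, bytes consumed/produced on output.
static void decompressFrame(long dctx, ByteBuffer src, ByteBuffer dst) {
    PointerBuffer dstSize = memAllocPointer(1);
    PointerBuffer srcSize = memAllocPointer(1);
    try {
        long hint; // 0 == frame fully decoded; otherwise a hint for the next call (or an error code)
        do {
            dstSize.put(0, dst.remaining());
            srcSize.put(0, src.remaining());
            hint = nLZ4F_decompress(dctx,
                memAddress(dst), memAddress(dstSize),
                memAddress(src), memAddress(srcSize),
                NULL); // no LZ4F_decompressOptions_t
            src.position(src.position() + (int)srcSize.get(0)); // bytes consumed (<= original value)
            dst.position(dst.position() + (int)dstSize.get(0)); // bytes produced (<= original value)
            // ... drain or enlarge dst here before the next iteration ...
        } while (hint != 0 && src.hasRemaining());
    } finally {
        memFree(srcSize);
        memFree(dstSize);
    }
}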
@@ -69,9 +69,9 @@ public class LZ4HC {

public static final int LZ4HC_HASH_MASK = (LZ4HC_HASHTABLESIZE - 1);

public static final int LZ4_STREAMHCSIZE = 4 * LZ4HC_HASHTABLESIZE + 2 * LZ4HC_MAXD + 56 + (Pointer.POINTER_SIZE == 16 ? 56 : 0);
public static final int LZ4_STREAMHCSIZE = 262200;

public static final int LZ4_STREAMHCSIZE_SIZET = (LZ4_STREAMHCSIZE / Pointer.POINTER_SIZE);
public static final int LZ4_STREAMHCSIZE_VOIDP = (LZ4_STREAMHCSIZE / Pointer.POINTER_SIZE);

protected LZ4HC() {
throw new UnsupportedOperationException();
@@ -21,17 +21,17 @@
*
* <pre><code>
* struct LZ4HC_CCtx_internal {
* uint32_t hashTable[LZ4HC_HASHTABLESIZE];
* uint16_t chainTable[LZ4HC_MAXD];
* uint8_t const * {@link #end};
* uint8_t const * {@link #base};
* uint8_t const * {@link #dictBase};
* uint32_t {@link #dictLimit};
* uint32_t {@link #lowLimit};
* uint32_t {@link #nextToUpdate};
* LZ4_u32 hashTable[LZ4HC_HASHTABLESIZE];
* LZ4_u16 chainTable[LZ4HC_MAXD];
* LZ4_byte const * {@link #end};
* LZ4_byte const * {@link #base};
* LZ4_byte const * {@link #dictBase};
* LZ4_u32 {@link #dictLimit};
* LZ4_u32 {@link #lowLimit};
* LZ4_u32 {@link #nextToUpdate};
* short compressionLevel;
* int8_t {@link #favorDecSpeed};
* int8_t {@link #dirty};
* LZ4_i8 {@link #favorDecSpeed};
* LZ4_i8 {@link #dirty};
* {@link LZ4HCCCtxInternal LZ4HC_CCtx_internal} * const dictCtx;
* }</code></pre>
*/
@@ -106,54 +106,54 @@ public LZ4HCCCtxInternal(ByteBuffer container) {
public int sizeof() { return SIZEOF; }

/** @return a {@link IntBuffer} view of the {@code hashTable} field. */
@NativeType("uint32_t[LZ4HC_HASHTABLESIZE]")
@NativeType("LZ4_u32[LZ4HC_HASHTABLESIZE]")
public IntBuffer hashTable() { return nhashTable(address()); }
/** @return the value at the specified index of the {@code hashTable} field. */
@NativeType("uint32_t")
@NativeType("LZ4_u32")
public int hashTable(int index) { return nhashTable(address(), index); }
/** @return a {@link ShortBuffer} view of the {@code chainTable} field. */
@NativeType("uint16_t[LZ4HC_MAXD]")
@NativeType("LZ4_u16[LZ4HC_MAXD]")
public ShortBuffer chainTable() { return nchainTable(address()); }
/** @return the value at the specified index of the {@code chainTable} field. */
@NativeType("uint16_t")
@NativeType("LZ4_u16")
public short chainTable(int index) { return nchainTable(address(), index); }
/**
* @param capacity the number of elements in the returned buffer
*
* @return next block here to continue on current prefix
*/
@NativeType("uint8_t const *")
@NativeType("LZ4_byte const *")
public ByteBuffer end(int capacity) { return nend(address(), capacity); }
/**
* @param capacity the number of elements in the returned buffer
*
* @return All index relative to this position
*/
@NativeType("uint8_t const *")
@NativeType("LZ4_byte const *")
public ByteBuffer base(int capacity) { return nbase(address(), capacity); }
/**
* @param capacity the number of elements in the returned buffer
*
* @return alternate base for {@code extDict}
*/
@NativeType("uint8_t const *")
@NativeType("LZ4_byte const *")
public ByteBuffer dictBase(int capacity) { return ndictBase(address(), capacity); }
/** below that point, need {@code extDict} */
@NativeType("uint32_t")
@NativeType("LZ4_u32")
public int dictLimit() { return ndictLimit(address()); }
/** below that point, no more {@code dict} */
@NativeType("uint32_t")
@NativeType("LZ4_u32")
public int lowLimit() { return nlowLimit(address()); }
/** index from which to continue dictionary update */
@NativeType("uint32_t")
@NativeType("LZ4_u32")
public int nextToUpdate() { return nnextToUpdate(address()); }
/** @return the value of the {@code compressionLevel} field. */
public short compressionLevel() { return ncompressionLevel(address()); }
/** favor decompression speed if this flag set, otherwise, favor compression ratio */
@NativeType("int8_t")
@NativeType("LZ4_i8")
public byte favorDecSpeed() { return nfavorDecSpeed(address()); }
/** stream has to be fully reset if this flag is set */
@NativeType("int8_t")
@NativeType("LZ4_i8")
public byte dirty() { return ndirty(address()); }
/** @return a {@link LZ4HCCCtxInternal} view of the struct pointed to by the {@code dictCtx} field. */
@NativeType("LZ4HC_CCtx_internal * const")
@@ -262,54 +262,54 @@ protected LZ4HCCCtxInternal getElementFactory() {
}

/** @return a {@link IntBuffer} view of the {@code hashTable} field. */
@NativeType("uint32_t[LZ4HC_HASHTABLESIZE]")
@NativeType("LZ4_u32[LZ4HC_HASHTABLESIZE]")
public IntBuffer hashTable() { return LZ4HCCCtxInternal.nhashTable(address()); }
/** @return the value at the specified index of the {@code hashTable} field. */
@NativeType("uint32_t")
@NativeType("LZ4_u32")
public int hashTable(int index) { return LZ4HCCCtxInternal.nhashTable(address(), index); }
/** @return a {@link ShortBuffer} view of the {@code chainTable} field. */
@NativeType("uint16_t[LZ4HC_MAXD]")
@NativeType("LZ4_u16[LZ4HC_MAXD]")
public ShortBuffer chainTable() { return LZ4HCCCtxInternal.nchainTable(address()); }
/** @return the value at the specified index of the {@code chainTable} field. */
@NativeType("uint16_t")
@NativeType("LZ4_u16")
public short chainTable(int index) { return LZ4HCCCtxInternal.nchainTable(address(), index); }
/**
* @return a {@link ByteBuffer} view of the data pointed to by the {@link LZ4HCCCtxInternal#end} field.
*
* @param capacity the number of elements in the returned buffer
*/
@NativeType("uint8_t const *")
@NativeType("LZ4_byte const *")
public ByteBuffer end(int capacity) { return LZ4HCCCtxInternal.nend(address(), capacity); }
/**
* @return a {@link ByteBuffer} view of the data pointed to by the {@link LZ4HCCCtxInternal#base} field.
*
* @param capacity the number of elements in the returned buffer
*/
@NativeType("uint8_t const *")
@NativeType("LZ4_byte const *")
public ByteBuffer base(int capacity) { return LZ4HCCCtxInternal.nbase(address(), capacity); }
/**
* @return a {@link ByteBuffer} view of the data pointed to by the {@link LZ4HCCCtxInternal#dictBase} field.
*
* @param capacity the number of elements in the returned buffer
*/
@NativeType("uint8_t const *")
@NativeType("LZ4_byte const *")
public ByteBuffer dictBase(int capacity) { return LZ4HCCCtxInternal.ndictBase(address(), capacity); }
/** @return the value of the {@link LZ4HCCCtxInternal#dictLimit} field. */
@NativeType("uint32_t")
@NativeType("LZ4_u32")
public int dictLimit() { return LZ4HCCCtxInternal.ndictLimit(address()); }
/** @return the value of the {@link LZ4HCCCtxInternal#lowLimit} field. */
@NativeType("uint32_t")
@NativeType("LZ4_u32")
public int lowLimit() { return LZ4HCCCtxInternal.nlowLimit(address()); }
/** @return the value of the {@link LZ4HCCCtxInternal#nextToUpdate} field. */
@NativeType("uint32_t")
@NativeType("LZ4_u32")
public int nextToUpdate() { return LZ4HCCCtxInternal.nnextToUpdate(address()); }
/** @return the value of the {@code compressionLevel} field. */
public short compressionLevel() { return LZ4HCCCtxInternal.ncompressionLevel(address()); }
/** @return the value of the {@link LZ4HCCCtxInternal#favorDecSpeed} field. */
@NativeType("int8_t")
@NativeType("LZ4_i8")
public byte favorDecSpeed() { return LZ4HCCCtxInternal.nfavorDecSpeed(address()); }
/** @return the value of the {@link LZ4HCCCtxInternal#dirty} field. */
@NativeType("int8_t")
@NativeType("LZ4_i8")
public byte dirty() { return LZ4HCCCtxInternal.ndirty(address()); }
/** @return a {@link LZ4HCCCtxInternal} view of the struct pointed to by the {@code dictCtx} field. */
@NativeType("LZ4HC_CCtx_internal * const")