fix: cleanup how mining cache is handled
mining cache now tracks the sub-chunks stored and assumes difficulty
0 sub-chunks are the size of a full chunk. Also allow the user
to specify the mining cache size in MiB rather than in chunks
JamesPiechota committed Oct 28, 2024
1 parent e32aaa2 commit e03de26
Showing 10 changed files with 482 additions and 240 deletions.
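The upshot is that the cache is now budgeted in bytes rather than in whole chunks: a difficulty-0 read is charged as a full chunk, while a composite-packing read is charged per sub-chunk. Below is a minimal sketch of that accounting, not the patch's actual cache module; the 262144-byte chunk and 8192-byte sub-chunk sizes stand in for the repository's ?DATA_CHUNK_SIZE and ?COMPOSITE_PACKING_SUB_CHUNK_SIZE macros and are assumptions here.

-module(mining_cache_sketch).
-export([sub_chunk_budget/2]).

%% Bytes occupied by one cached "sub-chunk", by packing difficulty.
sub_chunk_size(0) -> 262144;                  %% difficulty 0: a sub-chunk is a whole chunk
sub_chunk_size(_PackingDifficulty) -> 8192.   %% composite packing sub-chunk

%% How many sub-chunks fit into a cache of CacheSizeMiB mebibytes.
sub_chunk_budget(CacheSizeMiB, PackingDifficulty) ->
	(CacheSizeMiB * 1024 * 1024) div sub_chunk_size(PackingDifficulty).

For example, sub_chunk_budget(1, 0) is 4 (four full chunks per MiB), while sub_chunk_budget(1, 1) is 128 sub-chunks.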
2 changes: 1 addition & 1 deletion apps/arweave/include/ar_config.hrl
@@ -132,7 +132,7 @@
 	diff = ?DEFAULT_DIFF,
 	mining_addr = not_set,
 	hashing_threads = ?NUM_HASHING_PROCESSES,
-	mining_server_chunk_cache_size_limit,
+	mining_cache_size_mb,
 	packing_cache_size_limit,
 	data_cache_size_limit,
 	tx_validators,
21 changes: 15 additions & 6 deletions apps/arweave/src/ar.erl
@@ -165,10 +165,14 @@ show_help() ->
 		"kept in memory by the syncing processes."},
 	{"packing_cache_size_limit (num)", "The approximate maximum number of data chunks "
 		"kept in memory by the packing process."},
-	{"mining_server_chunk_cache_size_limit (num)", "The mining server will not read "
-		"new data unless the number of already fetched unprocessed chunks does "
-		"not exceed this number. When omitted, it is determined based on the "
-		"number of mining partitions and available RAM."},
+	{"mining_cache_size_mb (num)", "The total amount of cache "
+		"(in MiB) allocated to store unprocessed chunks while mining. The mining "
+		"server will only read new data when there is room in the cache to store "
+		"more chunks. This cache is subdivided into sub-caches for each mined "
+		"partition. When omitted, it is determined based on the number of "
+		"mining partitions."},
+	{"mining_server_chunk_cache_size_limit (num)", "DEPRECATED. Use "
+		"mining_cache_size_mb instead."},
 	{"max_emitters (num)", io_lib:format("The number of transaction propagation "
 		"processes to spawn. Default is ~B.", [?NUM_EMITTER_PROCESSES])},
 	{"tx_validators (num)", "Ignored. Set the post_tx key in the semaphores object"
@@ -466,9 +470,14 @@ parse_cli_args(["data_cache_size_limit", Num | Rest], C) ->
 parse_cli_args(["packing_cache_size_limit", Num | Rest], C) ->
 	parse_cli_args(Rest, C#config{
 		packing_cache_size_limit = list_to_integer(Num) });
-parse_cli_args(["mining_server_chunk_cache_size_limit", Num | Rest], C) ->
+parse_cli_args(["mining_cache_size_mb", Num | Rest], C) ->
 	parse_cli_args(Rest, C#config{
-		mining_server_chunk_cache_size_limit = list_to_integer(Num) });
+		mining_cache_size_mb = list_to_integer(Num) });
+parse_cli_args(["mining_server_chunk_cache_size_limit", _Num | Rest], C) ->
+	?LOG_WARNING("Deprecated option found 'mining_server_chunk_cache_size_limit': "
+		"this option has been removed and is a no-op. Please use mining_cache_size_mb "
+		"instead.", []),
+	parse_cli_args(Rest, C#config{ });
 parse_cli_args(["max_emitters", Num | Rest], C) ->
 	parse_cli_args(Rest, C#config{ max_emitters = list_to_integer(Num) });
 parse_cli_args(["disk_space", Size | Rest], C) ->
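The help text above also notes that the configured cache is subdivided into a sub-cache per mined partition, and that an omitted limit is derived from the number of mining partitions. The sketch below only illustrates that shape; the even split and the per-partition default figure are assumptions for this example, not values taken from the node.

-module(mining_cache_split_sketch).
-export([total_cache_mb/2, per_partition_cache_bytes/2]).

%% Assumed default, used only in this sketch.
-define(ASSUMED_DEFAULT_MB_PER_PARTITION, 400).

%% If mining_cache_size_mb was omitted, derive a total from the partition count;
%% otherwise use the configured value.
total_cache_mb(undefined, PartitionCount) ->
	PartitionCount * ?ASSUMED_DEFAULT_MB_PER_PARTITION;
total_cache_mb(ConfiguredMiB, _PartitionCount) ->
	ConfiguredMiB.

%% Give each mined partition an equal share of the total cache, in bytes.
per_partition_cache_bytes(TotalMiB, PartitionCount) when PartitionCount > 0 ->
	(TotalMiB * 1024 * 1024) div PartitionCount.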
50 changes: 25 additions & 25 deletions apps/arweave/src/ar_block.erl
@@ -19,7 +19,8 @@
 	validate_proof_size/1, vdf_step_number/1, get_packing/2,
 	validate_packing_difficulty/2, validate_packing_difficulty/1,
 	get_max_nonce/1, get_recall_range_size/1, get_recall_byte/3,
-	get_recall_step_size/1, get_nonces_per_chunk/1, get_sub_chunk_index/2]).
+	get_sub_chunk_size/1, get_nonces_per_chunk/1, get_nonces_per_recall_range/1,
+	get_sub_chunk_index/2]).
 
 -include_lib("arweave/include/ar.hrl").
 -include_lib("arweave/include/ar_pricing.hrl").
@@ -552,13 +553,6 @@ validate_packing_difficulty(Height, PackingDifficulty) ->
 validate_packing_difficulty(PackingDifficulty) ->
 	PackingDifficulty >= 0 andalso PackingDifficulty =< ?MAX_PACKING_DIFFICULTY.
 
-get_max_nonce(0) ->
-	max(0, ?LEGACY_RECALL_RANGE_SIZE div ?DATA_CHUNK_SIZE - 1);
-get_max_nonce(PackingDifficulty) when PackingDifficulty >= 1 ->
-	RecallRangeSize = ?RECALL_RANGE_SIZE div PackingDifficulty,
-	MaxChunkNumber = max(0, RecallRangeSize div ?DATA_CHUNK_SIZE - 1),
-	(MaxChunkNumber + 1) * ?COMPOSITE_PACKING_SUB_CHUNK_COUNT - 1.
-
 get_recall_range_size(0) ->
 	?LEGACY_RECALL_RANGE_SIZE;
 get_recall_range_size(PackingDifficulty) ->
@@ -570,29 +564,35 @@ get_recall_byte(RecallRangeStart, Nonce, _PackingDifficulty) ->
 	ChunkNumber = Nonce div ?COMPOSITE_PACKING_SUB_CHUNK_COUNT,
 	RecallRangeStart + ChunkNumber * ?DATA_CHUNK_SIZE.
 
-%% @doc Return the number of bytes - how far each mining nonce increment shifts the
-%% recall byte.
-get_recall_step_size(PackingDifficulty) ->
-	case PackingDifficulty >= 1 of
-		true ->
-			?COMPOSITE_PACKING_SUB_CHUNK_SIZE;
-		false ->
-			?DATA_CHUNK_SIZE
-	end.
+%% @doc Return the number of bytes per sub-chunk. This also drives how far each mining nonce
+%% increments the recall byte.
+get_sub_chunk_size(0) ->
+	?DATA_CHUNK_SIZE;
+get_sub_chunk_size(_PackingDifficulty) ->
+	?COMPOSITE_PACKING_SUB_CHUNK_SIZE.
 
 %% @doc Return the number of mining nonces contained in each data chunk.
-get_nonces_per_chunk(PackingDifficulty) ->
-	case PackingDifficulty >= 1 of
-		true ->
-			?COMPOSITE_PACKING_SUB_CHUNK_COUNT;
-		false ->
-			1
-	end.
+get_nonces_per_chunk(0) ->
+	1;
+get_nonces_per_chunk(_PackingDifficulty) ->
+	?COMPOSITE_PACKING_SUB_CHUNK_COUNT.
 
+get_nonces_per_recall_range(PackingDifficulty) ->
+	max(1, get_recall_range_size(PackingDifficulty) div get_sub_chunk_size(PackingDifficulty)).
+
+%% @doc For packing difficulty 0 (aka spora_2_6 packing), there is one nonce per chunk, so
+%% the max nonce is the same as the max chunk number. For packing difficulty >= 1 (aka
+%% composite packing), there are ?COMPOSITE_PACKING_SUB_CHUNK_COUNT nonces per chunk.
+get_max_nonce(PackingDifficulty) ->
+	%% The max(...) is included mostly for testing, where the recall range can be less than
+	%% a chunk.
+	max(get_nonces_per_chunk(PackingDifficulty) - 1,
+		get_nonces_per_recall_range(PackingDifficulty) - 1).
+
 %% @doc Return the 0-based sub-chunk index the mining nonce is pointing to.
 get_sub_chunk_index(0, _Nonce) ->
 	-1;
-get_sub_chunk_index(PackingDifficulty, Nonce) when PackingDifficulty >= 1 ->
+get_sub_chunk_index(_PackingDifficulty, Nonce) ->
 	Nonce rem ?COMPOSITE_PACKING_SUB_CHUNK_COUNT.
 
 %%%===================================================================
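Taken together, these helpers define how a mining nonce maps onto chunks and sub-chunks. The sketch below walks through that arithmetic for composite packing (difficulty >= 1); the 256 KiB chunk and 32 sub-chunks per chunk are assumptions standing in for the real ?DATA_CHUNK_SIZE and ?COMPOSITE_PACKING_SUB_CHUNK_COUNT macros.

-module(nonce_mapping_sketch).
-export([example/0]).

-define(DATA_CHUNK_SIZE, 262144).   %% assumed 256 KiB chunk
-define(SUB_CHUNK_COUNT, 32).       %% assumed sub-chunks per chunk

%% Mirrors get_recall_byte/3: every ?SUB_CHUNK_COUNT nonces advance one full chunk.
recall_byte(RecallRangeStart, Nonce) ->
	ChunkNumber = Nonce div ?SUB_CHUNK_COUNT,
	RecallRangeStart + ChunkNumber * ?DATA_CHUNK_SIZE.

%% Mirrors get_sub_chunk_index/2 for difficulty >= 1.
sub_chunk_index(Nonce) ->
	Nonce rem ?SUB_CHUNK_COUNT.

example() ->
	%% Nonce 70: chunk 70 div 32 = 2, sub-chunk 70 rem 32 = 6, so the recall byte
	%% sits exactly two chunks past the start of the recall range.
	{recall_byte(0, 70), sub_chunk_index(70)}.   %% => {524288, 6}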
13 changes: 10 additions & 3 deletions apps/arweave/src/ar_config.erl
@@ -270,11 +270,18 @@ parse_options([{<<"packing_cache_size_limit">>, Limit} | Rest], Config)
 parse_options([{<<"packing_cache_size_limit">>, Limit} | _], _) ->
 	{error, {bad_type, packing_cache_size_limit, number}, Limit};
 
+parse_options([{<<"mining_cache_size_mb">>, Limit} | Rest], Config)
+		when is_integer(Limit) ->
+	parse_options(Rest, Config#config{ mining_cache_size_mb = Limit });
+parse_options([{<<"mining_cache_size_mb">>, Limit} | _], _) ->
+	{error, {bad_type, mining_cache_size_mb, number}, Limit};
+
 parse_options([{<<"mining_server_chunk_cache_size_limit">>, Limit} | Rest], Config)
 		when is_integer(Limit) ->
-	parse_options(Rest, Config#config{ mining_server_chunk_cache_size_limit = Limit });
-parse_options([{<<"mining_server_chunk_cache_size_limit">>, Limit} | _], _) ->
-	{error, {bad_type, mining_server_chunk_cache_size_limit, number}, Limit};
+	?LOG_WARNING("Deprecated option found 'mining_server_chunk_cache_size_limit': "
+		"this option has been removed and is a no-op. Please use mining_cache_size_mb "
+		"instead.", []),
+	parse_options(Rest, Config);
 
 parse_options([{<<"max_emitters">>, Value} | Rest], Config) when is_integer(Value) ->
 	parse_options(Rest, Config#config{ max_emitters = Value });
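The same option can also be set in the JSON config file that these clauses parse. A hypothetical fragment, shown here inside an Erlang comment (the key name comes from the clause above; the rest of the file layout is assumed):

%% {
%%     "mining_cache_size_mb": 2048
%% }
%%
%% With the clauses above, this entry populates the mining_cache_size_mb field of
%% the config record, while a leftover "mining_server_chunk_cache_size_limit" entry
%% only logs the deprecation warning and is otherwise ignored.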
