diff --git a/apps/arweave/src/ar.erl b/apps/arweave/src/ar.erl index 34bce598f..9328251ec 100644 --- a/apps/arweave/src/ar.erl +++ b/apps/arweave/src/ar.erl @@ -42,7 +42,7 @@ show_help() -> fun({Opt, Desc}) -> io:format("\t~s~s~n", [ - string:pad(Opt, 40, trailing, $ ), + string:pad(Opt, 40, trailing, $\s), Desc ] ) @@ -303,7 +303,7 @@ parse_cli_args(["mine" | Rest], C) -> parse_cli_args(["peer", Peer | Rest], C = #config{ peers = Ps }) -> case ar_util:safe_parse_peer(Peer) of {ok, ValidPeer} -> - parse_cli_args(Rest, C#config{ peers = [ValidPeer|Ps] }); + parse_cli_args(Rest, C#config{ peers = [ValidPeer | Ps] }); {error, _} -> io:format("Peer ~p is invalid.~n", [Peer]), parse_cli_args(Rest, C) @@ -328,16 +328,16 @@ parse_cli_args(["local_peer", Peer | Rest], C = #config{ local_peers = Peers }) parse_cli_args(["sync_from_local_peers_only" | Rest], C) -> parse_cli_args(Rest, C#config{ sync_from_local_peers_only = true }); parse_cli_args(["transaction_blacklist", File | Rest], - C = #config{ transaction_blacklist_files = Files } ) -> + C = #config{transaction_blacklist_files = Files}) -> parse_cli_args(Rest, C#config{ transaction_blacklist_files = [File | Files] }); parse_cli_args(["transaction_blacklist_url", URL | Rest], - C = #config{ transaction_blacklist_urls = URLs} ) -> + C = #config{transaction_blacklist_urls = URLs}) -> parse_cli_args(Rest, C#config{ transaction_blacklist_urls = [URL | URLs] }); parse_cli_args(["transaction_whitelist", File | Rest], - C = #config{ transaction_whitelist_files = Files } ) -> + C = #config{transaction_whitelist_files = Files}) -> parse_cli_args(Rest, C#config{ transaction_whitelist_files = [File | Files] }); parse_cli_args(["transaction_whitelist_url", URL | Rest], - C = #config{ transaction_whitelist_urls = URLs} ) -> + C = #config{transaction_whitelist_urls = URLs}) -> parse_cli_args(Rest, C#config{ transaction_whitelist_urls = [URL | URLs] }); parse_cli_args(["port", Port | Rest], C) -> parse_cli_args(Rest, C#config{ port = list_to_integer(Port) }); @@ -415,7 +415,7 @@ parse_cli_args(["start_from_block", H | Rest], C) -> end; parse_cli_args(["start_from_latest_state" | Rest], C) -> parse_cli_args(Rest, C#config{ start_from_latest_state = true }); -parse_cli_args(["init" | Rest], C)-> +parse_cli_args(["init" | Rest], C) -> parse_cli_args(Rest, C#config{ init = true }); parse_cli_args(["internal_api_secret", Secret | Rest], C) when length(Secret) >= ?INTERNAL_API_SECRET_MIN_LEN -> @@ -450,7 +450,7 @@ parse_cli_args(["tx_validators", Num | Rest], C) -> parse_cli_args(Rest, C#config{ tx_validators = list_to_integer(Num) }); parse_cli_args(["post_tx_timeout", Num | Rest], C) -> parse_cli_args(Rest, C#config { post_tx_timeout = list_to_integer(Num) }); -parse_cli_args(["tx_propagation_parallelization", Num|Rest], C) -> +parse_cli_args(["tx_propagation_parallelization", Num | Rest], C) -> parse_cli_args(Rest, C#config{ tx_propagation_parallelization = list_to_integer(Num) }); parse_cli_args(["max_connections", Num | Rest], C) -> parse_cli_args(Rest, C#config{ max_connections = list_to_integer(Num) }); @@ -528,7 +528,7 @@ parse_cli_args(["cm_poll_interval", Num | Rest], C) -> parse_cli_args(["cm_peer", Peer | Rest], C = #config{ cm_peers = Ps }) -> case ar_util:safe_parse_peer(Peer) of {ok, ValidPeer} -> - parse_cli_args(Rest, C#config{ cm_peers = [ValidPeer|Ps] }); + parse_cli_args(Rest, C#config{ cm_peers = [ValidPeer | Ps] }); {error, _} -> io:format("Peer ~p is invalid.~n", [Peer]), parse_cli_args(Rest, C) @@ -557,7 +557,7 @@ start(Config) -> % Set 
logger to output all levels of logs to the console % when running in a dumb terminal. logger:add_handler(console, logger_std_h, #{level => all}); - _-> + _ -> ok end, case Config#config.init of @@ -595,7 +595,7 @@ start(normal, _Args) -> chars_limit => 16256, max_size => 8128, depth => 256, - template => [time," [",level,"] ",mfa,":",line," ",msg,"\n"] + template => [time, " [", level, "] ", mfa, ":", line, " ", msg, "\n"] }, logger:set_handler_config(default, formatter, {logger_formatter, LoggerFormatterConsole}), logger:set_handler_config(default, level, error), @@ -630,7 +630,7 @@ start(normal, _Args) -> depth => 256, legacy_header => false, single_line => true, - template => [time," [",level,"] ",mfa,":",line," ",msg,"\n"] + template => [time, " [", level, "] ", mfa, ":", line, " ", msg, "\n"] }, logger:set_handler_config(disk_log, formatter, {logger_formatter, LoggerFormatterDisk}), logger:set_application_level(arweave, Level), @@ -814,11 +814,10 @@ commandline_parser_test_() -> Addr = crypto:strong_rand_bytes(32), Tests = [ - {"peer 1.2.3.4 peer 5.6.7.8:9", #config.peers, [{5,6,7,8,9},{1,2,3,4,1984}]}, + {"peer 1.2.3.4 peer 5.6.7.8:9", #config.peers, [{5, 6, 7, 8, 9}, {1, 2, 3, 4, 1984}]}, {"mine", #config.mine, true}, {"port 22", #config.port, 22}, - {"mining_addr " - ++ binary_to_list(ar_util:encode(Addr)), #config.mining_addr, Addr} + {"mining_addr " ++ binary_to_list(ar_util:encode(Addr)), #config.mining_addr, Addr} ], X = string:split(string:join([ L || {L, _, _} <- Tests ], " "), " ", all), C = parse_cli_args(X, #config{}), diff --git a/apps/arweave/src/ar_arql_db.erl b/apps/arweave/src/ar_arql_db.erl index 75578428f..861d6ab85 100644 --- a/apps/arweave/src/ar_arql_db.erl +++ b/apps/arweave/src/ar_arql_db.erl @@ -247,7 +247,9 @@ handle_call({select_txs_by, Opts}, _, #{ conn := Conn } = State) -> {Time, Reply} = timer:tc(fun() -> case sql_fetchall(Conn, SQL, Params, ?DRIVER_TIMEOUT) of Rows when is_list(Rows) -> - lists:map(fun tx_map/1, Rows) + lists:map(fun tx_map/1, Rows); + {error, Reason} -> + {error, Reason} end end), record_query_time(select_txs_by, Time), @@ -259,7 +261,7 @@ handle_call({select_block_by_tx_id, TXID}, _, State) -> {Time, Reply} = timer:tc(fun() -> case ar_sqlite3:step(Stmt, ?DRIVER_TIMEOUT) of {row, Row} -> {ok, block_map(Row)}; - done -> not_found + done -> {error, not_found} end end), ar_sqlite3:reset(Stmt, ?DRIVER_TIMEOUT), @@ -268,10 +270,13 @@ handle_call({select_block_by_tx_id, TXID}, _, State) -> handle_call({select_tags_by_tx_id, TXID}, _, State) -> #{ select_tags_by_tx_id_stmt := Stmt } = State, + ok = ar_sqlite3:bind(Stmt, [TXID], ?DRIVER_TIMEOUT), {Time, Reply} = timer:tc(fun() -> case stmt_fetchall(Stmt, [TXID], ?DRIVER_TIMEOUT) of Rows when is_list(Rows) -> - lists:map(fun tags_map/1, Rows) + {ok, lists:map(fun tags_map/1, Rows)}; + {error, Reason} -> + {error, Reason} end end), record_query_time(select_tags_by_tx_id, Time), @@ -279,20 +284,18 @@ handle_call({select_tags_by_tx_id, TXID}, _, State) -> handle_call({eval_legacy_arql, Query}, _, #{ conn := Conn } = State) -> {Time, {Reply, _SQL, _Params}} = timer:tc(fun() -> - case catch eval_legacy_arql_where_clause(Query) of - {WhereClause, Params} -> - SQL = lists:concat([ - "SELECT tx.id FROM tx ", - "JOIN block ON tx.block_indep_hash = block.indep_hash ", - "WHERE ", WhereClause, - " ORDER BY block.height DESC, tx.id DESC" - ]), - case sql_fetchall(Conn, SQL, Params, ?DRIVER_TIMEOUT) of - Rows when is_list(Rows) -> - {lists:map(fun([TXID]) -> TXID end, Rows), SQL, Params} - end; - bad_query 
-> - {bad_query, 'n/a', 'n/a'} + try + {WhereClause, Params} = eval_legacy_arql_where_clause(Query), + SQL = lists:concat([ + "SELECT tx.id FROM tx ", + "JOIN block ON tx.block_indep_hash = block.indep_hash ", + "WHERE ", WhereClause, + " ORDER BY block.height DESC, tx.id DESC" + ]), + Rows = sql_fetchall(Conn, SQL, Params, ?DRIVER_TIMEOUT), + {ok, {lists:map(fun([TXID]) -> TXID end, Rows), SQL, Params}} + catch + _:_ -> {error, {bad_query, 'n/a', 'n/a'}} end end), record_query_time(eval_legacy_arql, Time), @@ -551,7 +554,7 @@ eval_legacy_arql_where_clause({equals, Key, Value}) "tx.id IN (SELECT tx_id FROM tag WHERE name = ? and value = ?)", [Key, Value] }; -eval_legacy_arql_where_clause({'and',E1,E2}) -> +eval_legacy_arql_where_clause({'and', E1, E2}) -> {E1WhereClause, E1Params} = eval_legacy_arql_where_clause(E1), {E2WhereClause, E2Params} = eval_legacy_arql_where_clause(E2), { @@ -564,7 +567,7 @@ eval_legacy_arql_where_clause({'and',E1,E2}) -> ]), E1Params ++ E2Params }; -eval_legacy_arql_where_clause({'or',E1,E2}) -> +eval_legacy_arql_where_clause({'or', E1, E2}) -> {E1WhereClause, E1Params} = eval_legacy_arql_where_clause(E1), {E2WhereClause, E2Params} = eval_legacy_arql_where_clause(E2), { diff --git a/apps/arweave/src/ar_arql_middleware.erl b/apps/arweave/src/ar_arql_middleware.erl index ecbd30058..2797781c4 100644 --- a/apps/arweave/src/ar_arql_middleware.erl +++ b/apps/arweave/src/ar_arql_middleware.erl @@ -1,5 +1,5 @@ -module(ar_arql_middleware). --behavior(cowboy_middleware). +-behaviour(cowboy_middleware). -export([execute/2]). diff --git a/apps/arweave/src/ar_base32.erl b/apps/arweave/src/ar_base32.erl index bfb43d690..21c1f6c28 100644 --- a/apps/arweave/src/ar_base32.erl +++ b/apps/arweave/src/ar_base32.erl @@ -62,7 +62,7 @@ encode_binary(<>, A) -> -compile({inline, [{b32e, 1}]}). b32e(X) -> - element(X+1, { + element(X + 1, { $a, $b, $c, $d, $e, $f, $g, $h, $i, $j, $k, $l, $m, $n, $o, $p, $q, $r, $s, $t, $u, $v, $w, $x, $y, $z, $2, $3, $4, $5, $6, $7, $8, $9 diff --git a/apps/arweave/src/ar_bench_packing.erl b/apps/arweave/src/ar_bench_packing.erl index 423fb739c..7b5cbb532 100644 --- a/apps/arweave/src/ar_bench_packing.erl +++ b/apps/arweave/src/ar_bench_packing.erl @@ -71,7 +71,7 @@ run_benchmark(Test, TotalMegaBytes, JIT, LargePages, HardwareAES, VDF) -> erlang:system_time() div 1000000000, Test, TotalMegaBytes, JIT, LargePages, HardwareAES, VDF, Init, Total]), - + file:write(File, Output), io:format("~n"), io:format(Output), @@ -109,7 +109,7 @@ generate_input(TotalMegaBytes, Root, RewardAddress) -> Spora25Filename = spora_2_5_filename(TotalMegaBytes), io:format("~s", [Spora25Filename]), - + {ok, Spora25FileHandle} = file:open(Spora25Filename, [write, binary]), ar_bench_timer:record({wall}, fun dirty_test/4, [ @@ -140,7 +140,7 @@ write_random_data(UnpackedFilename, TotalBytes) -> write_chunks(File, TotalBytes), file:close(File). write_chunks(File, TotalBytes) -> - ChunkSize = 1024*1024, % 1MB + ChunkSize = 1024 * 1024, % 1MB RemainingBytes = TotalBytes, write_chunks_loop(File, RemainingBytes, ChunkSize). 
write_chunks_loop(_File, 0, _) -> @@ -262,7 +262,7 @@ dirty_test({TotalMegaBytes, _, _, _} = Permutation, WorkerFun, Args, NumWorkers) Workers = [spawn_monitor( fun() -> dirty_worker( N, - Permutation, + Permutation, WorkerFun, Args, WorkerSize * (N - 1), @@ -272,7 +272,7 @@ dirty_test({TotalMegaBytes, _, _, _} = Permutation, WorkerFun, Args, NumWorkers) [ receive {'DOWN', Ref, process, Pid, _Result} -> erlang:demonitor(Ref), ok - after + after 60000 -> timeout end || {Pid, Ref} <- Workers ], @@ -299,7 +299,7 @@ baseline_pack_chunks(WorkerID, _, JIT, LargePages, HardwareAES } = Permutation, { - RandomXState, UnpackedFileHandle, PackedFileHandle, + RandomXState, UnpackedFileHandle, PackedFileHandle, Root, RewardAddress, PackingType } = Args, Offset, Size) -> @@ -318,7 +318,7 @@ baseline_pack_chunks(WorkerID, io:format("Error reading file: ~p~n", [Reason]), 0 end, - baseline_pack_chunks(WorkerID, Permutation, Args, Offset+ChunkSize, RemainingSize). + baseline_pack_chunks(WorkerID, Permutation, Args, Offset + ChunkSize, RemainingSize). %% -------------------------------------------------------------------------------------------- %% Baseline Repacking Test @@ -345,7 +345,7 @@ baseline_repack_chunks(WorkerID, JIT, LargePages, HardwareAES), {ok, RepackedChunk} =ar_mine_randomx:randomx_encrypt_chunk_nif( RandomXState, RepackKey, UnpackedChunk, RepackingRounds, - JIT, LargePages, HardwareAES), + JIT, LargePages, HardwareAES), file:pwrite(RepackedFileHandle, Offset, RepackedChunk), (Size - ChunkSize); eof -> @@ -354,7 +354,7 @@ baseline_repack_chunks(WorkerID, io:format("Error reading file: ~p~n", [Reason]), 0 end, - baseline_repack_chunks(WorkerID, Permutation, Args, Offset+ChunkSize, RemainingSize). + baseline_repack_chunks(WorkerID, Permutation, Args, Offset + ChunkSize, RemainingSize). %% -------------------------------------------------------------------------------------------- %% NIF Repacking Test @@ -387,7 +387,7 @@ nif_repack_chunks(WorkerID, io:format("Error reading file: ~p~n", [Reason]), 0 end, - nif_repack_chunks(WorkerID, Permutation, Args, Offset+ChunkSize, RemainingSize). + nif_repack_chunks(WorkerID, Permutation, Args, Offset + ChunkSize, RemainingSize). %% -------------------------------------------------------------------------------------------- %% Helpers diff --git a/apps/arweave/src/ar_bench_timer.erl b/apps/arweave/src/ar_bench_timer.erl index 696a54bf3..0270c0197 100644 --- a/apps/arweave/src/ar_bench_timer.erl +++ b/apps/arweave/src/ar_bench_timer.erl @@ -62,7 +62,7 @@ get_avg(Times) when is_list(Times) -> end; get_avg(Key) -> get_avg(get_times(Key)). - + get_times(Key) -> [Match || [Match] <- ets:match(total_time, {{Key, '_'}, '$1'})]. get_timing_keys() -> diff --git a/apps/arweave/src/ar_blacklist_middleware.erl b/apps/arweave/src/ar_blacklist_middleware.erl index 15a90a611..716f83aa4 100644 --- a/apps/arweave/src/ar_blacklist_middleware.erl +++ b/apps/arweave/src/ar_blacklist_middleware.erl @@ -101,13 +101,13 @@ reset_rate_limit(TableID, IPAddr, Path) -> end. increment_ip_addr(IPAddr, Req) -> - case ets:whereis(?MODULE) of + case ets:whereis(?MODULE) of undefined -> pass; _ -> update_ip_addr(IPAddr, Req, 1) end. decrement_ip_addr(IPAddr, Req) -> - case ets:whereis(?MODULE) of + case ets:whereis(?MODULE) of undefined -> pass; _ -> update_ip_addr(IPAddr, Req, -1) end. 
diff --git a/apps/arweave/src/ar_block.erl b/apps/arweave/src/ar_block.erl index 4b5577a7c..621df7cde 100644 --- a/apps/arweave/src/ar_block.erl +++ b/apps/arweave/src/ar_block.erl @@ -52,8 +52,7 @@ block_field_size_limit(B) -> Check = (byte_size(B#block.nonce) =< 512) and (byte_size(B#block.previous_block) =< 48) and (byte_size(integer_to_binary(B#block.timestamp)) =< ?TIMESTAMP_FIELD_SIZE_LIMIT) and - (byte_size(integer_to_binary(B#block.last_retarget)) - =< ?TIMESTAMP_FIELD_SIZE_LIMIT) and + (byte_size(integer_to_binary(B#block.last_retarget)) =< ?TIMESTAMP_FIELD_SIZE_LIMIT) and (byte_size(integer_to_binary(B#block.diff)) =< DiffBytesLimit) and (byte_size(integer_to_binary(B#block.height)) =< 20) and (byte_size(B#block.hash) =< 48) and @@ -249,9 +248,8 @@ compute_next_vdf_difficulty(PrevB) -> {0, 0}, HistoryPart ), - NewVDFDifficulty = - (VDFIntervalTotal * VDFDifficulty) div IntervalTotal, - EMAVDFDifficulty = (9*VDFDifficulty + NewVDFDifficulty) div 10, + NewVDFDifficulty = (VDFIntervalTotal * VDFDifficulty) div IntervalTotal, + EMAVDFDifficulty = (9 * VDFDifficulty + NewVDFDifficulty) div 10, ?LOG_DEBUG([{event, vdf_difficulty_retarget}, {height, Height}, {old_vdf_difficulty, VDFDifficulty}, @@ -484,8 +482,7 @@ generate_block_data_segment_base(B) -> integer_to_binary(ScheduledRateDividend), integer_to_binary(ScheduledRateDivisor), integer_to_binary(B#block.packing_2_5_threshold), - integer_to_binary(B#block.strict_data_split_threshold) - | Props + integer_to_binary(B#block.strict_data_split_threshold) | Props ]; false -> Props @@ -514,8 +511,8 @@ generate_block_data_segment_base(B) -> %% of the two recall ranges. get_recall_range(H0, PartitionNumber, PartitionUpperBound) -> RecallRange1Offset = binary:decode_unsigned(binary:part(H0, 0, 8), big), - RecallRange1Start = PartitionNumber * ?PARTITION_SIZE - + RecallRange1Offset rem min(?PARTITION_SIZE, PartitionUpperBound), + RecallRange1Start = PartitionNumber * ?PARTITION_SIZE + + RecallRange1Offset rem min(?PARTITION_SIZE, PartitionUpperBound), RecallRange2Start = binary:decode_unsigned(H0, big) rem PartitionUpperBound, {RecallRange1Start, RecallRange2Start}. @@ -584,8 +581,7 @@ generate_size_tagged_list_from_txs(TXs, Height) -> End = Pos + DataSize, case Height >= ar_fork:height_2_5() of true -> - Padding = ar_tx:get_weave_size_increase(DataSize, Height) - - DataSize, + Padding = ar_tx:get_weave_size_increase(DataSize, Height) - DataSize, %% Encode the padding information in the Merkle tree. case Padding > 0 of true -> diff --git a/apps/arweave/src/ar_block_cache.erl b/apps/arweave/src/ar_block_cache.erl index 38baeea6e..135d2154e 100644 --- a/apps/arweave/src/ar_block_cache.erl +++ b/apps/arweave/src/ar_block_cache.erl @@ -181,7 +181,7 @@ add_validated(Tab, B) -> sets:to_list(SolutionSet)), SolutionSet3 = sets:from_list([H | Remaining]), [{_, Set}] = ets:lookup(Tab, links), - [{_, C = {MaxCDiff, _H}}] = ets:lookup(Tab, max_cdiff), + [{_, C = {MaxCDiff, _}}] = ets:lookup(Tab, max_cdiff), insert(Tab, [ {{block, PrevH}, {PrevB, PrevStatus, PrevTimestamp, sets:add_element(H, PrevChildren)}}, @@ -453,8 +453,7 @@ mark_on_chain(Tab, #block{ previous_block = PrevH, indep_hash = H }) -> %% Mark the blocks from the previous main fork as validated, not on-chain. mark_off_chain(Tab, sets:del_element(H, Children)); [{_, {PrevB, validated, Timestamp, Children}}] -> - [{{block, PrevH}, {PrevB, on_chain, Timestamp, Children}} - | mark_on_chain(Tab, PrevB)] + [{{block, PrevH}, {PrevB, on_chain, Timestamp, Children}} | mark_on_chain(Tab, PrevB)] end. 
mark_off_chain(Tab, Set) -> @@ -462,8 +461,7 @@ mark_off_chain(Tab, Set) -> fun(H, Acc) -> case ets:lookup(Tab, {block, H}) of [{_, {B, on_chain, Timestamp, Children}}] -> - [{{block, H}, {B, validated, Timestamp, Children}} - | mark_off_chain(Tab, Children)]; + [{{block, H}, {B, validated, Timestamp, Children}} | mark_off_chain(Tab, Children)]; _ -> Acc end @@ -479,7 +477,7 @@ remove2(Tab, H) -> ok; [{_, {#block{ hash = SolutionH, height = Height }, _Status, _Timestamp, Children}}] -> %% Don't update the cache here. remove/2 will do it. - delete(Tab, {block, H}, false), + delete(Tab, {block, H}, false), ar_ignore_registry:remove(H), remove_solution(Tab, H, SolutionH), insert(Tab, {links, gb_sets:del_element({Height, H}, Set)}, false), @@ -696,7 +694,7 @@ test_block_cache() -> %% B2_3->B2_2->B2->B1 is no longer and heavier but only B2->B1 are validated. add(bcache_test, B2_3 = on_top(random_block(3), B2_2)), - ?assertMatch({B2_2, [B2], {{not_validated, ExpectedStatus}, _Timestamp}}, + ?assertMatch({B2_2, [B2], {{not_validated, ExpectedStatus}, _}}, get_earliest_not_validated_from_longest_chain(bcache_test)), ?assertException(error, invalid_tip, mark_tip(bcache_test, block_id(B2_3))), assert_longest_chain([B2, B1], 0), @@ -723,7 +721,7 @@ test_block_cache() -> assert_longest_chain([B3, B2, B1], 1), add(bcache_test, B4 = on_top(random_block(5), B3)), - ?assertMatch({B4, [B3, B2], {{not_validated, ExpectedStatus}, _Timestamp}}, + ?assertMatch({B4, [B3, B2], {{not_validated, ExpectedStatus}, _}}, get_earliest_not_validated_from_longest_chain(bcache_test)), assert_longest_chain([B3, B2, B1], 1), @@ -768,7 +766,7 @@ test_block_cache() -> ?assertEqual(not_found, get_by_solution_hash(bcache_test, B4#block.hash, <<>>, 0, 0)), assert_longest_chain([B2_3, B2_2], 0), - + new(bcache_test, B11 = random_block(0)), add(bcache_test, _B12 = on_top(random_block(1), B11)), add_validated(bcache_test, B13 = on_top(random_block(1), B11)), diff --git a/apps/arweave/src/ar_block_index.erl b/apps/arweave/src/ar_block_index.erl index ce482565f..d7f7cebb2 100644 --- a/apps/arweave/src/ar_block_index.erl +++ b/apps/arweave/src/ar_block_index.erl @@ -90,8 +90,9 @@ get_range(Start, End) when Start > End -> get_range(Start, End) -> case catch ets:slot(block_index, Start) of [{{WeaveSize, _Height, H, TXRoot} = Entry}] -> - lists:reverse([{H, WeaveSize, TXRoot} - | get_range2(Start + 1, End, ets:next(block_index, Entry))]); + lists:reverse( + [{H, WeaveSize, TXRoot} | get_range2(Start + 1, End, ets:next(block_index, Entry))] + ); _ -> {error, invalid_start} end. 
diff --git a/apps/arweave/src/ar_block_pre_validator.erl b/apps/arweave/src/ar_block_pre_validator.erl index ef3a218a7..ec9b23983 100644 --- a/apps/arweave/src/ar_block_pre_validator.erl +++ b/apps/arweave/src/ar_block_pre_validator.erl @@ -87,7 +87,7 @@ handle_cast(pre_validate, #state{ pqueue = Q, size = Size, ip_timestamps = IPTim false -> {{_, {B, PrevB, SolutionResigned, Peer}}, Q2} = gb_sets:take_largest(Q), - BlockSize = byte_size(term_to_binary(B)), + BlockSize = byte_size(term_to_binary(B)), Size2 = Size - BlockSize, case ar_ignore_registry:permanent_member(B#block.indep_hash) of true -> @@ -407,7 +407,7 @@ may_be_report_double_signing(B, B2) -> Proof = {Key, Signature1, CDiff1, PrevCDiff, Preimage1, Signature2, CDiff2, PrevCDiff2, Preimage2}, ?LOG_INFO([{event, report_double_signing}, - {key, ar_util:encode(Key)}, + {key, ar_util:encode(Key)}, {block1, ar_util:encode(B#block.indep_hash)}, {block2, ar_util:encode(B2#block.indep_hash)}, {height1, B#block.height}, {height2, B2#block.height}]), @@ -798,7 +798,7 @@ pre_validate_nonce_limiter(B, PrevB, Peer) -> accept_block(B, Peer, Gossip) -> ar_ignore_registry:add(B#block.indep_hash), - ar_events:send(block, {new, B, + ar_events:send(block, {new, B, #{ source => {peer, Peer}, gossip => Gossip }}), ?LOG_INFO([{event, accepted_block}, {height, B#block.height}, {indep_hash, ar_util:encode(B#block.indep_hash)}]), diff --git a/apps/arweave/src/ar_block_propagation_worker.erl b/apps/arweave/src/ar_block_propagation_worker.erl index 07e54a23d..a4304e8b7 100644 --- a/apps/arweave/src/ar_block_propagation_worker.erl +++ b/apps/arweave/src/ar_block_propagation_worker.erl @@ -75,8 +75,9 @@ handle_cast({send_block2, Peer, SendAnnouncementFun, SendFun, RetryCount, From}, end, From ! {worker_sent_block, self()} end; - _ -> %% 208 (the peer has already received this block) or - %% an unexpected response. + _ -> + %% 208 (the peer has already received this block) or + %% an unexpected response. From ! 
{worker_sent_block, self()} end, {noreply, State}; diff --git a/apps/arweave/src/ar_bridge.erl b/apps/arweave/src/ar_bridge.erl index e3d2b834d..fedd220e8 100644 --- a/apps/arweave/src/ar_bridge.erl +++ b/apps/arweave/src/ar_bridge.erl @@ -209,8 +209,7 @@ send_to_worker(Peer, {JSON, B}, W) -> recall_byte = B#block.recall_byte, recall_byte2 = B#block.recall_byte2, solution_hash = SolutionH2, - tx_prefixes = [ar_node_worker:tx_id_prefix(ID) - || #tx{ id = ID } <- TXs] }, + tx_prefixes = [ar_node_worker:tx_id_prefix(ID) || #tx{ id = ID } <- TXs] }, ar_http_iface_client:send_block_announcement(Peer, Announcement) end, SendFun = @@ -289,8 +288,7 @@ determine_included_transactions([TXIDOrTX | TXs], [TXIDOrIndex | MissingTXs], In true -> missing; false -> - determine_included_transactions(TXs, MissingTXs, [strip_v2_data(TXIDOrTX) - | Included], N + 1) + determine_included_transactions(TXs, MissingTXs, [strip_v2_data(TXIDOrTX) | Included], N + 1) end; false -> determine_included_transactions(TXs, [TXIDOrIndex | MissingTXs], [TXID | Included], diff --git a/apps/arweave/src/ar_chunk_storage.erl b/apps/arweave/src/ar_chunk_storage.erl index 4c43305d4..bae8ed620 100644 --- a/apps/arweave/src/ar_chunk_storage.erl +++ b/apps/arweave/src/ar_chunk_storage.erl @@ -115,20 +115,19 @@ get_range(Start, Size, StoreID) -> BucketStart = Start2 - (Start2 - IntervalStart) rem ?DATA_CHUNK_SIZE, LeftBorder = ar_util:floor_int(BucketStart, ?CHUNK_GROUP_SIZE), End = Start2 + Size2, - LastBucketStart = (End - 1) - ((End - 1)- IntervalStart) rem ?DATA_CHUNK_SIZE, + LastBucketStart = (End - 1) - ((End - 1) - IntervalStart) rem ?DATA_CHUNK_SIZE, case LastBucketStart >= LeftBorder + ?CHUNK_GROUP_SIZE of false -> ChunkCount = (LastBucketStart - BucketStart) div ?DATA_CHUNK_SIZE + 1, get(Start2, BucketStart, LeftBorder, StoreID, ChunkCount); true -> SizeBeforeBorder = LeftBorder + ?CHUNK_GROUP_SIZE - BucketStart, - ChunkCountBeforeBorder = SizeBeforeBorder div ?DATA_CHUNK_SIZE - + case SizeBeforeBorder rem ?DATA_CHUNK_SIZE of 0 -> 0; _ -> 1 end, + ChunkCountBeforeBorder = SizeBeforeBorder div ?DATA_CHUNK_SIZE + + case SizeBeforeBorder rem ?DATA_CHUNK_SIZE of 0 -> 0; _ -> 1 end, StartAfterBorder = BucketStart + ChunkCountBeforeBorder * ?DATA_CHUNK_SIZE, - SizeAfterBorder = Size2 - ChunkCountBeforeBorder * ?DATA_CHUNK_SIZE - + (Start2 - BucketStart), - get(Start2, BucketStart, LeftBorder, StoreID, ChunkCountBeforeBorder) - ++ get_range(StartAfterBorder, SizeAfterBorder, StoreID) + SizeAfterBorder = Size2 - ChunkCountBeforeBorder * ?DATA_CHUNK_SIZE + (Start2 - BucketStart), + get(Start2, BucketStart, LeftBorder, StoreID, ChunkCountBeforeBorder) ++ + get_range(StartAfterBorder, SizeAfterBorder, StoreID) end; _ -> [] @@ -1011,8 +1010,10 @@ test_cross_file_not_aligned() -> assert_get(C3, 2 * ?CHUNK_GROUP_SIZE - ?DATA_CHUNK_SIZE div 2), ?assertEqual([{2 * ?CHUNK_GROUP_SIZE - ?DATA_CHUNK_SIZE div 2, C3}, {2 * ?CHUNK_GROUP_SIZE + ?DATA_CHUNK_SIZE div 2, C2}], - ar_chunk_storage:get_range(2 * ?CHUNK_GROUP_SIZE - - ?DATA_CHUNK_SIZE div 2 - ?DATA_CHUNK_SIZE, ?DATA_CHUNK_SIZE * 2)), + ar_chunk_storage:get_range( + 2 * ?CHUNK_GROUP_SIZE - ?DATA_CHUNK_SIZE div 2 - ?DATA_CHUNK_SIZE, ?DATA_CHUNK_SIZE * 2 + ) + ), ?assertEqual(not_found, ar_chunk_storage:get(?CHUNK_GROUP_SIZE + 1)), ?assertEqual( not_found, diff --git a/apps/arweave/src/ar_config.erl b/apps/arweave/src/ar_config.erl index b0c0cd444..9d4509d41 100644 --- a/apps/arweave/src/ar_config.erl +++ b/apps/arweave/src/ar_config.erl @@ -169,7 +169,7 @@ parse_options([{<<"no_auto_join">>, 
false} | Rest], Config) -> parse_options([{<<"no_auto_join">>, Opt} | _], _) -> {error, {bad_type, no_auto_join, boolean}, Opt}; -parse_options([{<<"join_workers">>, N} | Rest], Config) when is_integer(N)-> +parse_options([{<<"join_workers">>, N} | Rest], Config) when is_integer(N) -> parse_options(Rest, Config#config{ join_workers = N }); parse_options([{<<"join_workers">>, Opt} | _], _) -> {error, {bad_type, join_workers, number}, Opt}; @@ -683,7 +683,7 @@ log_config(Config, [Field | Rest], Index, Acc) -> FieldValue end, Line = ?LOG_INFO("~s: ~tp", [atom_to_list(Field), FormattedValue]), - log_config(Config, Rest, Index+1, [Line | Acc]). + log_config(Config, Rest, Index + 1, [Line | Acc]). log_config_value(peers, FieldValue) -> format_peers(FieldValue); diff --git a/apps/arweave/src/ar_coordination.erl b/apps/arweave/src/ar_coordination.erl index 552124067..e99323bbb 100644 --- a/apps/arweave/src/ar_coordination.erl +++ b/apps/arweave/src/ar_coordination.erl @@ -80,7 +80,7 @@ get_peer(PartitionNumber) -> init([]) -> process_flag(trap_exit, true), {ok, Config} = application:get_env(arweave, config), - + %% using timer:apply_after so we can cancel pending timers. This allows us to send the %% h1 batch as soon as it's full instead of waiting for the timeout to expire. {ok, H1BatchTimerRef} = timer:apply_after(?BATCH_TIMEOUT_MS, ?MODULE, send_h1_batch_to_peer, []), @@ -140,7 +140,7 @@ handle_cast({computed_h1, Candidate, Diff}, State) -> h1 = not_set, h2 = not_set, nonce = not_set, - poa2 = not_set, + poa2 = not_set, preimage = not_set, session_ref = not_set }, diff --git a/apps/arweave/src/ar_data_sync.erl b/apps/arweave/src/ar_data_sync.erl index d84d21c1a..c33762770 100644 --- a/apps/arweave/src/ar_data_sync.erl +++ b/apps/arweave/src/ar_data_sync.erl @@ -682,9 +682,10 @@ handle_cast(sync_data, State) -> gen_server:cast(self(), sync_data2), %% Find all storage_modules that might include the target chunks (e.g. neighboring %% storage_modules with an overlap, or unpacked copies used for packing, etc...) - StorageModules = [ar_storage_module:id(Module) - || Module <- ar_storage_module:get_all(RangeStart, RangeEnd), - ar_storage_module:id(Module) /= OriginStoreID], + StorageModules = [ + ar_storage_module:id(Module) || Module <- ar_storage_module:get_all(RangeStart, RangeEnd), + ar_storage_module:id(Module) /= OriginStoreID + ], {noreply, State#sync_data_state{ unsynced_intervals_from_other_storage_modules = Intervals, other_storage_modules_with_unsynced_intervals = StorageModules }}; @@ -1561,9 +1562,7 @@ validate_fetched_chunk(Args) -> get_chunk_seek_offset(Offset) -> case Offset > ?STRICT_DATA_SPLIT_THRESHOLD of true -> - ar_poa:get_padded_offset(Offset, ?STRICT_DATA_SPLIT_THRESHOLD) - - (?DATA_CHUNK_SIZE) - + 1; + ar_poa:get_padded_offset(Offset, ?STRICT_DATA_SPLIT_THRESHOLD) - (?DATA_CHUNK_SIZE) + 1; false -> Offset end. 
diff --git a/apps/arweave/src/ar_data_sync_worker.erl b/apps/arweave/src/ar_data_sync_worker.erl index 182d88aab..8476b7210 100644 --- a/apps/arweave/src/ar_data_sync_worker.erl +++ b/apps/arweave/src/ar_data_sync_worker.erl @@ -62,7 +62,7 @@ handle_cast({sync_range, Args}, State) -> ok; _ -> gen_server:cast(ar_data_sync_worker_master, {task_completed, - {sync_range, {State#state.name, SyncResult, Args, EndTime-StartTime}}}) + {sync_range, {State#state.name, SyncResult, Args, EndTime - StartTime}}}) end, {noreply, State}; @@ -83,13 +83,13 @@ terminate(Reason, _State) -> read_range({Start, End, _OriginStoreID, _TargetStoreID, _SkipSmall}) when Start >= End -> ok; -read_range({_Start, _End, _OriginStoreID, TargetStoreID, _SkipSmall} = Args) -> +read_range({Start, End, _OriginStoreID, TargetStoreID, _SkipSmall} = Args) -> case ar_data_sync:is_chunk_cache_full() of false -> case ar_data_sync:is_disk_space_sufficient(TargetStoreID) of true -> ?LOG_DEBUG([{event, read_range}, - {size, (_End - _Start) / (1024*1024)}, {args, Args}]), + {size, (End - Start) / (1024 * 1024)}, {args, Args}]), read_range2(?READ_RANGE_MESSAGES_PER_BATCH, Args); _ -> ar_util:cast_after(30000, self(), {read_range, Args}), @@ -142,14 +142,14 @@ read_range2(MessagesRemaining, {Start, End, OriginStoreID, TargetStoreID, SkipSm gen_server:cast(list_to_atom("ar_data_sync_" ++ OriginStoreID), {invalidate_bad_data_record, {Start, AbsoluteOffset, ChunksIndex, OriginStoreID, 1}}), - read_range2(MessagesRemaining-1, + read_range2(MessagesRemaining - 1, {Start + ChunkSize, End, OriginStoreID, TargetStoreID, SkipSmall}); {error, Error} -> ?LOG_ERROR([{event, failed_to_read_chunk}, {absolute_end_offset, AbsoluteOffset}, {chunk_data_key, ar_util:encode(ChunkDataKey)}, {reason, io_lib:format("~p", [Error])}]), - read_range2(MessagesRemaining, + read_range2(MessagesRemaining, {Start + ChunkSize, End, OriginStoreID, TargetStoreID, SkipSmall}); {ok, {Chunk, DataPath}} -> case ar_sync_record:is_recorded(AbsoluteOffset, ar_data_sync, @@ -166,9 +166,11 @@ read_range2(MessagesRemaining, {Start, End, OriginStoreID, TargetStoreID, SkipSm Args = {DataRoot, AbsoluteOffset, TXPath, TXRoot, DataPath, Packing, RelativeOffset, ChunkSize, Chunk, UnpackedChunk, TargetStoreID, ChunkDataKey}, - gen_server:cast(list_to_atom("ar_data_sync_" - ++ TargetStoreID), {pack_and_store_chunk, Args}), - read_range2(MessagesRemaining-1, + gen_server:cast( + list_to_atom("ar_data_sync_" ++ TargetStoreID), + {pack_and_store_chunk, Args} + ), + read_range2(MessagesRemaining - 1, {Start + ChunkSize, End, OriginStoreID, TargetStoreID, SkipSmall}); Reply -> ?LOG_ERROR([{event, chunk_record_not_found}, @@ -219,7 +221,7 @@ sync_range({Start, End, Peer, TargetStoreID, RetryCount} = Args) -> ok; false -> case ar_http_iface_client:get_chunk_binary(Peer, Start2, any) of - {ok, #{ chunk := Chunk } = Proof, Time, TransferSize} -> + {ok, #{ chunk := Chunk } = Proof, _Time, _TransferSize} -> %% In case we fetched a packed small chunk, %% we may potentially skip some chunks by %% continuing with Start2 + byte_size(Chunk) - the skip diff --git a/apps/arweave/src/ar_data_sync_worker_master.erl b/apps/arweave/src/ar_data_sync_worker_master.erl index fd7b238c0..342f38cd9 100644 --- a/apps/arweave/src/ar_data_sync_worker_master.erl +++ b/apps/arweave/src/ar_data_sync_worker_master.erl @@ -58,7 +58,7 @@ ready_for_work() -> try gen_server:call(?MODULE, ready_for_work, 1000) catch - exit:{timeout,_} -> + exit:{timeout, _} -> false end. 
@@ -112,7 +112,7 @@ handle_cast({task_completed, {sync_range, {Worker, Result, Args, ElapsedNative}} State2 = update_scheduled_task_count(Worker, sync_range, ar_util:format_peer(Peer), -1, State), PeerTasks = get_peer_tasks(Peer, State2), {PeerTasks2, State3} = complete_sync_range(PeerTasks, Result, ElapsedNative, DataSize, State2), - {PeerTasks3, State4} = process_peer_queue(PeerTasks2, State3), + {PeerTasks3, State4} = process_peer_queue(PeerTasks2, State3), {noreply, set_peer_tasks(PeerTasks3, State4)}; handle_cast(rebalance_peers, State) -> @@ -164,7 +164,7 @@ process_main_queue(State) -> push_main_task(Task, Args, State) -> enqueue_main_task(Task, Args, State, true). -enqueue_main_task(Task, Args, State) -> +enqueue_main_task(Task, Args, State) -> enqueue_main_task(Task, Args, State, false). enqueue_main_task(Task, Args, State, Front) -> TaskQueue = case Front of @@ -249,7 +249,7 @@ cut_peer_queue(MaxQueue, PeerTasks, State) -> {max_queue, MaxQueue}, {tasks_to_cut, TasksToCut}]), {TaskQueue2, _} = queue:split(MaxQueue, TaskQueue), { - PeerTasks#peer_tasks{ + PeerTasks#peer_tasks{ task_queue = TaskQueue2, task_queue_len = queue:len(TaskQueue2) }, update_queued_task_count(sync_range, ar_util:format_peer(Peer), -TasksToCut, State) }; @@ -265,7 +265,7 @@ enqueue_peer_task(PeerTasks, Task, Args) -> dequeue_peer_task(PeerTasks) -> {{value, {Task, Args}}, PeerTaskQueue} = queue:out(PeerTasks#peer_tasks.task_queue), TaskQueueLength = PeerTasks#peer_tasks.task_queue_len - 1, - PeerTasks2 = PeerTasks#peer_tasks{ + PeerTasks2 = PeerTasks#peer_tasks{ task_queue = PeerTaskQueue, task_queue_len = TaskQueueLength }, {PeerTasks2, Task, Args}. @@ -314,7 +314,7 @@ schedule_task(Task, Args, State) -> %% EMA, max_active, peer queue length) %%-------------------------------------------------------------------- complete_sync_range(PeerTasks, Result, ElapsedNative, DataSize, State) -> - PeerTasks2 = PeerTasks#peer_tasks{ + PeerTasks2 = PeerTasks#peer_tasks{ active_count = PeerTasks#peer_tasks.active_count - 1 }, ar_peers:rate_fetched_data( @@ -328,13 +328,13 @@ calculate_targets([], _AllPeerPerformances) -> calculate_targets(Peers, AllPeerPerformances) -> TotalThroughput = lists:foldl( - fun(Peer, Acc) -> + fun(Peer, Acc) -> Performance = maps:get(Peer, AllPeerPerformances, #performance{}), Acc + Performance#performance.current_rating end, 0.0, Peers), - TotalLatency = + TotalLatency = lists:foldl( - fun(Peer, Acc) -> + fun(Peer, Acc) -> Performance = maps:get(Peer, AllPeerPerformances, #performance{}), Acc + Performance#performance.average_latency end, 0.0, Peers), @@ -395,17 +395,17 @@ update_active(PeerTasks, Performance, TotalMaxActive, TargetLatency, State) -> TargetMaxActive = case FasterThanTarget orelse WorkersStarved of true -> %% latency < target, increase max_active. - MaxActive+1; + MaxActive + 1; false -> %% latency > target, decrease max_active - MaxActive-1 + MaxActive - 1 end, %% Can't have more active tasks than workers. WorkerLimitedMaxActive = min(TargetMaxActive, State#state.worker_count), %% Can't have more active tasks than we have active or queued tasks. TaskLimitedMaxActive = min( - WorkerLimitedMaxActive, + WorkerLimitedMaxActive, max(PeerTasks#peer_tasks.active_count, PeerTasks#peer_tasks.task_queue_len) ), %% Can't have less than the minimum. 
@@ -516,7 +516,7 @@ test_get_worker() -> worker_count = 3, worker_loads = #{worker1 => 3, worker2 => 2, worker3 => 1} }, - %% get_worker will cycle the queue until it finds a worker that has a worker_load =< the + %% get_worker will cycle the queue until it finds a worker that has a worker_load =< the %% average load (i.e. scheduled_task_count / worker_count) {worker2, State1} = get_worker(State0), State2 = update_scheduled_task_count(worker2, sync_range, "localhost", 1, State1), @@ -537,7 +537,7 @@ test_enqueue_main_task() -> StoreID1 = ar_storage_module:id({?PARTITION_SIZE, 1, default}), StoreID2 = ar_storage_module:id({?PARTITION_SIZE, 2, default}), State0 = #state{}, - + State1 = enqueue_main_task(read_range, {0, 100, StoreID1, StoreID2, true}, State0), State2 = enqueue_main_task(sync_range, {0, 100, Peer1, StoreID1}, State1), State3 = push_main_task(sync_range, {100, 200, Peer2, StoreID2}, State2), @@ -566,7 +566,7 @@ test_enqueue_peer_task() -> PeerATasks = #peer_tasks{ peer = PeerA }, PeerBTasks = #peer_tasks{ peer = PeerB }, - + PeerATasks1 = enqueue_peer_task(PeerATasks, sync_range, {0, 100, PeerA, StoreID1}), PeerATasks2 = enqueue_peer_task(PeerATasks1, sync_range, {100, 200, PeerA, StoreID1}), PeerBTasks1 = enqueue_peer_task(PeerBTasks, sync_range, {200, 300, PeerB, StoreID1}), @@ -653,7 +653,7 @@ test_cut_peer_queue() -> queued_task_count = length(TaskQueue), scheduled_task_count = 10 }, - + {PeerTasks1, State1} = cut_peer_queue(200, PeerTasks, State), assert_peer_tasks(TaskQueue, 0, 8, PeerTasks1), ?assertEqual(100, State1#state.queued_task_count), @@ -682,7 +682,7 @@ test_update_active() -> 200, #state{ worker_count = 20 }), ?assertEqual(11, Result1#peer_tasks.max_active), - + Result2 = update_active( #peer_tasks{max_active = 10, active_count = 20, task_queue_len = 30}, #performance{average_latency = 300}, @@ -698,7 +698,7 @@ test_update_active() -> 200, #state{ worker_count = 20 }), ?assertEqual(11, Result3#peer_tasks.max_active), - + Result4 = update_active( #peer_tasks{max_active = 10, active_count = 20, task_queue_len = 30}, #performance{average_latency = 100}, @@ -706,7 +706,7 @@ test_update_active() -> 200, #state{ worker_count = 10 }), ?assertEqual(10, Result4#peer_tasks.max_active), - + Result5 = update_active( #peer_tasks{max_active = 10, active_count = 5, task_queue_len = 10}, #performance{average_latency = 100}, @@ -714,7 +714,7 @@ test_update_active() -> 200, #state{ worker_count = 20 }), ?assertEqual(10, Result5#peer_tasks.max_active), - + Result6 = update_active( #peer_tasks{max_active = 10, active_count = 10, task_queue_len = 5}, #performance{average_latency = 100}, @@ -742,7 +742,7 @@ test_calculate_targets() -> "peer2" => #performance{current_rating = 0, average_latency = 0} }), ?assertEqual({0.0, 0.0}, Result2), - + Result3 = calculate_targets( ["peer1", "peer2"], #{ diff --git a/apps/arweave/src/ar_disk_cache.erl b/apps/arweave/src/ar_disk_cache.erl index 116db01ac..396115e27 100644 --- a/apps/arweave/src/ar_disk_cache.erl +++ b/apps/arweave/src/ar_disk_cache.erl @@ -33,7 +33,7 @@ %%% API %%%=================================================================== -lookup_block_filename(H) when is_binary(H)-> +lookup_block_filename(H) when is_binary(H) -> %% Use the process dictionary to keep the path. 
PathBlock = case get(ar_disk_cache_path) of @@ -154,7 +154,7 @@ init([]) -> Path, "(.*\\.json$)|(.*\\.bin$)", true, - fun(F,Acc) -> filelib:file_size(F) + Acc end, + fun(F, Acc) -> filelib:file_size(F) + Acc end, 0 ), LimitMax = Config#config.disk_cache_size * 1048576, % MB to Bytes. diff --git a/apps/arweave/src/ar_disksup.erl b/apps/arweave/src/ar_disksup.erl index d1d243c4c..32b2f92ce 100644 --- a/apps/arweave/src/ar_disksup.erl +++ b/apps/arweave/src/ar_disksup.erl @@ -287,13 +287,13 @@ check_disks_solaris(Str) -> %% @private %% @doc Predicate to take a word from the input string until a space or %% a percent '%' sign (the Capacity field is followed by a %) -parse_df_is_not_space($ ) -> false; +parse_df_is_not_space($\s) -> false; parse_df_is_not_space($%) -> false; parse_df_is_not_space(_) -> true. %% @private %% @doc Predicate to take spaces away from string. Stops on a non-space -parse_df_is_space($ ) -> true; +parse_df_is_space($\s) -> true; parse_df_is_space(_) -> false. %% @private @@ -384,7 +384,7 @@ check_disks_susv3(Str) -> check_disks_win32([]) -> []; -check_disks_win32([H|T]) -> +check_disks_win32([H | T]) -> case io_lib:fread("~s~s~d~d~d", H) of {ok, [Drive, "DRIVE_FIXED", BAvail, BTot, _TotFree], _RestStr} -> [{Drive, BTot div 1024, BAvail div 1024} | check_disks_win32(T)]; @@ -426,7 +426,7 @@ parse_df_2(Input) -> BlocksNum = case string:tokens(BlocksInfo, "-") of [Num, _] -> erlang:list_to_integer(Num); - _-> + _ -> 1 end, [Filesystem, Total, _, Available, _, _] = string:tokens(DfInfo, " \t"), diff --git a/apps/arweave/src/ar_domain.erl b/apps/arweave/src/ar_domain.erl index ff4643c3e..7d22c4097 100644 --- a/apps/arweave/src/ar_domain.erl +++ b/apps/arweave/src/ar_domain.erl @@ -12,7 +12,7 @@ get_labeling(ApexDomain, CustomDomains, Hostname) -> {0, Size} -> apex; {N, Size} -> - Label = binary:part(Hostname, {0, N-1}), + Label = binary:part(Hostname, {0, N - 1}), {labeled, Label}; nomatch -> get_labeling_1(CustomDomains, Hostname) @@ -22,7 +22,7 @@ lookup_arweave_txt_record(Domain) -> case inet_res:lookup("_arweave." ++ binary_to_list(Domain), in, txt) of [] -> not_found; - [RecordChunks|_] -> + [RecordChunks | _] -> list_to_binary(lists:concat(RecordChunks)) end. diff --git a/apps/arweave/src/ar_events.erl b/apps/arweave/src/ar_events.erl index f4109cb9a..3cdd14681 100644 --- a/apps/arweave/src/ar_events.erl +++ b/apps/arweave/src/ar_events.erl @@ -104,7 +104,7 @@ init(Name) -> %% {stop, Reason, State} %% @end %%-------------------------------------------------------------------- -handle_call(subscribe , {From, _Tag}, State) -> +handle_call(subscribe, {From, _Tag}, State) -> case maps:get(From, State#state.subscribers, unknown) of unknown -> Ref = erlang:monitor(process, From), @@ -140,8 +140,8 @@ handle_call(Request, _From, State) -> %%-------------------------------------------------------------------- handle_cast({send, From, Value}, State) -> %% Send to the subscribers except self. - [Pid ! {event, State#state.name, Value} - || Pid <- maps:keys(State#state.subscribers), Pid /= From], + [Pid ! {event, State#state.name, Value} || + Pid <- maps:keys(State#state.subscribers), Pid /= From], {noreply, State}; handle_cast(Msg, State) -> ?LOG_ERROR([{event, unhandled_cast}, {message, Msg}]), diff --git a/apps/arweave/src/ar_fraction.erl b/apps/arweave/src/ar_fraction.erl index a7d830f52..f88996006 100644 --- a/apps/arweave/src/ar_fraction.erl +++ b/apps/arweave/src/ar_fraction.erl @@ -8,6 +8,7 @@ %%% Types. 
%%%=================================================================== +-export_type([fraction/0]). -type fraction() :: {integer(), integer()}. %%%=================================================================== @@ -15,7 +16,7 @@ %%%=================================================================== %% @doc Compute the given power of the given integer. --spec pow(X::integer(), P::integer()) -> integer(). +-spec pow(X :: integer(), P :: integer()) -> integer(). pow(_X, 0) -> 1; pow(X, 1) -> @@ -34,14 +35,14 @@ pow(X, N) -> %% @doc Compute the X's power of e by summing up the terms of the Taylor series where %% the last term is a multiple of X to the power of P. --spec natural_exponent(X::fraction(), P::integer()) -> fraction(). +-spec natural_exponent(X :: fraction(), P :: integer()) -> fraction(). natural_exponent({0, _Divisor}, _P) -> {1, 1}; natural_exponent(X, P) -> {natural_exponent_dividend(X, P, 0, 1), natural_exponent_divisor(X, P)}. %% @doc Return the smaller of D1 and D2. --spec minimum(D1::fraction(), D2::fraction()) -> fraction(). +-spec minimum(D1 :: fraction(), D2 :: fraction()) -> fraction(). minimum({Dividend1, Divisor1} = D1, {Dividend2, Divisor2} = D2) -> case Dividend1 * Divisor2 < Dividend2 * Divisor1 of true -> @@ -51,7 +52,7 @@ minimum({Dividend1, Divisor1} = D1, {Dividend2, Divisor2} = D2) -> end. %% @doc Return the bigger of D1 and D2. --spec maximum(D1::fraction(), D2::fraction()) -> fraction(). +-spec maximum(D1 :: fraction(), D2 :: fraction()) -> fraction(). maximum(D1, D2) -> case minimum(D1, D2) of D1 -> @@ -61,13 +62,13 @@ maximum(D1, D2) -> end. %% @doc Return the product of D1 and D2. --spec multiply(D1::fraction(), D2::fraction()) -> fraction(). +-spec multiply(D1 :: fraction(), D2 :: fraction()) -> fraction(). multiply({Dividend1, Divisor1}, {Dividend2, Divisor2}) -> {Dividend1 * Dividend2, Divisor1 * Divisor2}. %% @doc Reduce the fraction until both the divisor and dividend are smaller than %% or equal to Max. Return at most Max or at least 1 / Max. --spec reduce(D::fraction(), Max::integer()) -> fraction(). +-spec reduce(D :: fraction(), Max :: integer()) -> fraction(). reduce({0, Divisor}, _Max) -> {0, Divisor}; reduce({Dividend, Divisor}, Max) -> @@ -75,7 +76,7 @@ reduce({Dividend, Divisor}, Max) -> reduce2({Dividend div GCD, Divisor div GCD}, Max). %% @doc Return the sum of two fractions. --spec add(A::fraction(), B::integer()) -> fraction(). +-spec add(A :: fraction(), B :: integer()) -> fraction(). add({Dividend1, Divisor1}, {Dividend2, Divisor2}) -> {Dividend1 * Divisor2 + Dividend2 * Divisor1, Divisor1 * Divisor2}. diff --git a/apps/arweave/src/ar_graphql_handler.erl b/apps/arweave/src/ar_graphql_handler.erl index 854710ffb..32234fae1 100644 --- a/apps/arweave/src/ar_graphql_handler.erl +++ b/apps/arweave/src/ar_graphql_handler.erl @@ -67,8 +67,8 @@ gather_query_params(Req, Body, Params) -> {error, Reason} end. -document([#{ <<"query">> := Q }|_]) -> Q; -document([_|Next]) -> document(Next); +document([#{ <<"query">> := Q } | _]) -> Q; +document([_ | Next]) -> document(Next); document([]) -> undefined. 
variables([#{ <<"variables">> := Vars } | _]) -> diff --git a/apps/arweave/src/ar_graphql_query.erl b/apps/arweave/src/ar_graphql_query.erl index 965ad8d2d..8ce9ebbfb 100644 --- a/apps/arweave/src/ar_graphql_query.erl +++ b/apps/arweave/src/ar_graphql_query.erl @@ -41,7 +41,7 @@ do_execute(_, _, <<"transactions">>, Args) -> end ]), TXs = case Opts of - [_|_] -> ar_arql_db:select_txs_by(Opts); + [_ | _] -> ar_arql_db:select_txs_by(Opts); [] -> [] end, {ok, [{ok, TX} || TX <- TXs]}; diff --git a/apps/arweave/src/ar_http.erl b/apps/arweave/src/ar_http.erl index 29b1b7c50..d6f058919 100644 --- a/apps/arweave/src/ar_http.erl +++ b/apps/arweave/src/ar_http.erl @@ -88,7 +88,7 @@ req(Args, ReestablishedConnection) -> case ReestablishedConnection of true -> ok; - false -> + false -> %% NOTE: the erlang prometheus client looks at the metric name to determine units. %% If it sees _duration_ it assumes the observed value is in %% native units and it converts it to .To query native units, use: @@ -99,7 +99,7 @@ req(Args, ReestablishedConnection) -> ar_http_iface_server:label_http_path(list_to_binary(Path)), ar_metrics:get_status_class(Response) ], EndTime - StartTime) - end, + end, Response. %%% ================================================================== %%% gen_server callbacks. diff --git a/apps/arweave/src/ar_http_iface_client.erl b/apps/arweave/src/ar_http_iface_client.erl index 0b74795e1..3a1ac7249 100644 --- a/apps/arweave/src/ar_http_iface_client.erl +++ b/apps/arweave/src/ar_http_iface_client.erl @@ -1013,19 +1013,17 @@ get_info(Peer) -> %% @doc Return a list of parsed peer IPs for a remote server. get_peers(Peer) -> try - begin - {ok, {{<<"200">>, _}, _, Body, _, _}} = - ar_http:req(#{ - method => get, - peer => Peer, - path => "/peers", - headers => p2p_headers(), - connect_timeout => 500, - timeout => 2 * 1000 - }), - PeerArray = ar_serialize:dejsonify(Body), - lists:map(fun ar_util:parse_peer/1, PeerArray) - end + {ok, {{<<"200">>, _}, _, Body, _, _}} = + ar_http:req(#{ + method => get, + peer => Peer, + path => "/peers", + headers => p2p_headers(), + connect_timeout => 500, + timeout => 2 * 1000 + }), + PeerArray = ar_serialize:dejsonify(Body), + lists:map(fun ar_util:parse_peer/1, PeerArray) catch _:_ -> unavailable end. diff --git a/apps/arweave/src/ar_http_iface_middleware.erl b/apps/arweave/src/ar_http_iface_middleware.erl index 3b74f4aa2..399555948 100644 --- a/apps/arweave/src/ar_http_iface_middleware.erl +++ b/apps/arweave/src/ar_http_iface_middleware.erl @@ -60,18 +60,20 @@ loop(TimeoutRef) -> CowboyStatus = handle_custom_codes(Status), RepliedReq = cowboy_req:reply(CowboyStatus, Headers, Body, HandledReq), {stop, RepliedReq}; - {read_complete_body, From, Req, SizeLimit} -> - case catch ar_http_req:body(Req, SizeLimit) of - Term -> - From ! {read_complete_body, Term} - end, - loop(TimeoutRef); - {read_body_chunk, From, Req, Size, Timeout} -> - case catch ar_http_req:read_body_chunk(Req, Size, Timeout) of - Term -> - From ! {read_body_chunk, Term} - end, - loop(TimeoutRef); + {read_complete_body, From, Req, SizeLimit} -> + try ar_http_req:body(Req, SizeLimit) of + Term -> From ! {read_complete_body, Term} + catch + Exception:Reason -> From ! {read_complete_body, {Exception, Reason}} + end, + loop(TimeoutRef); + {read_body_chunk, From, Req, Size, Timeout} -> + try ar_http_req:read_body_chunk(Req, Size, Timeout) of + Term -> From ! {read_body_chunk, Term} + catch + Exception:Reason -> From ! 
{read_body_chunk, {Exception, Reason}} + end, + loop(TimeoutRef); {timeout, HandlerPid, InitialReq} -> unlink(HandlerPid), exit(HandlerPid, handler_timeout), @@ -598,11 +600,10 @@ handle(<<"GET">>, [<<"peers">>], Req, _Pid) -> {200, #{}, ar_serialize:jsonify( [ - list_to_binary(ar_util:format_peer(P)) - || - P <- ar_peers:get_peers(lifetime), - P /= ar_http_util:arweave_peer(Req), - ar_peers:is_public_peer(P) + list_to_binary(ar_util:format_peer(P)) || + P <- ar_peers:get_peers(lifetime), + P /= ar_http_util:arweave_peer(Req), + ar_peers:is_public_peer(P) ] ), Req}; @@ -835,10 +836,10 @@ handle(<<"GET">>, [<<"block_time_history">>, EncodedBH], Req, _Pid) -> %% Return the current JSON-encoded hash list held by the node. %% GET request to endpoint /block_index. -handle(<<"GET">>, [<<"hash_list">>], Req, _Pid) -> - handle(<<"GET">>, [<<"block_index">>], Req, _Pid); +handle(<<"GET">>, [<<"hash_list">>], Req, Pid) -> + handle(<<"GET">>, [<<"block_index">>], Req, Pid); -handle(<<"GET">>, [<<"block_index">>], Req, _Pid) -> +handle(<<"GET">>, [<<"block_index">>], Req, _) -> ok = ar_semaphore:acquire(get_block_index, infinity), case ar_node:is_joined() of false -> @@ -877,17 +878,17 @@ handle(<<"GET">>, [<<"block_index2">>], Req, _Pid) -> end end; -handle(<<"GET">>, [<<"hash_list">>, From, To], Req, _Pid) -> - handle(<<"GET">>, [<<"block_index">>, From, To], Req, _Pid); +handle(<<"GET">>, [<<"hash_list">>, From, To], Req, Pid) -> + handle(<<"GET">>, [<<"block_index">>, From, To], Req, Pid); -handle(<<"GET">>, [<<"hash_list2">>, From, To], Req, _Pid) -> - handle(<<"GET">>, [<<"block_index2">>, From, To], Req, _Pid); +handle(<<"GET">>, [<<"hash_list2">>, From, To], Req, Pid) -> + handle(<<"GET">>, [<<"block_index2">>, From, To], Req, Pid); -handle(<<"GET">>, [<<"block_index2">>, From, To], Req, _Pid) -> +handle(<<"GET">>, [<<"block_index2">>, From, To], Req, Pid) -> erlang:put(encoding, binary), - handle(<<"GET">>, [<<"block_index">>, From, To], Req, _Pid); + handle(<<"GET">>, [<<"block_index">>, From, To], Req, Pid); -handle(<<"GET">>, [<<"block_index">>, From, To], Req, _Pid) -> +handle(<<"GET">>, [<<"block_index">>, From, To], Req, _) -> ok = ar_semaphore:acquire(get_block_index, infinity), case ar_node:is_joined() of false -> @@ -1553,15 +1554,17 @@ handle_get_tx_status(EncodedTXID, Req) -> %% First confirmation is when the TX is %% in the latest block. NumberOfConfirmations = CurrentHeight - Height + 1, - Status = PseudoTags - ++ [{<<"number_of_confirmations">>, - NumberOfConfirmations}], + Status = PseudoTags ++ [ + {<<"number_of_confirmations">>, NumberOfConfirmations} + ], {200, #{}, ar_serialize:jsonify({Status}), Req}; _ -> {404, #{}, <<"Not Found.">>, Req} end; not_found -> {404, #{}, <<"Not Found.">>, Req}; + {error, not_found} -> + {404, #{}, <<"Not Found.">>, Req}; {error, timeout} -> {503, #{}, <<"ArQL unavailable.">>, Req} end @@ -1842,8 +1845,7 @@ handle_get_block(H, Req, Pid, Encoding) -> {400, #{}, <<>>, Req2}; Indices -> Map = collect_missing_transactions(B#block.txs, Indices), - TXs2 = [maps:get(TX#tx.id, Map, TX#tx.id) - || TX <- B#block.txs], + TXs2 = [maps:get(TX#tx.id, Map, TX#tx.id) || TX <- B#block.txs], handle_get_block3(B#block{ txs = TXs2 }, Req2, binary) end; {error, body_size_too_large} -> @@ -2577,12 +2579,12 @@ get_total_supply(RootHash, Cursor, Sum, Denomination) -> end. 
get_balance_sum([{_, {Balance, _LastTX}} | Range], BlockDenomination) -> - ar_pricing:redenominate(Balance, 1, BlockDenomination) - + get_balance_sum(Range, BlockDenomination); + ar_pricing:redenominate(Balance, 1, BlockDenomination) + + get_balance_sum(Range, BlockDenomination); get_balance_sum([{_, {Balance, _LastTX, Denomination, _MiningPermission}} | Range], BlockDenomination) -> - ar_pricing:redenominate(Balance, Denomination, BlockDenomination) - + get_balance_sum(Range, BlockDenomination); + ar_pricing:redenominate(Balance, Denomination, BlockDenomination) + + get_balance_sum(Range, BlockDenomination); get_balance_sum([], _BlockDenomination) -> 0. diff --git a/apps/arweave/src/ar_inflation.erl b/apps/arweave/src/ar_inflation.erl index cd34b0b35..36dbf0a8f 100644 --- a/apps/arweave/src/ar_inflation.erl +++ b/apps/arweave/src/ar_inflation.erl @@ -1,4 +1,4 @@ -%%% @doc Module responsible for managing and testing the inflation schedule of +%%% @doc Module responsible for managing and testing the inflation schedule of %%% the Arweave main network. -module(ar_inflation). @@ -50,12 +50,9 @@ calculate_post_15_y1_extra() -> pre_15_calculate(Height) when Height =< ?REWARD_DELAY -> 1; pre_15_calculate(Height) -> - ?WINSTON_PER_AR - * 0.2 - * ?GENESIS_TOKENS - * math:pow(2, -(Height - ?REWARD_DELAY) / ?PRE_15_BLOCK_PER_YEAR) - * math:log(2) - / ?PRE_15_BLOCK_PER_YEAR. + ?WINSTON_PER_AR * 0.2 * ?GENESIS_TOKENS * + math:pow(2, -(Height - ?REWARD_DELAY) / ?PRE_15_BLOCK_PER_YEAR) * + math:log(2) / ?PRE_15_BLOCK_PER_YEAR. calculate_base(Height) -> {Ln2Dividend, Ln2Divisor} = ?LN2, @@ -63,27 +60,20 @@ calculate_base(Height) -> Divisor = ?BLOCKS_PER_YEAR * Ln2Divisor, Precision = ?INFLATION_NATURAL_EXPONENT_DECIMAL_FRACTION_PRECISION, {EXDividend, EXDivisor} = ar_fraction:natural_exponent({Dividend, Divisor}, Precision), - ?GENESIS_TOKENS - * ?WINSTON_PER_AR - * EXDivisor - * 2 - * Ln2Dividend - div ( - 10 - * ?BLOCKS_PER_YEAR - * Ln2Divisor - * EXDividend - ). + ?GENESIS_TOKENS * ?WINSTON_PER_AR * EXDivisor * 2 * Ln2Dividend div ( + 10 * + ?BLOCKS_PER_YEAR * + Ln2Divisor * + EXDividend + ). calculate_base_pre_fork_2_5(Height) -> - ?WINSTON_PER_AR - * ( - 0.2 - * ?GENESIS_TOKENS - * math:pow(2, -(Height) / ?BLOCK_PER_YEAR) - * math:log(2) - ) - / ?BLOCK_PER_YEAR. + ?WINSTON_PER_AR * ( + 0.2 * + ?GENESIS_TOKENS * + math:pow(2, -(Height) / ?BLOCK_PER_YEAR) * + math:log(2) + ) / ?BLOCK_PER_YEAR. %%%=================================================================== %%% Tests. @@ -166,8 +156,8 @@ is_in_tolerance(X, Y) -> is_in_tolerance(X, Y, ?DEFAULT_TOLERANCE_PERCENT). is_in_tolerance(X, Y, TolerancePercent) -> Tolerance = TolerancePercent / 100, - ( X >= ( Y * (1 - Tolerance ) ) ) and - ( X =< ( Y + (Y * Tolerance ) ) ). + (X >= (Y * (1 - Tolerance))) and + (X =< (Y + (Y * Tolerance))). %% @doc Count the total inflation rewards for a given year. year_sum_rewards(YearNum) -> diff --git a/apps/arweave/src/ar_join.erl b/apps/arweave/src/ar_join.erl index a4e871f6d..8ebf65155 100644 --- a/apps/arweave/src/ar_join.erl +++ b/apps/arweave/src/ar_join.erl @@ -39,8 +39,8 @@ set_block_time_history([], _BlockTimeHistory) -> set_block_time_history(Blocks, []) -> Blocks; set_block_time_history([B | Blocks], BlockTimeHistory) -> - [B#block{ block_time_history = BlockTimeHistory } - | set_block_time_history(Blocks, tl(BlockTimeHistory))]. + [B#block{ block_time_history = BlockTimeHistory } | + set_block_time_history(Blocks, tl(BlockTimeHistory))]. 
%%%=================================================================== %%% Private functions. @@ -234,14 +234,12 @@ get_block(Peers, BShadow, [TXID | TXIDs], TXs, Retries) -> do_join(Peers, B, BI) -> ar:console("Downloading the block trail.~n", []), {ok, Config} = application:get_env(arweave, config), - WorkerQ = queue:from_list([spawn(fun() -> worker() end) - || _ <- lists:seq(1, Config#config.join_workers)]), + WorkerQ = queue:from_list([spawn(fun() -> worker() end) || _ <- lists:seq(1, Config#config.join_workers)]), PeerQ = queue:from_list(Peers), Trail = lists:sublist(tl(BI), 2 * ?MAX_TX_ANCHOR_DEPTH), SizeTaggedTXs = ar_block:generate_size_tagged_list_from_txs(B#block.txs, B#block.height), Retries = lists:foldl(fun(Peer, Acc) -> maps:put(Peer, 10, Acc) end, #{}, Peers), - Blocks = [B#block{ size_tagged_txs = SizeTaggedTXs } - | get_block_trail(WorkerQ, PeerQ, Trail, Retries)], + Blocks = [B#block{ size_tagged_txs = SizeTaggedTXs } | get_block_trail(WorkerQ, PeerQ, Trail, Retries)], ar:console("Downloaded the block trail successfully.~n", []), Blocks2 = may_be_set_reward_history(Blocks, Peers), Blocks3 = may_be_set_block_time_history(Blocks2, Peers), diff --git a/apps/arweave/src/ar_mempool.erl b/apps/arweave/src/ar_mempool.erl index 0fa6d7199..45a6cf854 100644 --- a/apps/arweave/src/ar_mempool.erl +++ b/apps/arweave/src/ar_mempool.erl @@ -3,7 +3,7 @@ -include_lib("arweave/include/ar.hrl"). -export([reset/0, load_from_disk/0, add_tx/2, drop_txs/1, drop_txs/3, - get_map/0, get_all_txids/0, take_chunk/2, get_tx/1, has_tx/1, + get_map/0, get_all_txids/0, take_chunk/2, get_tx/1, has_tx/1, get_priority_set/0, get_last_tx_map/0, get_origin_tx_map/0, get_propagation_queue/0, del_from_propagation_queue/2]). @@ -21,7 +21,7 @@ load_from_disk() -> {ok, {SerializedTXs, _MempoolSize}} -> TXs = maps:map(fun(_, {TX, St}) -> {deserialize_tx(TX), St} end, SerializedTXs), - {MempoolSize2, PrioritySet2, PropagationQueue2, LastTXMap2, OriginTXMap2} = + {MempoolSize2, PrioritySet2, PropagationQueue2, LastTXMap2, OriginTXMap2} = maps:fold( fun(TXID, {TX, Status}, {MempoolSize, PrioritySet, PropagationQueue, LastTXMap, OriginTXMap}) -> MetaData = {_, _, Timestamp} = init_tx_metadata(TX, Status), @@ -79,11 +79,11 @@ add_tx(#tx{ id = TXID } = TX, Status) -> add_to_last_tx_map(get_last_tx_map(), TX), add_to_origin_tx_map(get_origin_tx_map(), TX) }; - {TX, PrevStatus, Timestamp} -> + {TX, PrevStatus, Timestamp} -> { {TX, Status, Timestamp}, get_mempool_size(), - add_to_priority_set(get_priority_set(),TX, PrevStatus, Status, Timestamp), + add_to_priority_set(get_priority_set(), TX, PrevStatus, Status, Timestamp), get_propagation_queue(), get_last_tx_map(), get_origin_tx_map() @@ -98,7 +98,7 @@ add_tx(#tx{ id = TXID } = TX, Status) -> {last_tx_map, LastTXMap}, {origin_tx_map, OriginTXMap} ]), - + case ar_node:is_joined() of true -> % 1. Drop unconfirmable transactions: @@ -353,7 +353,7 @@ del_from_origin_tx_map(OriginTXMap, TX) -> unconfirmed_tx(TX = #tx{}) -> {ar_tx:utility(TX), TX#tx.id}. 
- + increase_mempool_size( _MempoolSize = {MempoolHeaderSize, MempoolDataSize}, TX = #tx{}) -> @@ -390,7 +390,7 @@ find_low_priority_txs(Iterator, {MempoolHeaderSize, MempoolDataSize}) when MempoolHeaderSize > ?MEMPOOL_HEADER_SIZE_LIMIT; MempoolDataSize > ?MEMPOOL_DATA_SIZE_LIMIT -> - {{_Utility, TXID, _Status} = _Element, Iterator2} = gb_sets:next(Iterator), + {{_Utility_, TXID, _Status} = _, Iterator2} = gb_sets:next(Iterator), TX = get_tx(TXID), case should_drop_low_priority_tx(TX, {MempoolHeaderSize, MempoolDataSize}) of true -> @@ -460,7 +460,7 @@ filter_clashing_txs(ClashingTXIDs) -> %% confirmed) %% %% Note: when doing the overspend calculation any unconfirmed deposit -%% transactions are ignored. This is to prevent a second potentially +%% transactions are ignored. This is to prevent a second potentially %% malicious scenario like the following: %% %% Peer A: receives deposit TX and several spend TXs, diff --git a/apps/arweave/src/ar_merkle.erl b/apps/arweave/src/ar_merkle.erl index b68a2dcac..2dd070081 100644 --- a/apps/arweave/src/ar_merkle.erl +++ b/apps/arweave/src/ar_merkle.erl @@ -15,12 +15,12 @@ %%% as verification of those proofs. -record(node, { id, - type = branch, % root | branch | leaf - data, % The value (for leaves). - note, % The offset, a number less than 2^256. - left, % The (optional) ID of a node to the left. - right, % The (optional) ID of a node to the right. - max, % The maximum observed note at this point. + type = branch, % root | branch | leaf + data, % The value (for leaves). + note, % The offset, a number less than 2^256. + left, % The (optional) ID of a node to the left. + right, % The (optional) ID of a node to the right. + max, % The maximum observed note at this point. is_rebased = false }). @@ -86,7 +86,7 @@ validate_path(ID, Dest, LeftBound, RightBound, Path, CheckBorders, CheckSplit, A DataSize = RightBound, %% Will be set to true only if we only take right branches from the root to the leaf. In this %% case we know the leaf chunk is the final chunk in the range represented by the merkle tree. - IsRightMostInItsSubTree = undefined, + IsRightMostInItsSubTree = undefined, %% Set to non-zero when AllowRebase is true and we begin processing a subtree. LeftBoundShift = 0, validate_path(ID, Dest, LeftBound, RightBound, Path, @@ -281,7 +281,7 @@ generate_leaf({Data, Note}) -> }. %% Note: This implementation leaves some duplicates in the tree structure. -%% The produced trees could be a little smaller if these duplicates were +%% The produced trees could be a little smaller if these duplicates were %% not present, but removing them with ar_util:unique takes far too long. generate_all_rows([RootN], Tree) -> RootID = RootN#node.id, @@ -335,8 +335,8 @@ generate_path_parts(ID, Dest, Tree, PrevNote) -> {true, left} -> {<< 0:(?HASH_SIZE * 8) >>, Dest - PrevNote} end, - [RebaseMark, N#node.left, N#node.right, note_to_binary(Note) - | generate_path_parts(NextID, Dest2, Tree, Note)] + [RebaseMark, N#node.left, N#node.right, note_to_binary(Note) | + generate_path_parts(NextID, Dest2, Tree, Note)] end. 
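The ar_merkle hunks above and the test churn below all revolve around the same three calls; a condensed sketch of that round trip (illustrative values, macros as used throughout this module):

%% Sketch of the generate/validate round trip the tests below exercise.
Leaf1 = crypto:strong_rand_bytes(?HASH_SIZE),
Leaf2 = crypto:strong_rand_bytes(?HASH_SIZE),
%% Tags are {LeafHash, CumulativeEndOffset} pairs, one per chunk.
{Root, Tree} = ar_merkle:generate_tree([{Leaf1, ?DATA_CHUNK_SIZE},
		{Leaf2, 2 * ?DATA_CHUNK_SIZE}]),
%% A path proves which leaf covers a given absolute byte offset.
Path = ar_merkle:generate_path(Root, 0, Tree),
%% Validation returns {LeafHash, StartOffset, EndOffset}, or false on failure.
{Leaf1, 0, ?DATA_CHUNK_SIZE} = ar_merkle:validate_path(Root, 0, 2 * ?DATA_CHUNK_SIZE,
		Path, offset_rebase_support_ruleset).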
get(ID, Map) -> @@ -421,7 +421,7 @@ test_tree_with_rebase_shallow() -> {Root0, Tree0} = ar_merkle:generate_tree(Tags0), assert_tree([ {branch, undefined, ?DATA_CHUNK_SIZE, false}, - {leaf, Leaf2, 2*?DATA_CHUNK_SIZE, false}, + {leaf, Leaf2, 2 * ?DATA_CHUNK_SIZE, false}, {leaf, Leaf1, ?DATA_CHUNK_SIZE, false} ], Tree0), @@ -438,7 +438,7 @@ test_tree_with_rebase_shallow() -> Path1_1 = ar_merkle:generate_path(Root1, 0, Tree1), ?assertNotEqual(Path0_1, Path1_1), {Leaf1, 0, ?DATA_CHUNK_SIZE} = ar_merkle:validate_path(Root0, 0, 2 * ?DATA_CHUNK_SIZE, - Path0_1, offset_rebase_support_ruleset), + Path0_1, offset_rebase_support_ruleset), {Leaf1, 0, ?DATA_CHUNK_SIZE} = ar_merkle:validate_path(Root1, 0, 2 * ?DATA_CHUNK_SIZE, Path1_1, offset_rebase_support_ruleset), ?assertEqual(false, ar_merkle:validate_path(Root1, 0, 2 * ?DATA_CHUNK_SIZE, @@ -454,7 +454,7 @@ test_tree_with_rebase_shallow() -> {Leaf2, ?DATA_CHUNK_SIZE, 2 * ?DATA_CHUNK_SIZE} = ar_merkle:validate_path( Root1, ?DATA_CHUNK_SIZE, 2 * ?DATA_CHUNK_SIZE, Path1_2, offset_rebase_support_ruleset), {Leaf2, ?DATA_CHUNK_SIZE, 2 * ?DATA_CHUNK_SIZE} = ar_merkle:validate_path( - Root1, 2 * ?DATA_CHUNK_SIZE - 1, 2 * ?DATA_CHUNK_SIZE, Path1_2, + Root1, 2 * ?DATA_CHUNK_SIZE - 1, 2 * ?DATA_CHUNK_SIZE, Path1_2, offset_rebase_support_ruleset), ?assertEqual(false, ar_merkle:validate_path( Root1, ?DATA_CHUNK_SIZE, 2 * ?DATA_CHUNK_SIZE, Path0_2, offset_rebase_support_ruleset)), @@ -488,24 +488,24 @@ test_tree_with_rebase_shallow() -> 2 * ?DATA_CHUNK_SIZE, Path2_1, offset_rebase_support_ruleset), Path2_2 = ar_merkle:generate_path(Root2, ?DATA_CHUNK_SIZE, Tree2), - {Leaf2, ?DATA_CHUNK_SIZE, 2*?DATA_CHUNK_SIZE} = ar_merkle:validate_path(Root2, + {Leaf2, ?DATA_CHUNK_SIZE, 2 * ?DATA_CHUNK_SIZE} = ar_merkle:validate_path(Root2, ?DATA_CHUNK_SIZE, 2 * ?DATA_CHUNK_SIZE, Path2_2, offset_rebase_support_ruleset), - - {Leaf2, ?DATA_CHUNK_SIZE, 2*?DATA_CHUNK_SIZE} = ar_merkle:validate_path(Root2, - 2*?DATA_CHUNK_SIZE - 1, 2*?DATA_CHUNK_SIZE, Path2_2, offset_rebase_support_ruleset), + + {Leaf2, ?DATA_CHUNK_SIZE, 2 * ?DATA_CHUNK_SIZE} = ar_merkle:validate_path(Root2, + 2 * ?DATA_CHUNK_SIZE - 1, 2 * ?DATA_CHUNK_SIZE, Path2_2, offset_rebase_support_ruleset), ?assertEqual(false, ar_merkle:validate_path(Root2, ?DATA_CHUNK_SIZE, - 2*?DATA_CHUNK_SIZE, Path2_1, offset_rebase_support_ruleset)), + 2 * ?DATA_CHUNK_SIZE, Path2_1, offset_rebase_support_ruleset)), ?assertEqual(false, ar_merkle:validate_path(Root2, 0, - 2*?DATA_CHUNK_SIZE, Path2_2, offset_rebase_support_ruleset)). + 2 * ?DATA_CHUNK_SIZE, Path2_2, offset_rebase_support_ruleset)). 
test_tree_with_rebase_nested() -> %% _________________Root3________________ %% / \ %% _____SubTree1______________ Leaf6 - %% / \ + %% / \ %% SubTree2 ________SubTree3_________ - %% / \ / \ + %% / \ / \ %% Leaf1 Leaf2 SubTree4 (with offset reset) Leaf5 %% / \ %% Leaf3 Leaf4 (with offset reset) @@ -517,27 +517,27 @@ test_tree_with_rebase_nested() -> Leaf6 = crypto:strong_rand_bytes(?HASH_SIZE), Tags3 = [ {Leaf1, ?DATA_CHUNK_SIZE}, - {Leaf2, 2*?DATA_CHUNK_SIZE}, + {Leaf2, 2 * ?DATA_CHUNK_SIZE}, [ {Leaf3, ?DATA_CHUNK_SIZE}, [ {Leaf4, ?DATA_CHUNK_SIZE} ] ], - {Leaf5, 5*?DATA_CHUNK_SIZE}, - {Leaf6, 6*?DATA_CHUNK_SIZE} + {Leaf5, 5 * ?DATA_CHUNK_SIZE}, + {Leaf6, 6 * ?DATA_CHUNK_SIZE} ], {Root3, Tree3} = ar_merkle:generate_tree(Tags3), assert_tree([ - {branch, undefined, 5*?DATA_CHUNK_SIZE, false}, %% Root - {branch, undefined, 2*?DATA_CHUNK_SIZE, false}, %% SubTree1 - {leaf, Leaf6, 6*?DATA_CHUNK_SIZE, false}, + {branch, undefined, 5 * ?DATA_CHUNK_SIZE, false}, %% Root + {branch, undefined, 2 * ?DATA_CHUNK_SIZE, false}, %% SubTree1 + {leaf, Leaf6, 6 * ?DATA_CHUNK_SIZE, false}, {branch, undefined, ?DATA_CHUNK_SIZE, false}, %% SubTree2 - {branch, undefined, 4*?DATA_CHUNK_SIZE, false}, %% SubTree3 - {leaf, Leaf6, 6*?DATA_CHUNK_SIZE, false}, %% duplicates are safe and expected - {leaf, Leaf6, 6*?DATA_CHUNK_SIZE, false}, %% duplicates are safe and expected - {leaf, Leaf5, 5*?DATA_CHUNK_SIZE, false}, - {leaf, Leaf2, 2*?DATA_CHUNK_SIZE, false}, + {branch, undefined, 4 * ?DATA_CHUNK_SIZE, false}, %% SubTree3 + {leaf, Leaf6, 6 * ?DATA_CHUNK_SIZE, false}, %% duplicates are safe and expected + {leaf, Leaf6, 6 * ?DATA_CHUNK_SIZE, false}, %% duplicates are safe and expected + {leaf, Leaf5, 5 * ?DATA_CHUNK_SIZE, false}, + {leaf, Leaf2, 2 * ?DATA_CHUNK_SIZE, false}, {leaf, Leaf1, ?DATA_CHUNK_SIZE, false}, {branch, undefined, ?DATA_CHUNK_SIZE, true}, %% SubTree4 {leaf, Leaf3, ?DATA_CHUNK_SIZE, false}, @@ -547,39 +547,39 @@ test_tree_with_rebase_nested() -> BadRoot = crypto:strong_rand_bytes(32), Path3_1 = ar_merkle:generate_path(Root3, 0, Tree3), {Leaf1, 0, ?DATA_CHUNK_SIZE} = ar_merkle:validate_path( - Root3, 0, 6*?DATA_CHUNK_SIZE, Path3_1, offset_rebase_support_ruleset), + Root3, 0, 6 * ?DATA_CHUNK_SIZE, Path3_1, offset_rebase_support_ruleset), ?assertEqual(false, ar_merkle:validate_path( - BadRoot, 0, 6*?DATA_CHUNK_SIZE, Path3_1, offset_rebase_support_ruleset)), - + BadRoot, 0, 6 * ?DATA_CHUNK_SIZE, Path3_1, offset_rebase_support_ruleset)), + Path3_2 = ar_merkle:generate_path(Root3, ?DATA_CHUNK_SIZE, Tree3), - {Leaf2, ?DATA_CHUNK_SIZE, 2*?DATA_CHUNK_SIZE} = ar_merkle:validate_path( - Root3, ?DATA_CHUNK_SIZE, 6*?DATA_CHUNK_SIZE, Path3_2, offset_rebase_support_ruleset), + {Leaf2, ?DATA_CHUNK_SIZE, 2 * ?DATA_CHUNK_SIZE} = ar_merkle:validate_path( + Root3, ?DATA_CHUNK_SIZE, 6 * ?DATA_CHUNK_SIZE, Path3_2, offset_rebase_support_ruleset), ?assertEqual(false, ar_merkle:validate_path( - BadRoot, ?DATA_CHUNK_SIZE, 6*?DATA_CHUNK_SIZE, Path3_2, offset_rebase_support_ruleset)), - + BadRoot, ?DATA_CHUNK_SIZE, 6 * ?DATA_CHUNK_SIZE, Path3_2, offset_rebase_support_ruleset)), + Path3_3 = ar_merkle:generate_path(Root3, ?DATA_CHUNK_SIZE * 2, Tree3), - {Leaf3, 2*?DATA_CHUNK_SIZE, 3*?DATA_CHUNK_SIZE} = ar_merkle:validate_path( - Root3, 2*?DATA_CHUNK_SIZE, 6*?DATA_CHUNK_SIZE, Path3_3, offset_rebase_support_ruleset), + {Leaf3, 2 * ?DATA_CHUNK_SIZE, 3 * ?DATA_CHUNK_SIZE} = ar_merkle:validate_path( + Root3, 2 * ?DATA_CHUNK_SIZE, 6 * ?DATA_CHUNK_SIZE, Path3_3, offset_rebase_support_ruleset), ?assertEqual(false, ar_merkle:validate_path( - 
BadRoot, 2*?DATA_CHUNK_SIZE, 6*?DATA_CHUNK_SIZE, Path3_3, offset_rebase_support_ruleset)), - + BadRoot, 2 * ?DATA_CHUNK_SIZE, 6 * ?DATA_CHUNK_SIZE, Path3_3, offset_rebase_support_ruleset)), + Path3_4 = ar_merkle:generate_path(Root3, ?DATA_CHUNK_SIZE * 3, Tree3), - {Leaf4, 3*?DATA_CHUNK_SIZE, 4*?DATA_CHUNK_SIZE} = ar_merkle:validate_path( - Root3, 3*?DATA_CHUNK_SIZE, 6*?DATA_CHUNK_SIZE, Path3_4, offset_rebase_support_ruleset), + {Leaf4, 3 * ?DATA_CHUNK_SIZE, 4 * ?DATA_CHUNK_SIZE} = ar_merkle:validate_path( + Root3, 3 * ?DATA_CHUNK_SIZE, 6 * ?DATA_CHUNK_SIZE, Path3_4, offset_rebase_support_ruleset), ?assertEqual(false, ar_merkle:validate_path( - BadRoot, 3*?DATA_CHUNK_SIZE, 6*?DATA_CHUNK_SIZE, Path3_4, offset_rebase_support_ruleset)), - + BadRoot, 3 * ?DATA_CHUNK_SIZE, 6 * ?DATA_CHUNK_SIZE, Path3_4, offset_rebase_support_ruleset)), + Path3_5 = ar_merkle:generate_path(Root3, ?DATA_CHUNK_SIZE * 4, Tree3), - {Leaf5, 4*?DATA_CHUNK_SIZE, 5*?DATA_CHUNK_SIZE} = ar_merkle:validate_path( - Root3, 4*?DATA_CHUNK_SIZE, 6*?DATA_CHUNK_SIZE, Path3_5, offset_rebase_support_ruleset), + {Leaf5, 4 * ?DATA_CHUNK_SIZE, 5 * ?DATA_CHUNK_SIZE} = ar_merkle:validate_path( + Root3, 4 * ?DATA_CHUNK_SIZE, 6 * ?DATA_CHUNK_SIZE, Path3_5, offset_rebase_support_ruleset), ?assertEqual(false, ar_merkle:validate_path( - BadRoot, 4*?DATA_CHUNK_SIZE, 6*?DATA_CHUNK_SIZE, Path3_5, offset_rebase_support_ruleset)), + BadRoot, 4 * ?DATA_CHUNK_SIZE, 6 * ?DATA_CHUNK_SIZE, Path3_5, offset_rebase_support_ruleset)), Path3_6 = ar_merkle:generate_path(Root3, ?DATA_CHUNK_SIZE * 5, Tree3), - {Leaf6, 5*?DATA_CHUNK_SIZE, 6*?DATA_CHUNK_SIZE} = ar_merkle:validate_path( - Root3, 5*?DATA_CHUNK_SIZE, 6*?DATA_CHUNK_SIZE, Path3_6, offset_rebase_support_ruleset), + {Leaf6, 5 * ?DATA_CHUNK_SIZE, 6 * ?DATA_CHUNK_SIZE} = ar_merkle:validate_path( + Root3, 5 * ?DATA_CHUNK_SIZE, 6 * ?DATA_CHUNK_SIZE, Path3_6, offset_rebase_support_ruleset), ?assertEqual(false, ar_merkle:validate_path( - BadRoot, 5*?DATA_CHUNK_SIZE, 6*?DATA_CHUNK_SIZE, Path3_6, offset_rebase_support_ruleset)), + BadRoot, 5 * ?DATA_CHUNK_SIZE, 6 * ?DATA_CHUNK_SIZE, Path3_6, offset_rebase_support_ruleset)), %% ________Root4_________ %% / \ @@ -590,28 +590,28 @@ test_tree_with_rebase_nested() -> %% Leaf3 Leaf4 Leaf5 Leaf6 Tags4 = [ {Leaf1, ?DATA_CHUNK_SIZE}, - {Leaf2, 2*?DATA_CHUNK_SIZE}, + {Leaf2, 2 * ?DATA_CHUNK_SIZE}, [ {Leaf3, ?DATA_CHUNK_SIZE}, - {Leaf4, 2*?DATA_CHUNK_SIZE} + {Leaf4, 2 * ?DATA_CHUNK_SIZE} ], [ {Leaf5, ?DATA_CHUNK_SIZE}, - {Leaf6, 2*?DATA_CHUNK_SIZE} + {Leaf6, 2 * ?DATA_CHUNK_SIZE} ] ], {Root4, Tree4} = ar_merkle:generate_tree(Tags4), assert_tree([ - {branch, undefined, 2*?DATA_CHUNK_SIZE, false}, %% Root + {branch, undefined, 2 * ?DATA_CHUNK_SIZE, false}, %% Root {branch, undefined, ?DATA_CHUNK_SIZE, false}, %% SubTree1 - {branch, undefined, 4*?DATA_CHUNK_SIZE, false}, %% SubTree2 - {leaf, Leaf2, 2*?DATA_CHUNK_SIZE, false}, + {branch, undefined, 4 * ?DATA_CHUNK_SIZE, false}, %% SubTree2 + {leaf, Leaf2, 2 * ?DATA_CHUNK_SIZE, false}, {leaf, Leaf1, ?DATA_CHUNK_SIZE, false}, {branch, undefined, ?DATA_CHUNK_SIZE, true}, %% SubTree3 - {leaf, Leaf4, 2*?DATA_CHUNK_SIZE, false}, + {leaf, Leaf4, 2 * ?DATA_CHUNK_SIZE, false}, {leaf, Leaf3, ?DATA_CHUNK_SIZE, false}, {branch, undefined, ?DATA_CHUNK_SIZE, true}, %% SubTree4 - {leaf, Leaf6, 2*?DATA_CHUNK_SIZE, false}, + {leaf, Leaf6, 2 * ?DATA_CHUNK_SIZE, false}, {leaf, Leaf5, ?DATA_CHUNK_SIZE, false} ], Tree4), @@ -656,50 +656,50 @@ test_tree_with_rebase_nested() -> %% / \ %% SubTree3 Leaf4 %% / \ - %% Leaf2 Leaf3 + %% Leaf2 Leaf3 Tags5 
= [ {Leaf1, ?DATA_CHUNK_SIZE}, [ {Leaf2, ?DATA_CHUNK_SIZE}, - {Leaf3, 2*?DATA_CHUNK_SIZE}, - {Leaf4, 3*?DATA_CHUNK_SIZE} + {Leaf3, 2 * ?DATA_CHUNK_SIZE}, + {Leaf4, 3 * ?DATA_CHUNK_SIZE} ], - {Leaf5, 5*?DATA_CHUNK_SIZE} + {Leaf5, 5 * ?DATA_CHUNK_SIZE} ], {Root5, Tree5} = ar_merkle:generate_tree(Tags5), assert_tree([ - {branch, undefined, 4*?DATA_CHUNK_SIZE, false}, %% Root + {branch, undefined, 4 * ?DATA_CHUNK_SIZE, false}, %% Root {branch, undefined, ?DATA_CHUNK_SIZE, false}, %% SubTree1 - {leaf, Leaf5, 5*?DATA_CHUNK_SIZE, false}, - {leaf, Leaf5, 5*?DATA_CHUNK_SIZE, false}, %% Duplicates are safe and expected + {leaf, Leaf5, 5 * ?DATA_CHUNK_SIZE, false}, + {leaf, Leaf5, 5 * ?DATA_CHUNK_SIZE, false}, %% Duplicates are safe and expected {leaf, Leaf1, ?DATA_CHUNK_SIZE, false}, - {branch, undefined, 2*?DATA_CHUNK_SIZE, true}, %% SubTree2 + {branch, undefined, 2 * ?DATA_CHUNK_SIZE, true}, %% SubTree2 {branch, undefined, ?DATA_CHUNK_SIZE, false}, %% SubTree3 - {leaf, Leaf4, 3*?DATA_CHUNK_SIZE, false}, - {leaf, Leaf4, 3*?DATA_CHUNK_SIZE, false}, - {leaf, Leaf3, 2*?DATA_CHUNK_SIZE, false}, + {leaf, Leaf4, 3 * ?DATA_CHUNK_SIZE, false}, + {leaf, Leaf4, 3 * ?DATA_CHUNK_SIZE, false}, + {leaf, Leaf3, 2 * ?DATA_CHUNK_SIZE, false}, {leaf, Leaf2, ?DATA_CHUNK_SIZE, false} ], Tree5), Path5_1 = ar_merkle:generate_path(Root5, 0, Tree5), - {Leaf1, 0, ?DATA_CHUNK_SIZE} = ar_merkle:validate_path(Root5, 0, 5*?DATA_CHUNK_SIZE, + {Leaf1, 0, ?DATA_CHUNK_SIZE} = ar_merkle:validate_path(Root5, 0, 5 * ?DATA_CHUNK_SIZE, Path5_1, offset_rebase_support_ruleset), Path5_2 = ar_merkle:generate_path(Root5, ?DATA_CHUNK_SIZE, Tree5), - {Leaf2, ?DATA_CHUNK_SIZE, 2*?DATA_CHUNK_SIZE} = ar_merkle:validate_path( - Root5, ?DATA_CHUNK_SIZE, 5*?DATA_CHUNK_SIZE, Path5_2, offset_rebase_support_ruleset), + {Leaf2, ?DATA_CHUNK_SIZE, 2 * ?DATA_CHUNK_SIZE} = ar_merkle:validate_path( + Root5, ?DATA_CHUNK_SIZE, 5 * ?DATA_CHUNK_SIZE, Path5_2, offset_rebase_support_ruleset), - Path5_3 = ar_merkle:generate_path(Root5, 2*?DATA_CHUNK_SIZE, Tree5), - {Leaf3, 2*?DATA_CHUNK_SIZE, 3*?DATA_CHUNK_SIZE} = ar_merkle:validate_path( - Root5, 2*?DATA_CHUNK_SIZE, 5*?DATA_CHUNK_SIZE, Path5_3, offset_rebase_support_ruleset), + Path5_3 = ar_merkle:generate_path(Root5, 2 * ?DATA_CHUNK_SIZE, Tree5), + {Leaf3, 2 * ?DATA_CHUNK_SIZE, 3 * ?DATA_CHUNK_SIZE} = ar_merkle:validate_path( + Root5, 2 * ?DATA_CHUNK_SIZE, 5 * ?DATA_CHUNK_SIZE, Path5_3, offset_rebase_support_ruleset), - Path5_4 = ar_merkle:generate_path(Root5, 3*?DATA_CHUNK_SIZE, Tree5), - {Leaf4, 3*?DATA_CHUNK_SIZE, 4*?DATA_CHUNK_SIZE} = ar_merkle:validate_path( - Root5, 3*?DATA_CHUNK_SIZE, 5*?DATA_CHUNK_SIZE, Path5_4, offset_rebase_support_ruleset), + Path5_4 = ar_merkle:generate_path(Root5, 3 * ?DATA_CHUNK_SIZE, Tree5), + {Leaf4, 3 * ?DATA_CHUNK_SIZE, 4 * ?DATA_CHUNK_SIZE} = ar_merkle:validate_path( + Root5, 3 * ?DATA_CHUNK_SIZE, 5 * ?DATA_CHUNK_SIZE, Path5_4, offset_rebase_support_ruleset), - Path5_5 = ar_merkle:generate_path(Root5, 4*?DATA_CHUNK_SIZE, Tree5), - {Leaf5, 4*?DATA_CHUNK_SIZE, 5*?DATA_CHUNK_SIZE} = ar_merkle:validate_path( - Root5, 4*?DATA_CHUNK_SIZE, 5*?DATA_CHUNK_SIZE, Path5_5, offset_rebase_support_ruleset), + Path5_5 = ar_merkle:generate_path(Root5, 4 * ?DATA_CHUNK_SIZE, Tree5), + {Leaf5, 4 * ?DATA_CHUNK_SIZE, 5 * ?DATA_CHUNK_SIZE} = ar_merkle:validate_path( + Root5, 4 * ?DATA_CHUNK_SIZE, 5 * ?DATA_CHUNK_SIZE, Path5_5, offset_rebase_support_ruleset), %% ______________Root__________________ %% / \ @@ -709,51 +709,51 @@ test_tree_with_rebase_nested() -> %% / \ %% Leaf2 SubTree3 (with offset 
reset) %% / \ - %% Leaf3 Leaf4 + %% Leaf3 Leaf4 Tags6 = [ {Leaf1, ?DATA_CHUNK_SIZE}, [ {Leaf2, ?DATA_CHUNK_SIZE}, [ {Leaf3, ?DATA_CHUNK_SIZE}, - {Leaf4, 2*?DATA_CHUNK_SIZE} + {Leaf4, 2 * ?DATA_CHUNK_SIZE} ] ], - {Leaf5, 5*?DATA_CHUNK_SIZE} + {Leaf5, 5 * ?DATA_CHUNK_SIZE} ], {Root6, Tree6} = ar_merkle:generate_tree(Tags6), assert_tree([ - {branch, undefined, 4*?DATA_CHUNK_SIZE, false}, %% Root + {branch, undefined, 4 * ?DATA_CHUNK_SIZE, false}, %% Root {branch, undefined, ?DATA_CHUNK_SIZE, false}, %% SubTree1 - {leaf, Leaf5, 5*?DATA_CHUNK_SIZE, false}, - {leaf, Leaf5, 5*?DATA_CHUNK_SIZE, false}, + {leaf, Leaf5, 5 * ?DATA_CHUNK_SIZE, false}, + {leaf, Leaf5, 5 * ?DATA_CHUNK_SIZE, false}, {leaf, Leaf1, ?DATA_CHUNK_SIZE, false}, {branch, undefined, ?DATA_CHUNK_SIZE, true}, %% SubTree2 {leaf, Leaf2, ?DATA_CHUNK_SIZE, false}, {branch, undefined, ?DATA_CHUNK_SIZE, true}, %% SubTree3 - {leaf, Leaf4, 2*?DATA_CHUNK_SIZE, false}, + {leaf, Leaf4, 2 * ?DATA_CHUNK_SIZE, false}, {leaf, Leaf3, ?DATA_CHUNK_SIZE, false} ], Tree6), Path6_1 = ar_merkle:generate_path(Root6, 0, Tree6), - {Leaf1, 0, ?DATA_CHUNK_SIZE} = ar_merkle:validate_path(Root6, 0, 5*?DATA_CHUNK_SIZE, + {Leaf1, 0, ?DATA_CHUNK_SIZE} = ar_merkle:validate_path(Root6, 0, 5 * ?DATA_CHUNK_SIZE, Path6_1, offset_rebase_support_ruleset), Path6_2 = ar_merkle:generate_path(Root6, ?DATA_CHUNK_SIZE, Tree6), - {Leaf2, ?DATA_CHUNK_SIZE, 2*?DATA_CHUNK_SIZE} = ar_merkle:validate_path( - Root6, ?DATA_CHUNK_SIZE, 5*?DATA_CHUNK_SIZE, Path6_2, offset_rebase_support_ruleset), + {Leaf2, ?DATA_CHUNK_SIZE, 2 * ?DATA_CHUNK_SIZE} = ar_merkle:validate_path( + Root6, ?DATA_CHUNK_SIZE, 5 * ?DATA_CHUNK_SIZE, Path6_2, offset_rebase_support_ruleset), - Path6_3 = ar_merkle:generate_path(Root6, 2*?DATA_CHUNK_SIZE, Tree6), - {Leaf3, 2*?DATA_CHUNK_SIZE, 3*?DATA_CHUNK_SIZE} = ar_merkle:validate_path( - Root6, 2*?DATA_CHUNK_SIZE, 5*?DATA_CHUNK_SIZE, Path6_3, offset_rebase_support_ruleset), + Path6_3 = ar_merkle:generate_path(Root6, 2 * ?DATA_CHUNK_SIZE, Tree6), + {Leaf3, 2 * ?DATA_CHUNK_SIZE, 3 * ?DATA_CHUNK_SIZE} = ar_merkle:validate_path( + Root6, 2 * ?DATA_CHUNK_SIZE, 5 * ?DATA_CHUNK_SIZE, Path6_3, offset_rebase_support_ruleset), - Path6_4 = ar_merkle:generate_path(Root6, 3*?DATA_CHUNK_SIZE, Tree6), - {Leaf4, 3*?DATA_CHUNK_SIZE, 4*?DATA_CHUNK_SIZE} = ar_merkle:validate_path( - Root6, 3*?DATA_CHUNK_SIZE, 5*?DATA_CHUNK_SIZE, Path6_4, offset_rebase_support_ruleset), + Path6_4 = ar_merkle:generate_path(Root6, 3 * ?DATA_CHUNK_SIZE, Tree6), + {Leaf4, 3 * ?DATA_CHUNK_SIZE, 4 * ?DATA_CHUNK_SIZE} = ar_merkle:validate_path( + Root6, 3 * ?DATA_CHUNK_SIZE, 5 * ?DATA_CHUNK_SIZE, Path6_4, offset_rebase_support_ruleset), - Path6_5 = ar_merkle:generate_path(Root6, 4*?DATA_CHUNK_SIZE, Tree6), - {Leaf5, 4*?DATA_CHUNK_SIZE, 5*?DATA_CHUNK_SIZE} = ar_merkle:validate_path( - Root6, 4*?DATA_CHUNK_SIZE, 5*?DATA_CHUNK_SIZE, Path6_5, offset_rebase_support_ruleset). + Path6_5 = ar_merkle:generate_path(Root6, 4 * ?DATA_CHUNK_SIZE, Tree6), + {Leaf5, 4 * ?DATA_CHUNK_SIZE, 5 * ?DATA_CHUNK_SIZE} = ar_merkle:validate_path( + Root6, 4 * ?DATA_CHUNK_SIZE, 5 * ?DATA_CHUNK_SIZE, Path6_5, offset_rebase_support_ruleset). 
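Structurally, the Tags lists in these tests encode rebasing by nesting: a chunk group wrapped in an inner list becomes a subtree whose offsets restart from zero (its branch carries is_rebased = true), and validate_path under offset_rebase_support_ruleset shifts the proven offsets back to absolute values. A two-chunk sketch of the same idea (illustrative, analogous to the shallow test earlier in this file):

%% Sketch: the nested list around Leaf2 yields a rebased subtree; validation
%% still reports absolute offsets for the chunk it proves.
Leaf1 = crypto:strong_rand_bytes(?HASH_SIZE),
Leaf2 = crypto:strong_rand_bytes(?HASH_SIZE),
{Root, Tree} = ar_merkle:generate_tree([{Leaf1, ?DATA_CHUNK_SIZE},
		[{Leaf2, ?DATA_CHUNK_SIZE}]]),
Path = ar_merkle:generate_path(Root, ?DATA_CHUNK_SIZE, Tree),
{Leaf2, ?DATA_CHUNK_SIZE, 2 * ?DATA_CHUNK_SIZE} = ar_merkle:validate_path(Root,
		?DATA_CHUNK_SIZE, 2 * ?DATA_CHUNK_SIZE, Path, offset_rebase_support_ruleset).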
test_tree_with_rebase_bad_paths() -> %% ______________Root__________________ @@ -764,7 +764,7 @@ test_tree_with_rebase_bad_paths() -> %% / \ %% Leaf2 SubTree3 (with offset reset) %% / \ - %% Leaf3 Leaf4 + %% Leaf3 Leaf4 Leaf1 = crypto:strong_rand_bytes(?HASH_SIZE), Leaf2 = crypto:strong_rand_bytes(?HASH_SIZE), Leaf3 = crypto:strong_rand_bytes(?HASH_SIZE), @@ -776,35 +776,35 @@ test_tree_with_rebase_bad_paths() -> {Leaf2, ?DATA_CHUNK_SIZE}, [ {Leaf3, ?DATA_CHUNK_SIZE}, - {Leaf4, 2*?DATA_CHUNK_SIZE} + {Leaf4, 2 * ?DATA_CHUNK_SIZE} ] ], - {Leaf5, 5*?DATA_CHUNK_SIZE} + {Leaf5, 5 * ?DATA_CHUNK_SIZE} ], {Root, Tree} = ar_merkle:generate_tree(Tags), - GoodPath = ar_merkle:generate_path(Root, 3*?DATA_CHUNK_SIZE, Tree), - {Leaf4, 3*?DATA_CHUNK_SIZE, 4*?DATA_CHUNK_SIZE} = ar_merkle:validate_path( - Root, 3*?DATA_CHUNK_SIZE, 5*?DATA_CHUNK_SIZE, GoodPath, offset_rebase_support_ruleset), + GoodPath = ar_merkle:generate_path(Root, 3 * ?DATA_CHUNK_SIZE, Tree), + {Leaf4, 3 * ?DATA_CHUNK_SIZE, 4 * ?DATA_CHUNK_SIZE} = ar_merkle:validate_path( + Root, 3 * ?DATA_CHUNK_SIZE, 5 * ?DATA_CHUNK_SIZE, GoodPath, offset_rebase_support_ruleset), BadPath1 = change_path(GoodPath, 0), %% Change L ?assertEqual(false, ar_merkle:validate_path( - Root, 3*?DATA_CHUNK_SIZE, 5*?DATA_CHUNK_SIZE, BadPath1, offset_rebase_support_ruleset)), + Root, 3 * ?DATA_CHUNK_SIZE, 5 * ?DATA_CHUNK_SIZE, BadPath1, offset_rebase_support_ruleset)), - BadPath2 = change_path(GoodPath, 2*?HASH_SIZE + 1), %% Change note + BadPath2 = change_path(GoodPath, 2 * ?HASH_SIZE + 1), %% Change note ?assertEqual(false, ar_merkle:validate_path( - Root, 3*?DATA_CHUNK_SIZE, 5*?DATA_CHUNK_SIZE, BadPath2, offset_rebase_support_ruleset)), + Root, 3 * ?DATA_CHUNK_SIZE, 5 * ?DATA_CHUNK_SIZE, BadPath2, offset_rebase_support_ruleset)), - BadPath3 = change_path(GoodPath, 2*?HASH_SIZE + ?NOTE_SIZE + 1), %% Change offset rebase zeros + BadPath3 = change_path(GoodPath, 2 * ?HASH_SIZE + ?NOTE_SIZE + 1), %% Change offset rebase zeros ?assertEqual(false, ar_merkle:validate_path( - Root, 3*?DATA_CHUNK_SIZE, 5*?DATA_CHUNK_SIZE, BadPath3, offset_rebase_support_ruleset)), + Root, 3 * ?DATA_CHUNK_SIZE, 5 * ?DATA_CHUNK_SIZE, BadPath3, offset_rebase_support_ruleset)), BadPath4 = change_path(GoodPath, byte_size(GoodPath) - ?NOTE_SIZE - 1), %% Change leaf data hash ?assertEqual(false, ar_merkle:validate_path( - Root, 3*?DATA_CHUNK_SIZE, 5*?DATA_CHUNK_SIZE, BadPath4, offset_rebase_support_ruleset)), + Root, 3 * ?DATA_CHUNK_SIZE, 5 * ?DATA_CHUNK_SIZE, BadPath4, offset_rebase_support_ruleset)), BadPath5 = change_path(GoodPath, byte_size(GoodPath) - 1), %% Change leaf note ?assertEqual(false, ar_merkle:validate_path( - Root, 3*?DATA_CHUNK_SIZE, 5*?DATA_CHUNK_SIZE, BadPath5, offset_rebase_support_ruleset)). + Root, 3 * ?DATA_CHUNK_SIZE, 5 * ?DATA_CHUNK_SIZE, BadPath5, offset_rebase_support_ruleset)). 
test_tree_with_rebase_partial_chunk() -> Leaf1 = crypto:strong_rand_bytes(?HASH_SIZE), @@ -832,8 +832,10 @@ test_tree_with_rebase_partial_chunk() -> ?DATA_CHUNK_SIZE + 100, Path5_1, offset_rebase_support_ruleset), Path5_2 = ar_merkle:generate_path(Root5, ?DATA_CHUNK_SIZE, Tree5), - {Leaf2, ?DATA_CHUNK_SIZE, ?DATA_CHUNK_SIZE+100} = ar_merkle:validate_path(Root5, - ?DATA_CHUNK_SIZE, ?DATA_CHUNK_SIZE+100, Path5_2, offset_rebase_support_ruleset), + {Leaf2, ?DATA_CHUNK_SIZE, ?DATA_CHUNK_SIZE + 100} = + ar_merkle:validate_path( + Root5, ?DATA_CHUNK_SIZE, ?DATA_CHUNK_SIZE + 100, Path5_2, offset_rebase_support_ruleset + ), %% Root6__________________ %% / \ @@ -869,7 +871,7 @@ test_tree_with_rebase_partial_chunk() -> {Leaf3, 393213, 655355} = ar_merkle:validate_path(Root6, 393213 + 2, 655355, Path6_3, offset_rebase_support_ruleset), - %% Root6 (with offset reset) + %% Root6 (with offset reset) %% / \ %% ____SubTree1___ Leaf3 %% / \ @@ -899,7 +901,7 @@ test_tree_with_rebase_partial_chunk() -> Path8_2 = ar_merkle:generate_path(Root8, 131070, Tree8), ?assertEqual(false, - ar_merkle:validate_path(Root8, 131070+5, 655355, Path8_2, offset_rebase_support_ruleset)), + ar_merkle:validate_path(Root8, 131070 + 5, 655355, Path8_2, offset_rebase_support_ruleset)), Path8_3 = ar_merkle:generate_path(Root8, 393213 + 1, Tree8), {Leaf3, 393213, 655355} = ar_merkle:validate_path(Root8, 393213 + 2, 655355, Path8_3, @@ -966,7 +968,7 @@ test_tree_with_rebase_partial_chunk() -> ], {Root10, Tree10} = ar_merkle:generate_tree(Tags10), assert_tree([ - {branch, undefined, ?DATA_CHUNK_SIZE+1, false}, %% Root + {branch, undefined, ?DATA_CHUNK_SIZE + 1, false}, %% Root {branch, undefined, ?DATA_CHUNK_SIZE, false}, %% SubTree1 {leaf, Leaf3, ?DATA_CHUNK_SIZE, true}, {leaf, Leaf1, ?DATA_CHUNK_SIZE, true}, @@ -980,13 +982,14 @@ test_tree_with_rebase_partial_chunk() -> Path10_1, offset_rebase_support_ruleset), Path10_2 = ar_merkle:generate_path(Root10, ?DATA_CHUNK_SIZE, Tree10), - {Leaf2, ?DATA_CHUNK_SIZE, ?DATA_CHUNK_SIZE+1} = ar_merkle:validate_path(Root10, - ?DATA_CHUNK_SIZE, ?DATA_CHUNK_SIZE+1, - Path10_2, offset_rebase_support_ruleset), + {Leaf2, ?DATA_CHUNK_SIZE, ?DATA_CHUNK_SIZE + 1} = + ar_merkle:validate_path( + Root10, ?DATA_CHUNK_SIZE, ?DATA_CHUNK_SIZE + 1, Path10_2, offset_rebase_support_ruleset + ), - Path10_3 = ar_merkle:generate_path(Root10, ?DATA_CHUNK_SIZE+1, Tree10), + Path10_3 = ar_merkle:generate_path(Root10, ?DATA_CHUNK_SIZE + 1, Tree10), ?assertEqual(false, - ar_merkle:validate_path(Root10, ?DATA_CHUNK_SIZE+1, (2*?DATA_CHUNK_SIZE)+1, + ar_merkle:validate_path(Root10, ?DATA_CHUNK_SIZE + 1, (2 * ?DATA_CHUNK_SIZE) + 1, Path10_3, offset_rebase_support_ruleset)), ok. @@ -1023,8 +1026,9 @@ test_tree_with_rebase_subtree_ids() -> ?assertEqual(SubTreeLeaf2#node.id, TreeLeaf2#node.id). generate_and_validate_uneven_tree_path_test() -> - Tags = make_tags_cumulative([{<>, 1} - || N <- lists:seq(0, ?UNEVEN_TEST_SIZE - 1)]), + Tags = make_tags_cumulative( + [{<>, 1} || N <- lists:seq(0, ?UNEVEN_TEST_SIZE - 1)] + ), {MR, Tree} = ar_merkle:generate_tree(Tags), %% Make sure the target is in the 'uneven' ending of the tree. Path = ar_merkle:generate_path(MR, ?UNEVEN_TEST_TARGET, Tree), @@ -1050,7 +1054,7 @@ test_reject_invalid_tree_path() -> ar_merkle:validate_path( MR, RandomTarget, ?TEST_SIZE, - ar_merkle:generate_path(MR, RandomTarget+1, Tree) + ar_merkle:generate_path(MR, RandomTarget + 1, Tree) ) ). 
@@ -1076,5 +1080,5 @@ assert_tree([ExpectedValues | RestOfValues], [Node | RestOfTree]) -> change_path(Path, Index) -> NewByte = (binary:at(Path, Index) + 1) rem 256, List = binary_to_list(Path), - UpdatedList = lists:sublist(List, Index) ++ [NewByte] ++ lists:nthtail(Index+1, List), + UpdatedList = lists:sublist(List, Index) ++ [NewByte] ++ lists:nthtail(Index + 1, List), list_to_binary(UpdatedList). diff --git a/apps/arweave/src/ar_metrics.erl b/apps/arweave/src/ar_metrics.erl index 443fb377d..ed67d6f4b 100644 --- a/apps/arweave/src/ar_metrics.erl +++ b/apps/arweave/src/ar_metrics.erl @@ -470,17 +470,17 @@ get_status_class({error, connect_timeout}) -> "connect_timeout"; get_status_class({error, timeout}) -> "timeout"; -get_status_class({error,{shutdown,timeout}}) -> +get_status_class({error, {shutdown, timeout}}) -> "shutdown_timeout"; get_status_class({error, econnrefused}) -> "econnrefused"; -get_status_class({error, {shutdown,econnrefused}}) -> +get_status_class({error, {shutdown, econnrefused}}) -> "shutdown_econnrefused"; -get_status_class({error, {shutdown,ehostunreach}}) -> +get_status_class({error, {shutdown, ehostunreach}}) -> "shutdown_ehostunreach"; -get_status_class({error, {shutdown,normal}}) -> +get_status_class({error, {shutdown, normal}}) -> "shutdown_normal"; -get_status_class({error, {closed,_}}) -> +get_status_class({error, {closed, _}}) -> "closed"; get_status_class({error, noproc}) -> "noproc"; diff --git a/apps/arweave/src/ar_metrics_collector.erl b/apps/arweave/src/ar_metrics_collector.erl index aa90bfd0a..5f39fef2c 100644 --- a/apps/arweave/src/ar_metrics_collector.erl +++ b/apps/arweave/src/ar_metrics_collector.erl @@ -7,8 +7,6 @@ collect_mf/2 ]). --import(prometheus_model_helpers, [create_mf/4]). - -include_lib("prometheus/include/prometheus.hrl"). -define(METRIC_NAME_PREFIX, "arweave_"). @@ -33,7 +31,7 @@ deregister_cleanup(_Registry) -> ok. %% =================================================================== add_metric_family({Name, Type, Help, Metrics}, Callback) -> - Callback(create_mf(?METRIC_NAME(Name), Help, Type, Metrics)). + Callback(prometheus_model_helpers:create_mf(?METRIC_NAME(Name), Help, Type, Metrics)). 
metrics() -> [ diff --git a/apps/arweave/src/ar_mining_io.erl b/apps/arweave/src/ar_mining_io.erl index 146f581e1..da34c6a74 100644 --- a/apps/arweave/src/ar_mining_io.erl +++ b/apps/arweave/src/ar_mining_io.erl @@ -158,7 +158,7 @@ start_io_thread(PartitionNumber, MiningAddress, StoreID, #state{ io_threads = Th when is_map_key({PartitionNumber, MiningAddress, StoreID}, Threads) -> State; start_io_thread(PartitionNumber, MiningAddress, StoreID, - #state{ io_threads = Threads, io_thread_monitor_refs = Refs, + #state{ io_threads = Threads, io_thread_monitor_refs = Refs, session_ref = SessionRef } = State) -> Thread = spawn( @@ -205,9 +205,9 @@ io_thread(PartitionNumber, MiningAddress, StoreID, SessionRef) -> io_thread(PartitionNumber, MiningAddress, StoreID, Ref); {WhichChunk, {Candidate, RecallRangeStart}} -> case ar_mining_server:is_session_valid(SessionRef, Candidate) of - true -> + true -> read_range(WhichChunk, Candidate, RecallRangeStart, StoreID); - false -> + false -> ok %% Clear the message queue of requests from outdated mining sessions end, io_thread(PartitionNumber, MiningAddress, StoreID, SessionRef) @@ -268,7 +268,7 @@ read_range(_WhichChunk, _Candidate, _RangeStart, Nonce, NonceMax, _ChunkOffsets) read_range(WhichChunk, Candidate, RangeStart, Nonce, NonceMax, []) -> ar_mining_server:recall_chunk(skipped, undefined, Nonce, Candidate), read_range(WhichChunk, Candidate, RangeStart, Nonce + 1, NonceMax, []); -read_range(WhichChunk, Candidate,RangeStart, Nonce, NonceMax, [{EndOffset, Chunk} | ChunkOffsets]) +read_range(WhichChunk, Candidate, RangeStart, Nonce, NonceMax, [{EndOffset, Chunk} | ChunkOffsets]) %% Only 256 KiB chunks are supported at this point. when RangeStart + Nonce * ?DATA_CHUNK_SIZE < EndOffset - ?DATA_CHUNK_SIZE -> ar_mining_server:recall_chunk(skipped, undefined, Nonce, Candidate), diff --git a/apps/arweave/src/ar_mining_server.erl b/apps/arweave/src/ar_mining_server.erl index 1a33ad3bc..c6049c59e 100644 --- a/apps/arweave/src/ar_mining_server.erl +++ b/apps/arweave/src/ar_mining_server.erl @@ -100,7 +100,7 @@ post_solution(Solution) -> is_session_valid(_SessionRef, #mining_candidate{ session_ref = not_set }) -> true; is_session_valid(undefined, _Candidate) -> - false; + false; is_session_valid(SessionRef, #mining_candidate{ session_ref = SessionRef }) -> true; is_session_valid(_SessionRef, _Candidate) -> @@ -137,7 +137,7 @@ handle_call(Request, _From, State) -> handle_cast(pause, State) -> #state{ session = Session } = State, ar_mining_stats:mining_paused(), - %% Setting paused to true allows all pending tasks to complete, but prevents new output to be + %% Setting paused to true allows all pending tasks to complete, but prevents new output to be %% distributed. Setting diff to infnity ensures that no solutions are found. 
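The pause comment above leans on Erlang's term ordering: every number compares less than every atom, so once diff is the atom infinity the hash-versus-difficulty comparison in handle_task({computed_h2, ...}) further below can never succeed and no solution is reported. A one-line check (illustrative):

%% Numbers sort before atoms in Erlang's term order, so no decoded hash can
%% exceed a difficulty of 'infinity'.
false = binary:decode_unsigned(crypto:strong_rand_bytes(32), big) > infinity.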
{noreply, State#state{ diff = infinity, session = Session#mining_session{ paused = true } }}; @@ -172,7 +172,7 @@ handle_cast({add_task, {TaskType, Candidate} = Task}, State) -> {partition_number, Candidate#mining_candidate.partition_number}]), {noreply, State} end; - + handle_cast(handle_task, #state{ task_queue = Q } = State) -> case gb_sets:is_empty(Q) of true -> @@ -195,7 +195,7 @@ handle_cast({compute_h2_for_peer, Candidate, H1List}, State) -> partition_number = PartitionNumber, partition_upper_bound = PartitionUpperBound } = Candidate, - + {_RecallRange1Start, RecallRange2Start} = ar_block:get_recall_range(H0, PartitionNumber, PartitionUpperBound), Range2Exists = ar_mining_io:read_recall_range(chunk2, Candidate, RecallRange2Start), @@ -439,7 +439,7 @@ distribute_output([{PartitionNumber, MiningAddress} | Partitions], Candidate, St %% @doc Before loading a recall range we reserve enough cache space for the whole range. This %% helps make sure we don't exceed the cache limit (too much) when there are many parallel -%% reads. +%% reads. %% %% As we compute hashes we'll decrement the chunk_cache_size to indicate available cache space. reserve_cache_space() -> @@ -476,9 +476,8 @@ handle_task({computed_output, Args}, State) -> start_interval_number = CurrentStartIntervalNumber, partition_upper_bound = CurrentPartitionUpperBound } = MiningSession, MiningSession2 = - case {CurrentStartIntervalNumber, CurrentNextSeed, CurrentPartitionUpperBound, - CurrentNextVDFDifficulty} - == {StartIntervalNumber, NextSeed, PartitionUpperBound, NextVDFDifficulty} of + case {CurrentStartIntervalNumber, CurrentNextSeed, CurrentPartitionUpperBound, CurrentNextVDFDifficulty} == + {StartIntervalNumber, NextSeed, PartitionUpperBound, NextVDFDifficulty} of true -> MiningSession; false -> @@ -493,7 +492,7 @@ handle_task({computed_output, Args}, State) -> start_interval_number = StartIntervalNumber, partition_upper_bound = PartitionUpperBound }, State), - ?LOG_INFO([{event, new_mining_session}, + ?LOG_INFO([{event, new_mining_session}, {session_ref, ar_util:safe_encode(NewMiningSession#mining_session.ref)}, {step_number, StepNumber}, {interval_number, StartIntervalNumber}, @@ -548,7 +547,7 @@ handle_task({chunk2, Candidate}, State) -> {noreply, State#state{ session = Session2, hashing_threads = Threads2 }}; {{chunk1, H1}, Map2} -> %% Decrement 1 for chunk2: - %% we're computing h2 for a peer so chunk1 was not previously read or cached + %% we're computing h2 for a peer so chunk1 was not previously read or cached %% on this node update_chunk_cache_size(-1), {Thread, Threads2} = pick_hashing_thread(Threads), @@ -671,7 +670,7 @@ handle_task({computed_h2, Candidate}, State) -> true -> #mining_candidate{ chunk2 = Chunk2, h0 = H0, h2 = H2, mining_address = MiningAddress, - nonce = Nonce, partition_number = PartitionNumber, + nonce = Nonce, partition_number = PartitionNumber, partition_upper_bound = PartitionUpperBound, cm_lead_peer = Peer } = Candidate, case binary:decode_unsigned(H2, big) > get_difficulty(State, Candidate) of @@ -795,13 +794,13 @@ prepare_solution(Candidate, State) -> case is_session_valid(SessionRef, Candidate) of true -> #mining_candidate{ - mining_address = MiningAddress, next_seed = NextSeed, + mining_address = MiningAddress, next_seed = NextSeed, next_vdf_difficulty = NextVDFDifficulty, nonce = Nonce, nonce_limiter_output = NonceLimiterOutput, partition_number = PartitionNumber, partition_upper_bound = PartitionUpperBound, poa2 = PoA2, preimage = Preimage, seed = Seed, start_interval_number = 
StartIntervalNumber, step_number = StepNumber } = Candidate, - + Solution = #mining_solution{ mining_address = MiningAddress, merkle_rebase_threshold = RebaseThreshold, @@ -821,10 +820,10 @@ prepare_solution(Candidate, State) -> false -> error end. - + prepare_solution(last_step_checkpoints, Candidate, Solution) -> #mining_candidate{ - next_seed = NextSeed, next_vdf_difficulty = NextVDFDifficulty, + next_seed = NextSeed, next_vdf_difficulty = NextVDFDifficulty, start_interval_number = StartIntervalNumber, step_number = StepNumber } = Candidate, LastStepCheckpoints = ar_nonce_limiter:get_step_checkpoints( StepNumber, NextSeed, StartIntervalNumber, NextVDFDifficulty), @@ -885,7 +884,7 @@ prepare_solution(proofs, Candidate, Solution) -> solution_hash = H2, recall_byte1 = RecallByte1, recall_byte2 = RecallByte2 }) end; -prepare_solution(poa1, Candidate, #mining_solution{ poa1 = not_set } = Solution ) -> +prepare_solution(poa1, Candidate, #mining_solution{ poa1 = not_set } = Solution) -> #mining_solution{ mining_address = MiningAddress, partition_number = PartitionNumber, recall_byte1 = RecallByte1 } = Solution, diff --git a/apps/arweave/src/ar_mining_stats.erl b/apps/arweave/src/ar_mining_stats.erl index 2a02586e3..65714e9ee 100644 --- a/apps/arweave/src/ar_mining_stats.erl +++ b/apps/arweave/src/ar_mining_stats.erl @@ -34,7 +34,7 @@ current_h1_from_peer_hps = 0.0, total_h2_to_peer = 0, total_h2_from_peer = 0, - + partitions = [], peers = [] }). @@ -199,7 +199,7 @@ reset_count(Key, Now) -> ets:insert(?MODULE, [{Key, Now, 0}]). get_average(Key, Now) -> - case ets:lookup(?MODULE, Key) of + case ets:lookup(?MODULE, Key) of [] -> 0.0; [{_, Start, _Count}] when Now - Start =:= 0 -> @@ -210,7 +210,7 @@ get_average(Key, Now) -> end. get_count(Key) -> - case ets:lookup(?MODULE, Key) of + case ets:lookup(?MODULE, Key) of [] -> 0; [{_, _Start, Count}] -> @@ -218,7 +218,7 @@ get_count(Key) -> end. get_start(Key) -> - case ets:lookup(?MODULE, Key) of + case ets:lookup(?MODULE, Key) of [] -> undefined; [{_, Start, _Count}] -> @@ -226,7 +226,7 @@ get_start(Key) -> end. get_total_data_size() -> - case ets:lookup(?MODULE, total_data_size) of + case ets:lookup(?MODULE, total_data_size) of [] -> 0; [{_, TotalDataSize}] -> @@ -284,7 +284,7 @@ optimal_overall_read_mibps(VDFSpeed, TotalDataSize, WeaveSize) -> (100.0 / VDFSpeed) * NumPartitions * (1 + (TotalDataSize / WeaveSize)). optimal_partition_read_mibps(undefined, _PartitionDataSize, _TotalDataSize, _WeaveSize) -> - 0.0; + 0.0; optimal_partition_read_mibps(VDFSpeed, PartitionDataSize, TotalDataSize, WeaveSize) -> (100.0 / VDFSpeed) * (PartitionDataSize / ?PARTITION_SIZE) * (1 + (TotalDataSize / WeaveSize)). @@ -477,11 +477,11 @@ format_report(Report, WeaveSize) -> ), PartitionTable = format_partition_report(Report, WeaveSize), PeerTable = format_peer_report(Report), - + io_lib:format("\n~s~s~s", [Preamble, PartitionTable, PeerTable]). 
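As a sanity check on the optimal_partition_read_mibps clause above, here is one worked instance of the values asserted in test_optimal_stats further below: with P standing for ?PARTITION_SIZE, a fully packed partition on a fully synced ten-partition weave at a VDF speed of 1 second per step gives

\[ \frac{100.0}{1.0} \times \frac{P}{P} \times \left(1 + \frac{10P}{10P}\right) = 200.0 \ \text{MiB/s}; \]

doubling the VDF step time halves this to 100.0, and a quarter-full partition scales it to 50.0, matching the test's expectations.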
format_partition_report(Report, WeaveSize) -> - Header = + Header = "Local mining stats:\n" "+-----------+-----------+----------+---------------+---------------+---------------+------------+------------+--------------+\n" "| Partition | Data Size | % of Max | Read (Cur) | Read (Avg) | Read (Ideal) | Hash (Cur) | Hash (Avg) | Hash (Ideal) |\n" @@ -540,7 +540,7 @@ format_partition_row(PartitionReport) -> format_peer_report(#report{ peers = [] }) -> ""; format_peer_report(Report) -> - Header = + Header = "\n" "Coordinated mining cluster stats:\n" "+----------------------+-------------+-------------+---------------+---------------+---------+---------+\n" @@ -629,12 +629,12 @@ test_local_stats(Fun, Stat) -> timer:sleep(1000), Fun(1), Fun(1), - + Fun(2), TotalStart2 = get_start({partition, 2, Stat, total}), CurrentStart2 = get_start({partition, 2, Stat, current}), Fun(2), - + ?assert(TotalStart1 /= TotalStart2), ?assert(CurrentStart1 /= CurrentStart2), ?assertEqual(0.0, get_average({partition, 1, Stat, total}, TotalStart1)), @@ -755,12 +755,12 @@ test_peer_stats(Fun, Stat) -> timer:sleep(1000), Fun(Peer1, 5), Fun(Peer1, 15), - + Fun(Peer2, 1), TotalStart2 = get_start({peer, Peer2, Stat, total}), CurrentStart2 = get_start({peer, Peer2, Stat, current}), Fun(Peer2, 19), - + ?assert(TotalStart1 /= TotalStart2), ?assert(CurrentStart1 /= CurrentStart2), ?assertEqual(0.0, get_average({peer, Peer1, Stat, total}, TotalStart1)), @@ -873,23 +873,23 @@ test_optimal_stats() -> ?assertEqual(250.0, optimal_overall_read_mibps( 2.0, floor(4.75 * ?PARTITION_SIZE), floor(19 * ?PARTITION_SIZE))), - ?assertEqual(0.0, + ?assertEqual(0.0, optimal_partition_read_mibps( undefined, ?PARTITION_SIZE, floor(10 * ?PARTITION_SIZE), floor(10 * ?PARTITION_SIZE))), - ?assertEqual(200.0, + ?assertEqual(200.0, optimal_partition_read_mibps( 1.0, ?PARTITION_SIZE, floor(10 * ?PARTITION_SIZE), floor(10 * ?PARTITION_SIZE))), - ?assertEqual(100.0, + ?assertEqual(100.0, optimal_partition_read_mibps( 2.0, ?PARTITION_SIZE, floor(10 * ?PARTITION_SIZE), floor(10 * ?PARTITION_SIZE))), - ?assertEqual(50.0, + ?assertEqual(50.0, optimal_partition_read_mibps( 1.0, floor(0.25 * ?PARTITION_SIZE), floor(10 * ?PARTITION_SIZE), floor(10 * ?PARTITION_SIZE))), - ?assertEqual(160.0, + ?assertEqual(160.0, optimal_partition_read_mibps( 1.0, ?PARTITION_SIZE, floor(6 * ?PARTITION_SIZE), floor(10 * ?PARTITION_SIZE))). @@ -948,15 +948,15 @@ test_report() -> ar_mining_stats:h2_received_from_peer(Peer2), ar_mining_stats:h2_received_from_peer(Peer2), ar_mining_stats:h2_received_from_peer(Peer2), - - Report1 = generate_report([], [], WeaveSize, Now+1000), + + Report1 = generate_report([], [], WeaveSize, Now + 1000), ?assertEqual(#report{ now = Now+1000 }, Report1), log_report(format_report(Report1, WeaveSize)), - Report2 = generate_report(Partitions, Peers, WeaveSize, Now+1000), + Report2 = generate_report(Partitions, Peers, WeaveSize, Now + 1000), ReportString = format_report(Report2, WeaveSize), log_report(ReportString), - ?assertEqual(#report{ + ?assertEqual(#report{ now = Now+1000, vdf_speed = 3.0, total_data_size = floor(0.6 * ?PARTITION_SIZE), @@ -1031,4 +1031,3 @@ test_report() -> ] }, Report2). 
- \ No newline at end of file diff --git a/apps/arweave/src/ar_node.erl b/apps/arweave/src/ar_node.erl index b557829f1..31604c934 100644 --- a/apps/arweave/src/ar_node.erl +++ b/apps/arweave/src/ar_node.erl @@ -278,8 +278,7 @@ get_recent_partition_upper_bound_by_prev_h(H, Diff, [], _Genesis) -> get_recent_partition_upper_bound_by_prev_h_short_cache_test() -> ar_block_cache:new(block_cache, B0 = test_block(1, 1, <<>>)), H0 = B0#block.indep_hash, - BI = lists:reverse([{H0, 20, <<>>} - | [{crypto:strong_rand_bytes(48), 20, <<>>} || _ <- lists:seq(1, 99)]]), + BI = lists:reverse([{H0, 20, <<>>} | [{crypto:strong_rand_bytes(48), 20, <<>>} || _ <- lists:seq(1, 99)]]), ets:insert(node_state, {recent_block_index, BI}), ?assertEqual(not_found, get_recent_partition_upper_bound_by_prev_h(B0#block.indep_hash)), ?assertEqual(not_found, diff --git a/apps/arweave/src/ar_node_sup.erl b/apps/arweave/src/ar_node_sup.erl index f11c66cad..1f909678b 100644 --- a/apps/arweave/src/ar_node_sup.erl +++ b/apps/arweave/src/ar_node_sup.erl @@ -1,6 +1,6 @@ -%% This Source Code Form is subject to the terms of the GNU General -%% Public License, v. 2.0. If a copy of the GPLv2 was not distributed -%% with this file, You can obtain one at +%% This Source Code Form is subject to the terms of the GNU General +%% Public License, v. 2.0. If a copy of the GPLv2 was not distributed +%% with this file, You can obtain one at %% https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html -module(ar_node_sup). diff --git a/apps/arweave/src/ar_node_utils.erl b/apps/arweave/src/ar_node_utils.erl index ba92737e8..42b7688c5 100644 --- a/apps/arweave/src/ar_node_utils.erl +++ b/apps/arweave/src/ar_node_utils.erl @@ -509,8 +509,10 @@ validate_block(reward_history_hash, {NewB, OldB, Wallets, BlockAnchors, RecentTX #block{ reward_history = RewardHistory } = OldB, HashRate = ar_difficulty:get_hash_rate(Diff), RewardAddr = NewB#block.reward_addr, - RewardHistory2 = lists:sublist([{RewardAddr, HashRate, Reward, Denomination} - | RewardHistory], ?REWARD_HISTORY_BLOCKS), + RewardHistory2 = lists:sublist( + [{RewardAddr, HashRate, Reward, Denomination} | RewardHistory], + ?REWARD_HISTORY_BLOCKS + ), case ar_block:reward_history_hash(RewardHistory2) of RewardHistoryHash -> validate_block(block_time_history_hash, {NewB, OldB, Wallets, BlockAnchors, @@ -544,7 +546,7 @@ validate_block(next_vdf_difficulty, {NewB, OldB, Wallets, BlockAnchors, RecentTX RecentTXMap}); true -> ExpectedNextVDFDifficulty = ar_block:compute_next_vdf_difficulty(OldB), - #nonce_limiter_info{ next_vdf_difficulty = NextVDFDifficulty } = + #nonce_limiter_info{ next_vdf_difficulty = NextVDFDifficulty } = NewB#block.nonce_limiter_info, case ExpectedNextVDFDifficulty == NextVDFDifficulty of false -> @@ -632,8 +634,7 @@ validate_block(merkle_rebase_support_threshold, {NewB, OldB}) -> #block{ height = Height } = NewB, case Height > ar_fork:height_2_7() of true -> - case NewB#block.merkle_rebase_support_threshold - == OldB#block.merkle_rebase_support_threshold of + case NewB#block.merkle_rebase_support_threshold == OldB#block.merkle_rebase_support_threshold of false -> {error, invalid_merkle_rebase_support_threshold}; true -> @@ -642,8 +643,7 @@ validate_block(merkle_rebase_support_threshold, {NewB, OldB}) -> false -> case Height == ar_fork:height_2_7() of true -> - case NewB#block.merkle_rebase_support_threshold - == OldB#block.weave_size of + case NewB#block.merkle_rebase_support_threshold == OldB#block.weave_size of true -> valid; false -> diff --git 
a/apps/arweave/src/ar_node_worker.erl b/apps/arweave/src/ar_node_worker.erl index 027e5a31a..5b8ed0d2e 100644 --- a/apps/arweave/src/ar_node_worker.erl +++ b/apps/arweave/src/ar_node_worker.erl @@ -148,8 +148,8 @@ init([]) -> end, gen_server:cast(?MODULE, process_task_queue), ets:insert(node_state, [ - {is_joined, false}, - {hash_list_2_0_for_1_0_blocks, read_hash_list_2_0_for_1_0_blocks()} + {is_joined, false}, + {hash_list_2_0_for_1_0_blocks, read_hash_list_2_0_for_1_0_blocks()} ]), %% Start the HTTP server. ok = ar_http_iface_server:start(), @@ -370,13 +370,13 @@ handle_info({event, nonce_limiter, initialized}, State) -> [{_, {Height, Blocks, BI}}] = ets:lookup(node_state, join_state), ar_storage:store_block_index(BI), B = hd(Blocks), - RewardHistory = [{H, {Addr, HashRate, Reward, Denomination}} - || {{Addr, HashRate, Reward, Denomination}, {H, _, _}} + RewardHistory = [{H, {Addr, HashRate, Reward, Denomination}} || + {{Addr, HashRate, Reward, Denomination}, {H, _, _}} <- lists:zip(B#block.reward_history, lists:sublist(BI, length(B#block.reward_history)))], ar_storage:store_reward_history_part2(RewardHistory), - BlockTimeHistory = [{H, {BlockInterval, VDFInterval, ChunkCount}} - || {{BlockInterval, VDFInterval, ChunkCount}, {H, _, _}} + BlockTimeHistory = [{H, {BlockInterval, VDFInterval, ChunkCount}} || + {{BlockInterval, VDFInterval, ChunkCount}, {H, _, _}} <- lists:zip(B#block.block_time_history, lists:sublist(BI, length(B#block.block_time_history)))], ar_storage:store_block_time_history_part2(BlockTimeHistory), @@ -393,23 +393,23 @@ handle_info({event, nonce_limiter, initialized}, State) -> {BlockAnchors, RecentTXMap} = get_block_anchors_and_recent_txs_map(BlockTXPairs), {Rate, ScheduledRate} = {B#block.usd_to_ar_rate, B#block.scheduled_usd_to_ar_rate}, ets:insert(node_state, [ - {recent_block_index, lists:sublist(BI, ?BLOCK_INDEX_HEAD_LEN)}, - {is_joined, true}, - {current, Current}, - {timestamp, B#block.timestamp}, - {nonce_limiter_info, B#block.nonce_limiter_info}, - {wallet_list, B#block.wallet_list}, - {height, Height}, - {hash, B#block.hash}, - {reward_pool, B#block.reward_pool}, - {diff, B#block.diff}, - {cumulative_diff, B#block.cumulative_diff}, - {last_retarget, B#block.last_retarget}, - {weave_size, B#block.weave_size}, - {block_txs_pairs, BlockTXPairs}, - {block_anchors, BlockAnchors}, - {recent_txs_map, RecentTXMap}, - {usd_to_ar_rate, Rate}, + {recent_block_index, lists:sublist(BI, ?BLOCK_INDEX_HEAD_LEN)}, + {is_joined, true}, + {current, Current}, + {timestamp, B#block.timestamp}, + {nonce_limiter_info, B#block.nonce_limiter_info}, + {wallet_list, B#block.wallet_list}, + {height, Height}, + {hash, B#block.hash}, + {reward_pool, B#block.reward_pool}, + {diff, B#block.diff}, + {cumulative_diff, B#block.cumulative_diff}, + {last_retarget, B#block.last_retarget}, + {weave_size, B#block.weave_size}, + {block_txs_pairs, BlockTXPairs}, + {block_anchors, BlockAnchors}, + {recent_txs_map, RecentTXMap}, + {usd_to_ar_rate, Rate}, {scheduled_usd_to_ar_rate, ScheduledRate}, {price_per_gib_minute, B#block.price_per_gib_minute}, {kryder_plus_rate_multiplier, B#block.kryder_plus_rate_multiplier}, @@ -456,7 +456,7 @@ handle_info({event, miner, {found_solution, _Solution, _PoACache, _PoA2Cache}}, #{ automine := false, miner_2_6 := undefined } = State) -> {noreply, State}; handle_info({event, miner, {found_solution, Solution, PoACache, PoA2Cache}}, State) -> - #mining_solution{ + #mining_solution{ last_step_checkpoints = LastStepCheckpoints, merkle_rebase_threshold = 
MerkleRebaseThreshold, mining_address = MiningAddress, @@ -523,8 +523,8 @@ handle_info({event, miner, {found_solution, Solution, PoACache, PoA2Cache}}, Sta global_step_number = PrevStepNumber } = TipNonceLimiterInfo, PrevIntervalNumber = PrevStepNumber div ?NONCE_LIMITER_RESET_FREQUENCY, PassesSeedCheck = PassesTimelineCheck andalso - {IntervalNumber, NonceLimiterNextSeed, NonceLimiterNextVDFDifficulty} - == {PrevIntervalNumber, PrevNextSeed, PrevNextVDFDifficulty}, + {IntervalNumber, NonceLimiterNextSeed, NonceLimiterNextVDFDifficulty} == + {PrevIntervalNumber, PrevNextSeed, PrevNextVDFDifficulty}, PrevB = ar_block_cache:get(block_cache, PrevH), CorrectRebaseThreshold = case PassesSeedCheck of @@ -679,7 +679,7 @@ handle_info({event, miner, {found_solution, Solution, PoACache, PoA2Cache}}, Sta ?LOG_INFO([{event, mined_block}, {indep_hash, ar_util:encode(H)}, {solution, ar_util:encode(SolutionH)}, {height, Height}, {txs, length(B#block.txs)}, - {chunks, + {chunks, case B#block.recall_byte2 of undefined -> 1; _ -> 2 @@ -810,7 +810,7 @@ terminate(Reason, _State) -> maps:put(TXID, {ar_mempool:get_tx(TXID), Status}, Acc) end, #{}, - ar_mempool:get_priority_set() + ar_mempool:get_priority_set() ), dump_mempool(Mempool, MempoolSize); _ -> @@ -1076,8 +1076,7 @@ apply_block3(B, [PrevB | _] = PrevBlocks, Timestamp, State) -> HashRate = ar_difficulty:get_hash_rate(B#block.diff), Denomination2 = B#block.denomination, Addr = B#block.reward_addr, - RewardHistory2 = [{Addr, HashRate, Reward, Denomination2} - | RewardHistory], + RewardHistory2 = [{Addr, HashRate, Reward, Denomination2} | RewardHistory], Len = ?REWARD_HISTORY_BLOCKS + ?STORE_BLOCKS_BEHIND_CURRENT, RewardHistory3 = lists:sublist(RewardHistory2, Len), B#block{ reward_history = RewardHistory3 }; @@ -1241,10 +1240,14 @@ pack_block_with_transactions(#block{ height = Height, diff = Diff } = B, PrevB) {ok, RootHash} = ar_wallets:add_wallets(PrevB#block.wallet_list, Accounts2, Height, Denomination2), HashRate = ar_difficulty:get_hash_rate(Diff), - RewardHistory2 = lists:sublist([{RewardAddr, HashRate, Reward2, Denomination2} - | RewardHistory], ?REWARD_HISTORY_BLOCKS + ?STORE_BLOCKS_BEHIND_CURRENT), - RewardHistory3 = lists:sublist([{RewardAddr, HashRate, Reward2, Denomination2} - | RewardHistory], ?REWARD_HISTORY_BLOCKS), + RewardHistory2 = lists:sublist( + [{RewardAddr, HashRate, Reward2, Denomination2} | RewardHistory], + ?REWARD_HISTORY_BLOCKS + ?STORE_BLOCKS_BEHIND_CURRENT + ), + RewardHistory3 = lists:sublist( + [{RewardAddr, HashRate, Reward2, Denomination2} | RewardHistory], + ?REWARD_HISTORY_BLOCKS + ), B2#block{ wallet_list = RootHash, reward_pool = EndowmentPool2, @@ -1526,22 +1529,22 @@ apply_validated_block2(State, B, PrevBlocks, Orphans, RecentBI, BlockTXPairs) -> ar_storage:store_reward_history_part(AddedBlocks), ar_storage:store_block_time_history_part(AddedBlocks, lists:last(PrevBlocks)), ets:insert(node_state, [ - {recent_block_index, RecentBI2}, - {current, B#block.indep_hash}, - {timestamp, B#block.timestamp}, - {wallet_list, B#block.wallet_list}, - {height, B#block.height}, - {hash, B#block.hash}, - {reward_pool, B#block.reward_pool}, - {diff, B#block.diff}, - {cumulative_diff, B#block.cumulative_diff}, - {last_retarget, B#block.last_retarget}, - {weave_size, B#block.weave_size}, - {nonce_limiter_info, B#block.nonce_limiter_info}, - {block_txs_pairs, BlockTXPairs}, - {block_anchors, BlockAnchors}, - {recent_txs_map, RecentTXMap}, - {usd_to_ar_rate, Rate}, + {recent_block_index, RecentBI2}, + {current, B#block.indep_hash}, + 
{timestamp, B#block.timestamp}, + {wallet_list, B#block.wallet_list}, + {height, B#block.height}, + {hash, B#block.hash}, + {reward_pool, B#block.reward_pool}, + {diff, B#block.diff}, + {cumulative_diff, B#block.cumulative_diff}, + {last_retarget, B#block.last_retarget}, + {weave_size, B#block.weave_size}, + {nonce_limiter_info, B#block.nonce_limiter_info}, + {block_txs_pairs, BlockTXPairs}, + {block_anchors, BlockAnchors}, + {recent_txs_map, RecentTXMap}, + {usd_to_ar_rate, Rate}, {scheduled_usd_to_ar_rate, ScheduledRate}, {price_per_gib_minute, B#block.price_per_gib_minute}, {kryder_plus_rate_multiplier, B#block.kryder_plus_rate_multiplier}, @@ -1641,11 +1644,13 @@ record_economic_metrics2(B, PrevB) -> true -> #block{ reward_history = RewardHistory } = B, RewardHistorySize = length(RewardHistory), - AverageHashRate = ar_util:safe_divide(lists:sum([HR - || {_, HR, _, _} <- RewardHistory]), RewardHistorySize), + AverageHashRate = ar_util:safe_divide( + lists:sum([HR || {_, HR, _, _} <- RewardHistory]), RewardHistorySize + ), prometheus_gauge:set(average_network_hash_rate, AverageHashRate), - AverageBlockReward = ar_util:safe_divide(lists:sum([R - || {_, _, R, _} <- RewardHistory]), RewardHistorySize), + AverageBlockReward = ar_util:safe_divide( + lists:sum([R || {_, _, R, _} <- RewardHistory]), RewardHistorySize + ), prometheus_gauge:set(average_block_reward, AverageBlockReward), prometheus_gauge:set(price_per_gibibyte_minute, B#block.price_per_gib_minute), BlockInterval = ar_block:compute_block_interval(PrevB), @@ -1654,8 +1659,8 @@ record_economic_metrics2(B, PrevB) -> PrevB#block.kryder_plus_rate_multiplier_latch, PrevB#block.kryder_plus_rate_multiplier, PrevB#block.denomination, BlockInterval}, - {ExpectedBlockReward, - _, _, _, _} = ar_pricing:get_miner_reward_endowment_pool_debt_supply(Args), + {ExpectedBlockReward, _, _, _, _} = + ar_pricing:get_miner_reward_endowment_pool_debt_supply(Args), prometheus_gauge:set(expected_block_reward, ExpectedBlockReward), LegacyPricePerGibibyte = ar_pricing:get_storage_cost(1024 * 1024 * 1024, os:system_time(second), PrevB#block.usd_to_ar_rate, B#block.height), diff --git a/apps/arweave/src/ar_nonce_limiter.erl b/apps/arweave/src/ar_nonce_limiter.erl index 613ac310b..be3392dc0 100644 --- a/apps/arweave/src/ar_nonce_limiter.erl +++ b/apps/arweave/src/ar_nonce_limiter.erl @@ -6,7 +6,7 @@ get_current_step_number/1, get_seed_data/2, get_step_checkpoints/4, get_steps/4, validate_last_step_checkpoints/3, request_validation/3, get_or_init_nonce_limiter_info/1, get_or_init_nonce_limiter_info/2, - apply_external_update/2, get_session/1, + apply_external_update/2, get_session/1, compute/3, resolve_remote_server_raw_peers/0, maybe_add_entropy/4, mix_seed/2]). @@ -72,9 +72,9 @@ get_seed_data(StepNumber, PrevB) -> partition_upper_bound = PartitionUpperBound, next_partition_upper_bound = NextPartitionUpperBound, %% VDF difficulty in use at the previous block - vdf_difficulty = VDFDifficulty, + vdf_difficulty = VDFDifficulty, %% Next VDF difficulty scheduled at the previous block - next_vdf_difficulty = PrevNextVDFDifficulty + next_vdf_difficulty = PrevNextVDFDifficulty } = NonceLimiterInfo, true = StepNumber > PrevStepNumber, case get_entropy_reset_point(PrevStepNumber, StepNumber) of @@ -151,8 +151,7 @@ validate_last_step_checkpoints(_B, _PrevB, _PrevOutput) -> %% @doc Determine whether StepNumber has passed the entropy reset line. If it has return the %% reset line, otherwise return none. 
get_entropy_reset_point(PrevStepNumber, StepNumber) -> - ResetLine = (PrevStepNumber div ?NONCE_LIMITER_RESET_FREQUENCY + 1) - * ?NONCE_LIMITER_RESET_FREQUENCY, + ResetLine = (PrevStepNumber div ?NONCE_LIMITER_RESET_FREQUENCY + 1) * ?NONCE_LIMITER_RESET_FREQUENCY, case ResetLine > StepNumber of true -> none; @@ -208,7 +207,7 @@ request_validation(H, #nonce_limiter_info{ output = Output, %% PrevOutput x %% |----------------------| StepsToValidate %% |-----------------------------------| SessionSteps - %% StartStepNumber x + %% StartStepNumber x %% StartOutput x %% |-------------| ComputedSteps %% --------------> NumAlreadyComputed @@ -747,10 +746,10 @@ apply_base_block(B, State) -> last_step_checkpoints = LastStepCheckpoints, vdf_difficulty = VDFDifficulty, next_vdf_difficulty = NextVDFDifficulty } = B#block.nonce_limiter_info, - Session = #vdf_session{ + Session = #vdf_session{ seed = Seed, step_number = StepNumber, upper_bound = UpperBound, next_upper_bound = NextUpperBound, - vdf_difficulty = VDFDifficulty, next_vdf_difficulty = NextVDFDifficulty , + vdf_difficulty = VDFDifficulty, next_vdf_difficulty = NextVDFDifficulty, step_checkpoints_map = #{ StepNumber => LastStepCheckpoints }, steps = [Output] }, SessionKey = session_key(B#block.nonce_limiter_info), @@ -1000,7 +999,7 @@ apply_external_update2(Update, State) -> %% computed - have now been invalidated. NextSessionStart = (SessionInterval + 1) * ?NONCE_LIMITER_RESET_FREQUENCY, {_, Steps} = get_step_range( - Session, min(RangeStart, NextSessionStart), StepNumber), + Session, min(RangeStart, NextSessionStart), StepNumber), State2 = apply_external_update3(State, SessionKey, CurrentSessionKey, Session, Steps), {reply, ok, State2} @@ -1050,7 +1049,7 @@ apply_external_update2(Update, State) -> %% noticed the gap and requested the full VDF session be sent - %% which may contain previously processed steps in a addition to %% the missing ones. - %% + %% %% To avoid processing those steps twice, the client grabs %% CurrentStepNumber (our most recently processed step number) %% and ignores it and any lower steps found in Session. @@ -1094,13 +1093,13 @@ apply_external_update3(State, SessionKey, CurrentSessionKey, Session, Steps) -> %% primarily used to manage "overflow" steps. %% %% Between blocks nodes will add all computed VDF steps to the same session - -%% *even if* the new steps have crossed the entropy reset line and therefore -%% could be added to a new session (i.e. "overflow steps"). Once a block is +%% *even if* the new steps have crossed the entropy reset line and therefore +%% could be added to a new session (i.e. "overflow steps"). Once a block is %% processed the node will open a new session and re-allocate all the steps past %% the entropy reset line to that new session. However, any steps that have crossed %% *TWO* entropy reset lines are no longer valid (the seed they were generated with %% has changed with the arrival of a new block) -%% +%% %% Note: This overlap in session caching is intentional. The intention is to %% quickly access the steps when validating B1 -> reset line -> B2 given the %% current fork of B1 -> B2' -> reset line -> B3 i.e. 
we can query all steps by @@ -1131,7 +1130,7 @@ get_step_range(Steps, StepNumber, _RangeStart, RangeEnd) get_step_range(Steps, StepNumber, RangeStart, RangeEnd) -> %% Clip RangeStart to the earliest step number in Steps RangeStart2 = max(RangeStart, StepNumber - length(Steps) + 1), - RangeSteps = + RangeSteps = case StepNumber > RangeEnd of true -> %% Exclude steps beyond the end of the session @@ -1146,7 +1145,7 @@ get_step_range(Steps, StepNumber, RangeStart, RangeEnd) -> {RangeEnd2, RangeSteps2}. %% @doc Update the VDF session cache based on new info from a validated block. -cache_block_session(State, SessionKey, PrevSessionKey, CurrentSessionKey, +cache_block_session(State, SessionKey, PrevSessionKey, CurrentSessionKey, StepCheckpointsMap, Seed, UpperBound, NextUpperBound, VDFDifficulty, NextVDFDifficulty) -> Session = case get_session(SessionKey, State) of @@ -1169,7 +1168,7 @@ cache_session(State, SessionKey, CurrentSessionKey, Session) -> #state{ session_by_key = SessionByKey, sessions = Sessions } = State, {NextSeed, Interval, NextVDFDifficulty} = SessionKey, maybe_set_vdf_metrics(SessionKey, CurrentSessionKey, Session), - ?LOG_DEBUG([{event, add_session}, {next_seed, ar_util:encode(NextSeed)}, + ?LOG_DEBUG([{event, add_session}, {next_seed, ar_util:encode(NextSeed)}, {next_vdf_difficulty, Session#vdf_session.next_vdf_difficulty}, {interval, Interval}, {step_number, Session#vdf_session.step_number}]), @@ -1185,7 +1184,7 @@ maybe_set_vdf_metrics(SessionKey, CurrentSessionKey, Session) -> true -> #vdf_session{ step_number = StepNumber, - vdf_difficulty = VDFDifficulty, + vdf_difficulty = VDFDifficulty, next_vdf_difficulty = NextVDFDifficulty } = Session, prometheus_gauge:set(vdf_step, StepNumber), prometheus_gauge:set(vdf_difficulty, [current], VDFDifficulty), @@ -1313,7 +1312,7 @@ test_applies_validated_steps() -> {ok, Output2, _} = compute(2, InitialOutput, B1VDFDifficulty), B2VDFDifficulty = 3, B2NextVDFDifficulty = 4, - B2 = test_block(2, Output2, Seed, NextSeed, [], [Output2], + B2 = test_block(2, Output2, Seed, NextSeed, [], [Output2], B2VDFDifficulty, B2NextVDFDifficulty), ok = ar_events:subscribe(nonce_limiter), assert_validate(B2, B1, valid), @@ -1382,7 +1381,7 @@ test_applies_validated_steps() -> B8VDFDifficulty = 4, %% Change the next_vdf_difficulty to confirm that apply_tip2 handles updating an %% existing VDF session - B8NextVDFDifficulty = 6, + B8NextVDFDifficulty = 6, B8 = test_block(8, Output8, NextSeed, NextSeed2, [], [Output8, Output7, Output6], B8VDFDifficulty, B8NextVDFDifficulty), ar_events:send(node_state, {new_tip, B8, B4}), @@ -1439,7 +1438,7 @@ get_step_range_test() -> ), ?assertEqual( {0, []}, - get_step_range(lists:seq(9, 5, -1), 9 , 10, 14), + get_step_range(lists:seq(9, 5, -1), 9, 10, 14), "Disjoint range B" ), ?assertEqual( diff --git a/apps/arweave/src/ar_nonce_limiter_server_sup.erl b/apps/arweave/src/ar_nonce_limiter_server_sup.erl index a2fb86d76..b72319097 100644 --- a/apps/arweave/src/ar_nonce_limiter_server_sup.erl +++ b/apps/arweave/src/ar_nonce_limiter_server_sup.erl @@ -24,8 +24,7 @@ init([]) -> {ok, Config} = application:get_env(arweave, config), Workers = lists:map( fun(Peer) -> - Name = list_to_atom("ar_nonce_limiter_server_worker_" - ++ ar_util:peer_to_str(Peer)), + Name = list_to_atom("ar_nonce_limiter_server_worker_" ++ ar_util:peer_to_str(Peer)), {Name, {ar_nonce_limiter_server_worker, start_link, [Name, Peer]}, permanent, ?SHUTDOWN_TIMEOUT, worker, [Name]} end, diff --git a/apps/arweave/src/ar_nonce_limiter_server_worker.erl 
b/apps/arweave/src/ar_nonce_limiter_server_worker.erl index 8913e0978..46c585b38 100644 --- a/apps/arweave/src/ar_nonce_limiter_server_worker.erl +++ b/apps/arweave/src/ar_nonce_limiter_server_worker.erl @@ -107,7 +107,7 @@ push_update(SessionKey, StepNumber, Output, Peer, Format, State) -> SessionFound = Response#nonce_limiter_update_response.session_found, RequestedStepNumber = Response#nonce_limiter_update_response.step_number, - case { + case { RequestedFormat == Format, Postpone == 0, SessionFound, diff --git a/apps/arweave/src/ar_p3.erl b/apps/arweave/src/ar_p3.erl index 51b41fd2d..709785b8d 100644 --- a/apps/arweave/src/ar_p3.erl +++ b/apps/arweave/src/ar_p3.erl @@ -65,10 +65,10 @@ handle_call({reverse_charge, Transaction}, _From, State) -> { reply, ar_p3_db:reverse_transaction( - Transaction#p3_transaction.address, + Transaction#p3_transaction.address, Transaction#p3_transaction.id), State - }; + }; handle_call({get_balance, Address, Asset}, _From, State) -> {reply, ar_p3_db:get_balance(Address, Asset), State}; @@ -179,7 +179,7 @@ validate_mod_seq(ModSeq, _Req, ServiceConfig) when is_integer(ModSeq) -> {error, stale_mod_seq} end; validate_mod_seq(_ModSeq, _Req, _ServiceConfig) -> - {error, invalid_mod_seq}. + {error, invalid_mod_seq}. validate_endpoint(Req, ServiceConfig) -> Endpoint = cowboy_req:header(?P3_ENDPOINT_HEADER, Req), @@ -326,7 +326,7 @@ get_block_txs(Height) -> apply_deposits([], _DepositAddress) -> ok; -apply_deposits([TX|TXs], DepositAddress) -> +apply_deposits([TX | TXs], DepositAddress) -> case TX#tx.target == DepositAddress of true -> apply_deposit(TX); diff --git a/apps/arweave/src/ar_p3_config.erl b/apps/arweave/src/ar_p3_config.erl index 7ae3034fc..bb87b679a 100644 --- a/apps/arweave/src/ar_p3_config.erl +++ b/apps/arweave/src/ar_p3_config.erl @@ -153,7 +153,7 @@ parse_payment(BadToken, _PaymentConfig) -> parse_services(ServicesConfig) when is_list(ServicesConfig) -> Services = [parse_service(ServiceConfig, #p3_service{}) || {ServiceConfig} <- ServicesConfig], lists:foldl( - fun(Service, Acc) -> + fun(Service, Acc) -> maps:put(Service#p3_service.endpoint, Service, Acc) end, #{}, @@ -168,7 +168,7 @@ parse_services(ServicesConfig) -> %% {"endpoint": "/info", "modSeq": 1, "rates": {rates}} parse_service([{?P3_ENDPOINT_HEADER, Endpoint} | Rest], ServiceConfig) -> parse_service(Rest, ServiceConfig#p3_service{ endpoint = Endpoint }); - + parse_service([{<<"rate_type">>, RateType} | Rest], ServiceConfig) -> parse_service(Rest, ServiceConfig#p3_service{ rate_type = RateType }); @@ -264,7 +264,7 @@ to_json_service(ServiceConfig, P3Config) -> #{ <<"endpoint">> => ServiceConfig#p3_service.endpoint, <<"modSeq">> => ServiceConfig#p3_service.mod_seq, - <<"rates">> => Rates#{ + <<"rates">> => Rates#{ <<"description">> => maps:get(ServiceConfig#p3_service.rate_type, ?RATE_TYPE_MAP) } }. 
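For orientation, the map returned by to_json_service/2 above, together with the per-asset rates map that to_json_rates/2 builds in the next hunk, serializes to a structure along the following lines. This is a sketch only; the network name, token name, price, description, and address are placeholder values, not taken from the patch or from any real P3 configuration.

%% Hypothetical shape of one P3 service entry before JSON encoding (placeholder values).
example_p3_service() ->
	#{
		<<"endpoint">> => <<"/info">>,
		<<"modSeq">> => 1,
		<<"rates">> => #{
			<<"description">> => <<"Price per request">>,
			<<"arweave">> => #{
				<<"AR">> => #{
					<<"price">> => 1000,
					<<"address">> => <<"base64url-encoded-deposit-address">>
				}
			}
		}
	}.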
@@ -274,13 +274,13 @@ to_json_rates(RatesConfig, P3Config) -> fun(Asset, Price, Acc) -> {Network, Token} = ?FROM_P3_ASSET(Asset), Address = ar_util:encode(get_payments_value(P3Config, Asset, #p3_payment.address)), - Acc#{ - Network => #{ - Token => #{ - <<"price">> => Price, - <<"address">> => Address - } - } + Acc#{ + Network => #{ + Token => #{ + <<"price">> => Price, + <<"address">> => Address + } + } } end, #{}, diff --git a/apps/arweave/src/ar_p3_db.erl b/apps/arweave/src/ar_p3_db.erl index 7fed22b4e..5ede477a9 100644 --- a/apps/arweave/src/ar_p3_db.erl +++ b/apps/arweave/src/ar_p3_db.erl @@ -107,7 +107,7 @@ handle_call({post_deposit, Address, Amount, TXID}, _From, State) -> Account -> {reply, post_tx_transaction(Account, Amount, TXID), State} end; - + handle_call({post_charge, Address, Amount, Minimum, Request}, _From, State) -> case get_account2(Address) of not_found -> @@ -309,7 +309,7 @@ safe_get(Name, Key, Default) -> Result = try ar_kv:get(Name, Key) catch - error:{badmatch,[]} -> + error:{badmatch, []} -> not_found end, case Result of @@ -317,6 +317,6 @@ safe_get(Name, Key, Default) -> Value; not_found -> Default; - _ -> + _ -> Result end. diff --git a/apps/arweave/src/ar_packing_server.erl b/apps/arweave/src/ar_packing_server.erl index 93cdcb991..40ed09321 100644 --- a/apps/arweave/src/ar_packing_server.erl +++ b/apps/arweave/src/ar_packing_server.erl @@ -3,7 +3,7 @@ -behaviour(gen_server). -export([start_link/0, packing_atom/1, - request_unpack/2, request_repack/2, pack/4, unpack/5, repack/6, + request_unpack/2, request_repack/2, pack/4, unpack/5, repack/6, is_buffer_full/0, record_buffer_size_metric/0]). -export([init/1, handle_cast/2, handle_call/3, handle_info/2, terminate/2]). @@ -128,8 +128,7 @@ init([]) -> %% artificially throttle processes uniformly. ThrottleDelay = calculate_throttle_delay(SpawnSchedulers, PackingRate), Workers = queue:from_list( - [spawn_link(fun() -> worker(ThrottleDelay, PackingStateRef) end) - || _ <- lists:seq(1, SpawnSchedulers)]), + [spawn_link(fun() -> worker(ThrottleDelay, PackingStateRef) end) || _ <- lists:seq(1, SpawnSchedulers)]), ets:insert(?MODULE, {buffer_size, 0}), {ok, Config} = application:get_env(arweave, config), MaxSize = @@ -146,7 +145,7 @@ init([]) -> ar:console("~nSetting the packing chunk cache size limit to ~B chunks.~n", [MaxSize]), ets:insert(?MODULE, {buffer_size_limit, MaxSize}), timer:apply_interval(200, ?MODULE, record_buffer_size_metric, []), - {ok, #state{ + {ok, #state{ workers = Workers, num_workers = SpawnSchedulers }}. handle_call(Request, _From, State) -> @@ -286,7 +285,7 @@ worker(ThrottleDelay, RandomXStateRef) -> worker(ThrottleDelay, RandomXStateRef); {repack, Ref, From, Args} -> {RequestedPacking, Packing, Chunk, AbsoluteOffset, TXRoot, ChunkSize} = Args, - case repack(RequestedPacking, Packing, + case repack(RequestedPacking, Packing, AbsoluteOffset, TXRoot, Chunk, ChunkSize, RandomXStateRef, internal) of {ok, Packed, Unpacked} -> From ! {chunk, {packed, Ref, {RequestedPacking, Packed, AbsoluteOffset, TXRoot, @@ -318,7 +317,7 @@ worker(ThrottleDelay, RandomXStateRef) -> worker(ThrottleDelay, RandomXStateRef) end. 
-chunk_key(spora_2_5, ChunkOffset, TXRoot) -> +chunk_key(spora_2_5, ChunkOffset, TXRoot) -> %% The presence of the absolute end offset in the key makes sure %% packing of every chunk is unique, even when the same chunk is %% present in the same transaction or across multiple transactions @@ -326,7 +325,7 @@ chunk_key(spora_2_5, ChunkOffset, TXRoot) -> %% ensures one cannot find data that has certain patterns after %% packing. {spora_2_5, crypto:hash(sha256, << ChunkOffset:256, TXRoot/binary >>)}; -chunk_key({spora_2_6, RewardAddr}, ChunkOffset, TXRoot) -> +chunk_key({spora_2_6, RewardAddr}, ChunkOffset, TXRoot) -> %% The presence of the absolute end offset in the key makes sure %% packing of every chunk is unique, even when the same chunk is %% present in the same transaction or across multiple transactions @@ -387,7 +386,7 @@ unpack(PackingArgs, ChunkOffset, TXRoot, Chunk, ChunkSize, repack(unpacked, unpacked, _ChunkOffset, _TXRoot, Chunk, _ChunkSize, _RandomXStateRef, _External) -> {ok, Chunk, Chunk}; -repack(RequestedPacking, unpacked, +repack(RequestedPacking, unpacked, ChunkOffset, TXRoot, Chunk, _ChunkSize, RandomXStateRef, External) -> case pack(RequestedPacking, ChunkOffset, TXRoot, Chunk, RandomXStateRef, External) of {ok, Packed, _} -> @@ -395,7 +394,7 @@ repack(RequestedPacking, unpacked, Error -> Error end; -repack(unpacked, StoredPacking, +repack(unpacked, StoredPacking, ChunkOffset, TXRoot, Chunk, ChunkSize, RandomXStateRef, External) -> case unpack(StoredPacking, ChunkOffset, TXRoot, Chunk, ChunkSize, RandomXStateRef, External) of {ok, Unpacked, _} -> @@ -403,14 +402,14 @@ repack(unpacked, StoredPacking, Error -> Error end; -repack(RequestedPacking, StoredPacking, +repack(RequestedPacking, StoredPacking, _ChunkOffset, _TXRoot, Chunk, _ChunkSize, _RandomXStateRef, _External) when StoredPacking == RequestedPacking -> - %% StoredPacking and Packing are in the same format and neither is unpacked. To + %% StoredPacking and Packing are in the same format and neither is unpacked. To %% avoid uneccessary unpacking we'll return none for the UnpackedChunk. If a caller %% needs the UnpackedChunk they should call unpack explicity. {ok, Chunk, none}; -repack(RequestedPacking, StoredPacking, +repack(RequestedPacking, StoredPacking, ChunkOffset, TXRoot, Chunk, ChunkSize, RandomXStateRef, External) -> {SourcePacking, UnpackKey} = chunk_key(StoredPacking, ChunkOffset, TXRoot), {TargetPacking, PackKey} = chunk_key(RequestedPacking, ChunkOffset, TXRoot), @@ -419,7 +418,7 @@ repack(RequestedPacking, StoredPacking, PrometheusLabel = atom_to_list(SourcePacking) ++ "_to_" ++ atom_to_list(TargetPacking), prometheus_histogram:observe_duration(packing_duration_milliseconds, [repack, PrometheusLabel, External], fun() -> - ar_mine_randomx:randomx_reencrypt_chunk(SourcePacking, TargetPacking, + ar_mine_randomx:randomx_reencrypt_chunk(SourcePacking, TargetPacking, RandomXStateRef, UnpackKey, PackKey, Chunk, ChunkSize) end); Error -> Error @@ -509,7 +508,7 @@ minimum_run_time(_Module, _Function, _Args, 0, MinTime) -> max(1, (MinTime + 500) div 1000); minimum_run_time(Module, Function, Args, Repetitions, MinTime) -> {RunTime, _} = timer:tc(Module, Function, Args), - minimum_run_time(Module, Function, Args, Repetitions-1, erlang:min(MinTime, RunTime)). + minimum_run_time(Module, Function, Args, Repetitions - 1, erlang:min(MinTime, RunTime)). %% @doc Walk up the stack trace to the parent of the current function. E.g. 
%% example() -> @@ -519,7 +518,7 @@ minimum_run_time(Module, Function, Args, Repetitions, MinTime) -> get_caller() -> {current_stacktrace, CallStack} = process_info(self(), current_stacktrace), calling_function(CallStack). -calling_function([_, {_, _, _, _}|[{Module, Function, Arity, _}|_]]) -> +calling_function([_, {_, _, _, _} | [{Module, Function, Arity, _} | _]]) -> atom_to_list(Module) ++ ":" ++ atom_to_list(Function) ++ "/" ++ integer_to_list(Arity); calling_function(_) -> "unknown". diff --git a/apps/arweave/src/ar_patricia_tree.erl b/apps/arweave/src/ar_patricia_tree.erl index aae7c4fe5..2318a0909 100644 --- a/apps/arweave/src/ar_patricia_tree.erl +++ b/apps/arweave/src/ar_patricia_tree.erl @@ -245,8 +245,7 @@ compute_hash(Tree, HashFun, KeyPrefix, UpdateMap) -> Hashes2 = [H || {H, _} <- Hashes], NewHash3 = ar_deep_hash:hash([NewHash2 | Hashes2]), {NewHash3, UpdateMap2#{ {NewHash2, KeyPrefix} => {Key, Value}, - {NewHash3, KeyPrefix} => [{NewHash2, KeyPrefix} - | Hashes] }}; + {NewHash3, KeyPrefix} => [{NewHash2, KeyPrefix} | Hashes] }}; no_value -> case Hashes of [{SingleHash, _}] -> diff --git a/apps/arweave/src/ar_peers.erl b/apps/arweave/src/ar_peers.erl index b0d9fbc81..82847ed1d 100644 --- a/apps/arweave/src/ar_peers.erl +++ b/apps/arweave/src/ar_peers.erl @@ -654,8 +654,7 @@ score_peers(Rating) -> %% probabilistic to always give everyone a chance to improve %% in the competition (i.e., reduce the advantage gained by %% being the first to earn a reputation). - Score = rand:uniform() * get_peer_rating(Rating, Performance) - / (Total + 0.0001), + Score = rand:uniform() * get_peer_rating(Rating, Performance) / (Total + 0.0001), [{Peer, Score} | Acc]; (_, Acc) -> Acc diff --git a/apps/arweave/src/ar_poa.erl b/apps/arweave/src/ar_poa.erl index c09e7cbbc..d14db4d72 100644 --- a/apps/arweave/src/ar_poa.erl +++ b/apps/arweave/src/ar_poa.erl @@ -51,8 +51,7 @@ validate(Args) -> RecallBucketOffset = case RecallOffset >= StrictDataSplitThreshold of true -> - get_padded_offset(RecallOffset + 1, StrictDataSplitThreshold) - - (?DATA_CHUNK_SIZE) - BlockStartOffset; + get_padded_offset(RecallOffset + 1, StrictDataSplitThreshold) - (?DATA_CHUNK_SIZE) - BlockStartOffset; false -> RecallOffset - BlockStartOffset end, @@ -73,8 +72,7 @@ validate(Args) -> case ExpectedChunkID of not_set -> ChunkSize = ChunkEndOffset - ChunkStartOffset, - AbsoluteEndOffset = BlockStartOffset + TXStartOffset - + ChunkEndOffset, + AbsoluteEndOffset = BlockStartOffset + TXStartOffset + ChunkEndOffset, prometheus_counter:inc( validating_packed_spora, [ar_packing_server:packing_atom(Packing)]), diff --git a/apps/arweave/src/ar_poller.erl b/apps/arweave/src/ar_poller.erl index 4b10c4e2a..099b88183 100644 --- a/apps/arweave/src/ar_poller.erl +++ b/apps/arweave/src/ar_poller.erl @@ -63,10 +63,10 @@ init(Workers) -> ok end, {ok, Config} = application:get_env(arweave, config), - {ok, #state{ + {ok, #state{ workers = Workers, worker_count = length(Workers), - in_sync_trusted_peers = sets:from_list(Config#config.peers) + in_sync_trusted_peers = sets:from_list(Config#config.peers) }}. 
handle_call(Request, _From, State) -> @@ -119,8 +119,8 @@ handle_cast({peer_out_of_sync, Peer}, State) -> {false, true} -> ar_mining_stats:pause_performance_reports(60000), ar_util:terminal_clear(), - TrustedPeersStr = string:join([ar_util:format_peer(Peer2) - || Peer2 <- Config#config.peers], ", "), + TrustedPeersStr = string:join([ar_util:format_peer(Peer2) || + Peer2 <- Config#config.peers], ", "), ?LOG_INFO([{event, node_out_of_sync}, {peer, ar_util:format_peer(Peer)}, {trusted_peers, TrustedPeersStr}]), ar:console("WARNING: The node is out of sync with all of the specified " diff --git a/apps/arweave/src/ar_pricing.erl b/apps/arweave/src/ar_pricing.erl index 1042e9b7e..5ca3b0741 100644 --- a/apps/arweave/src/ar_pricing.erl +++ b/apps/arweave/src/ar_pricing.erl @@ -38,8 +38,8 @@ is_v2_pricing_height(Height) -> Fork_2_6_8 = ar_fork:height_2_6_8(), Height >= Fork_2_6_8 % First check just this because it may be infinity. - andalso Height >= Fork_2_6_8 + (?PRICE_2_6_8_TRANSITION_START) - + (?PRICE_2_6_8_TRANSITION_BLOCKS). + andalso Height >= Fork_2_6_8 + (?PRICE_2_6_8_TRANSITION_START) + + (?PRICE_2_6_8_TRANSITION_BLOCKS). %% @doc Return the price per gibibyte minute estimated from the given history of %% network hash rates and block rewards. The total reward used in calculations @@ -134,8 +134,7 @@ get_price_per_gib_minute2(Height, RewardHistory, BlockTimeHistory, Denomination2 2 * (?RECALL_RANGE_SIZE) div (?DATA_CHUNK_SIZE); _ -> min(2 * ?RECALL_RANGE_SIZE, - ?RECALL_RANGE_SIZE - + ?RECALL_RANGE_SIZE * TwoChunkCount div OneChunkCount) + ?RECALL_RANGE_SIZE + ?RECALL_RANGE_SIZE * TwoChunkCount div OneChunkCount) div ?DATA_CHUNK_SIZE end, %% The following walks through the math of calculating the price per GiB per minute. @@ -152,7 +151,7 @@ get_price_per_gib_minute2(Height, RewardHistory, BlockTimeHistory, Denomination2 %% EstimatedDataSizeInGiB = EstimatedPartitionCount * (?PARTITION_SIZE) div (?GiB), %% PricePerGiBPerBlock = max(1, RewardTotal) div EstimatedDataSizeInGiB, %% PricePerGiBPerMinute = PricePerGibPerBlock div 2, - PricePerGiBPerMinute = + PricePerGiBPerMinute = ( (SolutionsPerPartitionPerVDFStep * VDFIntervalTotal) * max(1, RewardTotal) * (?GiB) * 60 @@ -179,11 +178,9 @@ get_price_per_gib_minute2(Height, RewardHistory, BlockTimeHistory, Denomination2 %% Estimated price per gib minute = total block reward / estimated data size %% in gibibytes. (max(1, RewardTotal) * (?GiB) * SolutionsPerPartitionPerBlock) - div (max(1, HashRateTotal) - * (?PARTITION_SIZE) - * 2 % The reward is paid every two minutes whereas we are calculating - % the minute rate here. - ) + div (max(1, HashRateTotal) * (?PARTITION_SIZE) * 2) + % The reward is paid every two minutes whereas we are calculating + % the minute rate here. end. 
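The hash-rate branch above collapses to a single integer expression; the following is a minimal standalone sketch of it (hypothetical module name, with the ?GiB and ?PARTITION_SIZE macros written out as assumed byte counts rather than taken from the real headers).

-module(price_per_gib_minute_sketch).
-export([estimate/3]).

-define(GiB, 1073741824).               %% 1024 * 1024 * 1024 bytes
-define(PARTITION_SIZE, 3600000000000). %% assumption: 3.6 TB partitions

%% Mirrors the branch above: the total reward divided by the estimated data
%% size in GiB, halved because the reward accrues per two-minute block while
%% the result is a per-minute rate.
estimate(RewardTotal, HashRateTotal, SolutionsPerPartitionPerBlock) ->
	(max(1, RewardTotal) * ?GiB * SolutionsPerPartitionPerBlock)
		div (max(1, HashRateTotal) * ?PARTITION_SIZE * 2).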
%% @doc Return the minimum required transaction fee for the given number of @@ -192,8 +189,8 @@ get_tx_fee(Args) -> {DataSize, GiBMinutePrice, KryderPlusRateMultiplier, Height} = Args, FirstYearPrice = DataSize * GiBMinutePrice * 60 * 24 * 365, {LnDecayDividend, LnDecayDivisor} = ?LN_PRICE_DECAY_ANNUAL, - PerpetualPrice = {-FirstYearPrice * LnDecayDivisor * KryderPlusRateMultiplier - * (?N_REPLICATIONS(Height)), LnDecayDividend * (?GiB)}, + PerpetualPrice = {-FirstYearPrice * LnDecayDivisor * KryderPlusRateMultiplier * + (?N_REPLICATIONS(Height)), LnDecayDividend * (?GiB)}, MinerShare = ar_fraction:multiply(PerpetualPrice, ?MINER_MINIMUM_ENDOWMENT_CONTRIBUTION_SHARE), {Dividend, Divisor} = ar_fraction:add(PerpetualPrice, MinerShare), @@ -205,8 +202,8 @@ get_miner_reward_endowment_pool_debt_supply(Args) -> KryderPlusRateMultiplierLatch, KryderPlusRateMultiplier, Denomination, BlockInterval} = Args, Inflation = redenominate(ar_inflation:calculate(Height), 1, Denomination), - ExpectedReward = (?N_REPLICATIONS(Height)) * WeaveSize * GiBMinutePrice - * BlockInterval div (60 * ?GiB), + ExpectedReward = (?N_REPLICATIONS(Height)) * WeaveSize * GiBMinutePrice * + BlockInterval div (60 * ?GiB), {EndowmentPoolFeeShare, MinerFeeShare} = distribute_transaction_fees2(TXs, Denomination), BaseReward = Inflation + MinerFeeShare, EndowmentPool2 = EndowmentPool + EndowmentPoolFeeShare, @@ -347,7 +344,7 @@ recalculate_price_per_gib_minute2(B) -> true -> %% price_per_gib_minute = scheduled_price_per_gib_minute %% scheduled_price_per_gib_minute = - %% get_price_per_gib_minute() + %% get_price_per_gib_minute() %% EMA'ed with scheduled_price_per_gib_minute at 0.1 alpha %% and then capped to 0.5x to 2x of scheduled_price_per_gib_minute RewardHistory2 = lists:sublist(RewardHistory, ?REWARD_HISTORY_BLOCKS), @@ -495,10 +492,12 @@ usd_to_ar({Dividend, Divisor}, Rate, Height) -> InitialInflation = trunc(ar_inflation:calculate(?INITIAL_USD_TO_AR_HEIGHT(Height)())), CurrentInflation = trunc(ar_inflation:calculate(Height)), {InitialRateDividend, InitialRateDivisor} = Rate, - trunc( Dividend - * ?WINSTON_PER_AR - * CurrentInflation - * InitialRateDividend ) + trunc( + Dividend * + ?WINSTON_PER_AR * + CurrentInflation * + InitialRateDividend + ) div Divisor div InitialInflation div InitialRateDivisor. @@ -587,12 +586,12 @@ distribute_transaction_fees([TX | TXs], EndowmentPool, Miner, Height) -> %% @doc Return the cost of storing 1 GB in the network perpetually. %% Integral of the exponential decay curve k*e^(-at), i.e. k/a. %% @end --spec get_perpetual_gb_cost_at_timestamp(Timestamp::integer(), Height::nonegint()) -> usd(). +-spec get_perpetual_gb_cost_at_timestamp(Timestamp :: integer(), Height :: nonegint()) -> usd(). get_perpetual_gb_cost_at_timestamp(Timestamp, Height) -> K = get_gb_cost_per_year_at_timestamp(Timestamp, Height), get_perpetual_gb_cost(K, Height). --spec get_perpetual_gb_cost(Init::usd(), Height::nonegint()) -> usd(). +-spec get_perpetual_gb_cost(Init :: usd(), Height :: nonegint()) -> usd(). get_perpetual_gb_cost(Init, Height) -> case Height >= ar_fork:height_2_5() of true -> @@ -605,7 +604,7 @@ get_perpetual_gb_cost(Init, Height) -> end. %% @doc Return the cost in USD of storing 1 GB per year at the given time. --spec get_gb_cost_per_year_at_timestamp(Timestamp::integer(), Height::nonegint()) -> usd(). +-spec get_gb_cost_per_year_at_timestamp(Timestamp :: integer(), Height :: nonegint()) -> usd(). 
get_gb_cost_per_year_at_timestamp(Timestamp, Height) -> Datetime = system_time_to_universal_time(Timestamp, seconds), get_gb_cost_per_year_at_datetime(Datetime, Height). @@ -617,7 +616,7 @@ get_gb_cost_per_block_at_timestamp(Timestamp, Height) -> get_gb_cost_per_block_at_datetime(Datetime, Height). %% @doc Return the cost in USD of storing 1 GB per year. --spec get_gb_cost_per_year_at_datetime(DT::datetime(), Height::nonegint()) -> usd(). +-spec get_gb_cost_per_year_at_datetime(DT :: datetime(), Height :: nonegint()) -> usd(). get_gb_cost_per_year_at_datetime({{Y, M, _}, _} = DT, Height) -> PrevY = prev_jun_30_year(Y, M), NextY = next_jun_30_year(Y, M), @@ -630,21 +629,15 @@ get_gb_cost_per_year_at_datetime({{Y, M, _}, _} = DT, Height) -> {PrevYCostDividend, PrevYCostDivisor} = PrevYCost, {NextYCostDividend, NextYCostDivisor} = NextYCost, Dividend = - (?N_REPLICATIONS(Height)) - * ( - PrevYCostDividend * NextYCostDivisor * FracYDivisor - - FracYDividend - * ( - PrevYCostDividend - * NextYCostDivisor - - NextYCostDividend - * PrevYCostDivisor + (?N_REPLICATIONS(Height)) * + ( + PrevYCostDividend * NextYCostDivisor * FracYDivisor - + FracYDividend * ( + PrevYCostDividend * NextYCostDivisor - + NextYCostDividend * PrevYCostDivisor ) ), - Divisor = - PrevYCostDivisor - * NextYCostDivisor - * FracYDivisor, + Divisor = PrevYCostDivisor * NextYCostDivisor * FracYDivisor, {Dividend, Divisor}; false -> CY = PrevYCost - (FracY * (PrevYCost - NextYCost)), @@ -662,7 +655,7 @@ next_jun_30_year(Y, _M) -> Y + 1. %% @doc Return the cost in USD of storing 1 GB per average block time. --spec get_gb_cost_per_block_at_datetime(DT::datetime(), Height::nonegint()) -> usd(). +-spec get_gb_cost_per_block_at_datetime(DT :: datetime(), Height :: nonegint()) -> usd(). get_gb_cost_per_block_at_datetime(DT, Height) -> case Height >= ar_fork:height_2_5() of true -> @@ -701,7 +694,7 @@ usd_p_gby(Y, Height) -> T = Y - 2019, P = ?TX_PRICE_NATURAL_EXPONENT_DECIMAL_FRACTION_PRECISION, {EDividend, EDivisor} = ar_fraction:natural_exponent({ADividend * T, ADivisor}, P), - {EDividend * KDividend, EDivisor * KDivisor}; + {EDividend * KDividend, EDivisor * KDivisor}; false -> {Dividend, Divisor} = ?USD_PER_GBY_2019, K = Dividend / Divisor, @@ -761,18 +754,15 @@ recalculate_usd_to_ar_rate3(#block{ height = PrevHeight, diff = Diff } = B) -> MaxAdjustmentUp = ar_fraction:multiply(Rate, ?USD_TO_AR_MAX_ADJUSTMENT_UP_MULTIPLIER), MaxAdjustmentDown = ar_fraction:multiply(Rate, ?USD_TO_AR_MAX_ADJUSTMENT_DOWN_MULTIPLIER), CappedScheduledRate = ar_fraction:reduce(ar_fraction:maximum( - ar_fraction:minimum(ScheduledRate, MaxAdjustmentUp), MaxAdjustmentDown), - ?USD_TO_AR_FRACTION_REDUCTION_LIMIT), - ?LOG_DEBUG([{event, recalculated_rate}, - {new_rate, ar_util:safe_divide(element(1, Rate), element(2, Rate))}, - {new_scheduled_rate, ar_util:safe_divide(element(1, CappedScheduledRate), - element(2, CappedScheduledRate))}, - {new_scheduled_rate_without_capping, - ar_util:safe_divide(element(1, ScheduledRate), element(2, ScheduledRate))}, - {max_adjustment_up, ar_util:safe_divide(element(1, MaxAdjustmentUp), - element(2,MaxAdjustmentUp))}, - {max_adjustment_down, ar_util:safe_divide(element(1, MaxAdjustmentDown), - element(2,MaxAdjustmentDown))}]), + ar_fraction:minimum(ScheduledRate, MaxAdjustmentUp), MaxAdjustmentDown), ?USD_TO_AR_FRACTION_REDUCTION_LIMIT), + ?LOG_DEBUG([ + {event, recalculated_rate}, + {new_rate, ar_util:safe_divide(element(1, Rate), element(2, Rate))}, + {new_scheduled_rate, ar_util:safe_divide(element(1, 
CappedScheduledRate), element(2, CappedScheduledRate))}, + {new_scheduled_rate_without_capping, ar_util:safe_divide(element(1, ScheduledRate), element(2, ScheduledRate))}, + {max_adjustment_up, ar_util:safe_divide(element(1, MaxAdjustmentUp), element(2, MaxAdjustmentUp))}, + {max_adjustment_down, ar_util:safe_divide(element(1, MaxAdjustmentDown), element(2, MaxAdjustmentDown))} + ]), {Rate, CappedScheduledRate}. %%%=================================================================== @@ -782,13 +772,11 @@ recalculate_usd_to_ar_rate3(#block{ height = PrevHeight, diff = Diff } = B) -> get_gb_cost_per_year_at_datetime_is_monotone_test_() -> [ ar_test_node:test_with_mocked_functions([{ar_fork, height_2_5, fun() -> infinity end}], - fun test_get_gb_cost_per_year_at_datetime_is_monotone/0, 120) - | - [ - ar_test_node:test_with_mocked_functions([{ar_fork, height_2_5, fun() -> Height end}], - fun test_get_gb_cost_per_year_at_datetime_is_monotone/0, 120) - || Height <- lists:seq(0, 20) - ] + fun test_get_gb_cost_per_year_at_datetime_is_monotone/0, 120) | [ + ar_test_node:test_with_mocked_functions([{ar_fork, height_2_5, fun() -> Height end}], + fun test_get_gb_cost_per_year_at_datetime_is_monotone/0, 120) || + Height <- lists:seq(0, 20) + ] ]. test_get_gb_cost_per_year_at_datetime_is_monotone() -> diff --git a/apps/arweave/src/ar_retarget.erl b/apps/arweave/src/ar_retarget.erl index 217e84103..477a60570 100644 --- a/apps/arweave/src/ar_retarget.erl +++ b/apps/arweave/src/ar_retarget.erl @@ -158,8 +158,8 @@ calculate_difficulty_with_drop(OldDiff, TS, Last, Height, PrevTS, InitialCoeff, Step = 10 * 60, %% Drop the difficulty InitialCoeff times right away, then drop extra Coeff times %% for every 10 minutes passed. - ActualTime2 = ActualTime * InitialCoeff - * ar_fraction:pow(Coeff, max(TS - PrevTS, 0) div Step), + ActualTime2 = ActualTime * InitialCoeff * + ar_fraction:pow(Coeff, max(TS - PrevTS, 0) div Step), MaxDiff = ?MAX_DIFF, MinDiff = min_difficulty(Height), DiffInverse = (MaxDiff - OldDiff) * ActualTime2 div TargetTime, @@ -257,15 +257,19 @@ calculate_difficulty_before_1_8(OldDiff, TS, Last, Height) -> ActualTime = TS - Last, TimeError = abs(ActualTime - TargetTime), Diff = erlang:max( - if - TimeError < (TargetTime * ?RETARGET_TOLERANCE) -> OldDiff; - TargetTime > ActualTime -> OldDiff + 1; - true -> OldDiff - 1 + case true of + _ when TimeError < (TargetTime * ?RETARGET_TOLERANCE) -> + OldDiff; + _ when TargetTime > ActualTime -> + OldDiff + 1; + _ -> + OldDiff - 1 end, min_difficulty(Height) ), Diff. + between(N, Min, _) when N < Min -> Min; between(N, _, Max) when N > Max -> Max; between(N, _, _) -> N. @@ -374,66 +378,42 @@ test_calculate_difficulty_linear() -> %% The actual time is three times smaller. Retarget5 = Timestamp - TargetTime div 3, ?assert( - 3.001 * hashes(Diff) - > hashes( - calculate_difficulty(Diff, Timestamp, Retarget5, 1) - ) + 3.001 * hashes(Diff) > hashes(calculate_difficulty(Diff, Timestamp, Retarget5, 1)) ), ?assert( - 3.001 / 2 * hashes(Diff) - > hashes( % Expect 2x drop at 2.5. - calculate_difficulty_at_2_5(Diff, Timestamp, Retarget5, 0, Timestamp - 1) - ) + % Expect 2x drop at 2.5. + 3.001 / 2 * hashes(Diff) > hashes(calculate_difficulty_at_2_5(Diff, Timestamp, Retarget5, 0, Timestamp - 1)) ), ?assert( - 2.999 * hashes(Diff) - < hashes( - calculate_difficulty(Diff, Timestamp, Retarget5, 1) - ) + 2.999 * hashes(Diff) < hashes(calculate_difficulty(Diff, Timestamp, Retarget5, 1)) ), ?assert( - 2.999 / 2 * hashes(Diff) - < hashes( % Expect 2x drop at 2.5. 
- calculate_difficulty_at_2_5(Diff, Timestamp, Retarget5, 0, Timestamp - 1) - ) + % Expect 2x drop at 2.5. + 2.999 / 2 * hashes(Diff) < hashes(calculate_difficulty_at_2_5(Diff, Timestamp, Retarget5, 0, Timestamp - 1)) ), %% The actual time is two times bigger. Retarget6 = Timestamp - 2 * TargetTime, ?assert( - hashes(Diff) - > 1.999 * hashes( - calculate_difficulty(Diff, Timestamp, Retarget6, 1) - ) + hashes(Diff) > 1.999 * hashes(calculate_difficulty(Diff, Timestamp, Retarget6, 1)) ), ?assert( - hashes(Diff) - > 3.999 * hashes( % Expect 2x drop at 2.5. - calculate_difficulty_at_2_5(Diff, Timestamp, Retarget6, 0, Timestamp - 1) - ) + % Expect 2x drop at 2.5. + hashes(Diff) > 3.999 * hashes(calculate_difficulty_at_2_5(Diff, Timestamp, Retarget6, 0, Timestamp - 1)) ), ?assert( - hashes(Diff) - > 7.999 * hashes( % Expect extra 2x after 10 minutes. - calculate_difficulty_at_2_5(Diff, Timestamp, Retarget6, 0, Timestamp - 600) - ) + % Expect extra 2x after 10 minutes. + hashes(Diff) > 7.999 * hashes(calculate_difficulty_at_2_5(Diff, Timestamp, Retarget6, 0, Timestamp - 600)) ), ?assert( - hashes(Diff) - < 2.001 * hashes( - calculate_difficulty(Diff, Timestamp, Retarget6, 1) - ) + hashes(Diff) < 2.001 * hashes(calculate_difficulty(Diff, Timestamp, Retarget6, 1)) ), ?assert( - hashes(Diff) - < 4.001 * hashes( % Expect 2x drop at 2.5. - calculate_difficulty_at_2_5(Diff, Timestamp, Retarget6, 0, Timestamp - 1) - ) + % Expect 2x drop at 2.5. + hashes(Diff) < 4.001 * hashes(calculate_difficulty_at_2_5(Diff, Timestamp, Retarget6, 0, Timestamp - 1)) ), ?assert( - hashes(Diff) - < 8.001 * hashes( % Expect extra 2x after 10 minutes. - calculate_difficulty_at_2_5(Diff, Timestamp, Retarget6, 0, Timestamp - 600) - ) + % Expect extra 2x after 10 minutes. + hashes(Diff) < 8.001 * hashes(calculate_difficulty_at_2_5(Diff, Timestamp, Retarget6, 0, Timestamp - 600)) ). hashes(Diff) -> diff --git a/apps/arweave/src/ar_serialize.erl b/apps/arweave/src/ar_serialize.erl index 670982e69..59e1938a4 100644 --- a/apps/arweave/src/ar_serialize.erl +++ b/apps/arweave/src/ar_serialize.erl @@ -210,8 +210,8 @@ block_to_json_struct( {packing_2_5_threshold, integer_to_binary(B#block.packing_2_5_threshold)}, {strict_data_split_threshold, - integer_to_binary(B#block.strict_data_split_threshold)} - | JSONElements3 + integer_to_binary(B#block.strict_data_split_threshold)} | + JSONElements3 ]; false -> JSONElements3 @@ -266,8 +266,7 @@ block_to_json_struct( {denomination, integer_to_binary(Denomination)}, {redenomination_height, RedenominationHeight}, {double_signing_proof, DoubleSigningProof}, - {previous_cumulative_diff, integer_to_binary(PrevCDiff)} - | JSONElements4], + {previous_cumulative_diff, integer_to_binary(PrevCDiff)} | JSONElements4], case B#block.recall_byte2 of undefined -> JSONElements6; @@ -284,8 +283,7 @@ block_to_json_struct( {merkle_rebase_support_threshold, integer_to_binary(RebaseThreshold)}, {chunk_hash, ar_util:encode(B#block.chunk_hash)}, {block_time_history_hash, - ar_util:encode(B#block.block_time_history_hash)} - | JSONElements5], + ar_util:encode(B#block.block_time_history_hash)} | JSONElements5], case B#block.chunk2_hash of undefined -> JSONElements7; @@ -328,8 +326,7 @@ block_time_history_to_binary([{BlockInterval, VDFInterval, ChunkCount} | BlockTi block_time_history_to_binary(BlockTimeHistory, [ ar_serialize:encode_int(BlockInterval, 8), ar_serialize:encode_int(VDFInterval, 8), - ar_serialize:encode_int(ChunkCount, 8) - | IOList]). + ar_serialize:encode_int(ChunkCount, 8) | IOList]). 
binary_to_block_time_history(Bin) -> binary_to_block_time_history(Bin, []). @@ -349,7 +346,7 @@ binary_to_block_time_history(_Rest, _BlockTimeHistory) -> %% Note: the #nonce_limiter_update and #vdf_session records are only serialized for communication %% between a VDF server and VDF client. Only fields that are required for this communication are %% serialized. -%% +%% %% For example, the vdf_difficulty and next_vdf_difficulty fields are omitted as they are only used %% by nodes that compute their own VDF and never need to be shared from VDF server to VDF client. nonce_limiter_update_to_binary(1 = _Format, #nonce_limiter_update{ session_key = {NextSeed, Interval, _}, @@ -456,7 +453,7 @@ binary_to_nonce_limiter_update_response(_Bin) -> {error, invalid2}. binary_to_nonce_limiter_update_response( - SessionFoundBin, StepNumberSize, StepNumber, Postpone, Format) + SessionFoundBin, StepNumberSize, StepNumber, Postpone, Format) when SessionFoundBin == 0; SessionFoundBin == 1 -> SessionFound = case SessionFoundBin of 0 -> false; 1 -> true end, StepNumber2 = case StepNumberSize of 0 -> undefined; _ -> StepNumber end, @@ -1319,9 +1316,9 @@ json_struct_to_tx(TXStruct, ComputeDataSize) -> id = TXID, last_tx = ar_util:decode(find_value(<<"last_tx">>, TXStruct)), owner = ar_util:decode(find_value(<<"owner">>, TXStruct)), - tags = [{ar_util:decode(Name), ar_util:decode(Value)} + tags = [{ar_util:decode(Name), ar_util:decode(Value)} || %% Only the elements matching this pattern are included in the list. - || {[{<<"name">>, Name}, {<<"value">>, Value}]} <- Tags], + {[{<<"name">>, Name}, {<<"value">>, Value}]} <- Tags], target = ar_wallet:base64_address_with_optional_checksum_to_decoded_address( find_value(<<"target">>, TXStruct)), quantity = binary_to_integer(find_value(<<"quantity">>, TXStruct)), diff --git a/apps/arweave/src/ar_storage.erl b/apps/arweave/src/ar_storage.erl index 001d91299..178a27fde 100644 --- a/apps/arweave/src/ar_storage.erl +++ b/apps/arweave/src/ar_storage.erl @@ -141,7 +141,7 @@ update_block_index(TipHeight, OrphanCount, BI) -> %% Record the contents of BI starting at this height. Whether there are 1 or 0 orphans %% we update the index starting at the same height (the tip). Only when OrphanCount is > 1 do %% need to rewrite the index starting at a lower height. - IndexHeight = TipHeight - max(0, OrphanCount-1), + IndexHeight = TipHeight - max(0, OrphanCount - 1), %% 1. Delete all the orphaned blocks from the block index case ar_kv:delete_range(block_index_db, << OrphanHeight:256 >>, << (TipHeight + 1):256 >>) of @@ -221,8 +221,8 @@ get_block_time_history_from_blocks([B | Blocks], PrevB) -> false -> get_block_time_history_from_blocks(Blocks, B); true -> - [{B#block.indep_hash, ar_block:get_block_time_history_element(B, PrevB)} - | get_block_time_history_from_blocks(Blocks, B)] + [{B#block.indep_hash, ar_block:get_block_time_history_element(B, PrevB)} | + get_block_time_history_from_blocks(Blocks, B)] end. store_block_time_history_part2([]) -> @@ -299,28 +299,30 @@ put_tx_confirmation_data(B) -> %% @doc Return {BlockHeight, BlockHash} belonging to the block where %% the given transaction was included. 
get_tx_confirmation_data(TXID) -> - case ar_kv:get(tx_confirmation_db, TXID) of - {ok, Binary} -> - {ok, binary_to_term(Binary)}; - not_found -> - {ok, Config} = application:get_env(arweave, config), - case lists:member(arql, Config#config.disable) of - true -> - not_found; - _ -> - case catch ar_arql_db:select_block_by_tx_id(ar_util:encode(TXID)) of - {ok, #{ - height := Height, - indep_hash := EncodedIndepHash - }} -> - {ok, {Height, ar_util:decode(EncodedIndepHash)}}; - not_found -> - not_found; - {'EXIT', {timeout, {gen_server, call, [ar_arql_db, _]}}} -> - {error, timeout} - end - end - end. + case ar_kv:get(tx_confirmation_db, TXID) of + {ok, Binary} -> + {ok, binary_to_term(Binary)}; + not_found -> + {ok, Config} = application:get_env(arweave, config), + case lists:member(arql, Config#config.disable) of + true -> + {error, not_found}; + false -> + try ar_arql_db:select_block_by_tx_id(ar_util:encode(TXID)) of + {ok, #{ + height := Height, + indep_hash := EncodedIndepHash + }} -> + {ok, {Height, ar_util:decode(EncodedIndepHash)}}; + _ -> + {error, not_found} + catch + _Class:_Exception -> + {error, unknown_error} + end + end + end. + %% @doc Read a block from disk, given a height %% and a block index (used to determine the hash by height). @@ -833,14 +835,17 @@ read_migrated_v1_tx_file(Filename) -> case read_tx_data_from_kv_storage(ID) of {ok, Data} -> {ok, TX#tx{ data = Data }}; - Error -> - Error - end + Error -> + Error + end; + Error -> + Error end; Error -> Error end. + read_tx_data_from_kv_storage(ID) -> case ar_data_sync:get_tx_data(ID) of {ok, Data} -> @@ -915,8 +920,7 @@ read_wallet_list_from_chunk_files(WalletListHash) when is_binary(WalletListHash) Error end; read_wallet_list_from_chunk_files(WL) when is_list(WL) -> - {ok, ar_patricia_tree:from_proplist([{get_wallet_key(T), get_wallet_value(T)} - || T <- WL])}. + {ok, ar_patricia_tree:from_proplist([{get_wallet_key(T), get_wallet_value(T)} || T <- WL])}. get_wallet_key(T) -> element(1, T). @@ -1467,7 +1471,7 @@ store_and_retrieve_wallet_list2(Tree, InsertedKeys, IsUpdate) -> %% From: https://www.erlang.org/doc/programming_examples/list_comprehensions.html#permutations permutations([]) -> [[]]; -permutations(L) -> [[H|T] || H <- L, T <- permutations(L--[H])]. +permutations(L) -> [[H | T] || H <- L, T <- permutations(L -- [H])]. 
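As a quick, hand-checked illustration of the helper above (a hypothetical EUnit case, not part of the patch; it assumes eunit.hrl is already included by the surrounding test code):

permutations_example_test() ->
	?assertEqual(
		[[1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1], [3, 1, 2], [3, 2, 1]],
		permutations([1, 2, 3])).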
assert_wallet_trees_equal(Expected, Actual) -> ?assertEqual( diff --git a/apps/arweave/src/ar_sync_record.erl b/apps/arweave/src/ar_sync_record.erl index d384112b1..737ea354d 100644 --- a/apps/arweave/src/ar_sync_record.erl +++ b/apps/arweave/src/ar_sync_record.erl @@ -144,9 +144,8 @@ is_recorded(Offset, {ID, Type}) -> true -> {{true, Type}, "default"}; false -> - StorageModules = [Module - || {_, _, Packing} = Module <- ar_storage_module:get_all(Offset), - Packing == Type], + StorageModules = [Module || {_, _, Packing} = + Module <- ar_storage_module:get_all(Offset), Packing == Type], is_recorded_any_by_type(Offset, ID, StorageModules) end; is_recorded(Offset, ID) -> diff --git a/apps/arweave/src/ar_tx.erl b/apps/arweave/src/ar_tx.erl index 2634f3242..9de6aec38 100644 --- a/apps/arweave/src/ar_tx.erl +++ b/apps/arweave/src/ar_tx.erl @@ -699,16 +699,14 @@ test_sign_tx() -> ), InvalidTXs = [ sign( - generate_chunk_tree( % a quantity with empty target - NewTX#tx{ format = 2, quantity = 1 } - ), + % a quantity with empty target + generate_chunk_tree(NewTX#tx{ format = 2, quantity = 1 }), Priv, Pub ), sign_v1( - generate_chunk_tree( % a target without quantity - NewTX#tx{ format = 1, target = crypto:strong_rand_bytes(32) } - ), + % a target without quantity + generate_chunk_tree(NewTX#tx{ format = 1, target = crypto:strong_rand_bytes(32) }), Priv, Pub ) diff --git a/apps/arweave/src/ar_tx_blacklist.erl b/apps/arweave/src/ar_tx_blacklist.erl index c1e3976b3..e8b351aa6 100644 --- a/apps/arweave/src/ar_tx_blacklist.erl +++ b/apps/arweave/src/ar_tx_blacklist.erl @@ -190,7 +190,7 @@ handle_cast(maybe_request_takedown, State) -> false -> State end, - State3 = + State3 = case DTS + ?REQUEST_TAKEDOWN_DELAY_MS < Now of true -> request_data_takedown(State2); diff --git a/apps/arweave/src/ar_tx_db.erl b/apps/arweave/src/ar_tx_db.erl index 0d44f6344..aa97757a4 100644 --- a/apps/arweave/src/ar_tx_db.erl +++ b/apps/arweave/src/ar_tx_db.erl @@ -13,7 +13,7 @@ %% write-once values. put_error_codes(TXID, ErrorCodes) -> ets:insert(?MODULE, {TXID, ErrorCodes}), - {ok, _} = timer:apply_after(1800*1000, ?MODULE, clear_error_codes, [TXID]), + {ok, _} = timer:apply_after(1800 * 1000, ?MODULE, clear_error_codes, [TXID]), ok. %% @doc Retreive a term from the meta db. 
diff --git a/apps/arweave/src/ar_tx_emitter_worker.erl b/apps/arweave/src/ar_tx_emitter_worker.erl index 75ee3603a..b6075856c 100644 --- a/apps/arweave/src/ar_tx_emitter_worker.erl +++ b/apps/arweave/src/ar_tx_emitter_worker.erl @@ -72,7 +72,7 @@ handle_info({gun_down, _, http, normal, _, _}, State) -> {noreply, State}; handle_info({gun_down, _, http, closed, _, _}, State) -> {noreply, State}; -handle_info({gun_down, _, http, {error,econnrefused}, _, _}, State) -> +handle_info({gun_down, _, http, {error, econnrefused}, _, _}, State) -> {noreply, State}; handle_info({gun_up, _, http}, State) -> {noreply, State}; diff --git a/apps/arweave/src/ar_tx_replay_pool.erl b/apps/arweave/src/ar_tx_replay_pool.erl index 8fc6bba5e..414f35e0a 100644 --- a/apps/arweave/src/ar_tx_replay_pool.erl +++ b/apps/arweave/src/ar_tx_replay_pool.erl @@ -77,7 +77,7 @@ verify_block_txs([TX | TXs], {true, _, true} -> invalid; _ -> - verify_block_txs(TXs, + verify_block_txs(TXs, {Rate, PricePerGiBMinute, KryderPlusRateMultiplier, Denomination, Height, RedenominationHeight, Timestamp, NewWallets, BlockAnchors, RecentTXMap, NewMempool, NewCount, NewSize}) @@ -109,7 +109,6 @@ verify_tx2(Args) -> {TX, Rate, PricePerGiBMinute, KryderPlusRateMultiplier, Denomination, Height, RedenominationHeight, Timestamp, FloatingWallets, BlockAnchors, RecentTXMap, Mempool, VerifySignature} = Args, - case ar_tx:verify(TX, {Rate, PricePerGiBMinute, KryderPlusRateMultiplier, Denomination, RedenominationHeight, Height, FloatingWallets, Timestamp}, VerifySignature) of true -> diff --git a/apps/arweave/src/ar_util.erl b/apps/arweave/src/ar_util.erl index fd654c8b1..f7b501b28 100644 --- a/apps/arweave/src/ar_util.erl +++ b/apps/arweave/src/ar_util.erl @@ -46,7 +46,7 @@ pick_random(_, 0) -> []; pick_random([], _) -> []; pick_random(List, N) -> Elem = pick_random(List), - [Elem|pick_random(List -- [Elem], N - 1)]. + [Elem | pick_random(List -- [Elem], N - 1)]. %% @doc Select a random element from a list. pick_random(Xs) -> @@ -98,8 +98,8 @@ peer_to_str(Bin) when is_binary(Bin) -> peer_to_str(Str) when is_list(Str) -> Str; peer_to_str({A, B, C, D, Port}) -> - integer_to_list(A) ++ "_" ++ integer_to_list(B) ++ "_" ++ integer_to_list(C) ++ "_" - ++ integer_to_list(D) ++ "_" ++ integer_to_list(Port). + integer_to_list(A) ++ "_" ++ integer_to_list(B) ++ "_" ++ integer_to_list(C) ++ "_" ++ + integer_to_list(D) ++ "_" ++ integer_to_list(Port). %% @doc Parses a port string into an integer. parse_port(Int) when is_integer(Int) -> Int; @@ -141,9 +141,9 @@ unique(Xs) when not is_list(Xs) -> [Xs]; unique(Xs) -> unique([], Xs). unique(Res, []) -> lists:reverse(Res); -unique(Res, [X|Xs]) -> +unique(Res, [X | Xs]) -> case lists:member(X, Res) of - false -> unique([X|Res], Xs); + false -> unique([X | Res], Xs); true -> unique(Res, Xs) end. @@ -263,7 +263,7 @@ parse_list_indices(_BadInput, _N) -> error. shuffle_list(List) -> - lists:sort(fun(_,_) -> rand:uniform() < 0.5 end, List). + lists:sort(fun(_, _) -> rand:uniform() < 0.5 end, List). %%% %%% Tests. @@ -275,7 +275,7 @@ basic_unique_test() -> %% @doc Ensure that hosts are formatted as lists correctly. basic_peer_format_test() -> - "127.0.0.1:9001" = format_peer({127,0,0,1,9001}). + "127.0.0.1:9001" = format_peer({127, 0, 0, 1, 9001}). %% @doc Ensure that pick_random's are actually in the starting list. 
 pick_random_test() ->
diff --git a/apps/arweave/src/ar_wallet.erl b/apps/arweave/src/ar_wallet.erl
index 8965ea496..39fe4616b 100644
--- a/apps/arweave/src/ar_wallet.erl
+++ b/apps/arweave/src/ar_wallet.erl
@@ -22,7 +22,7 @@ new() ->
     new(?DEFAULT_KEY_TYPE).

 new(KeyType = {KeyAlg, PublicExpnt}) when KeyType =:= {?RSA_SIGN_ALG, 65537} ->
-    {[_, Pub], [_, Pub, Priv|_]} = {[_, Pub], [_, Pub, Priv|_]}
+    {[_, Pub], [_, Pub, Priv | _]} = {[_, Pub], [_, Pub, Priv | _]}
         = crypto:generate_key(KeyAlg, {?RSA_PRIV_KEY_SZ, PublicExpnt}),
     {{KeyType, Priv, Pub}, {KeyType, Pub}};
 new(KeyType = {KeyAlg, KeyCrv}) when KeyAlg =:= ?ECDSA_SIGN_ALG andalso KeyCrv =:= secp256k1 ->
@@ -309,7 +309,7 @@ base64_address_with_optional_checksum_to_decoded_address(AddrBase64) ->
         end
     end.

-base64_address_with_optional_checksum_to_decoded_address_safe(AddrBase64)->
+base64_address_with_optional_checksum_to_decoded_address_safe(AddrBase64) ->
     try
         D = base64_address_with_optional_checksum_to_decoded_address(AddrBase64),
         {ok, D}
@@ -440,26 +440,17 @@ checksum_test() ->
     %% 65 bytes.
     InvalidLongAddress = <<"01234567890123456789012345678901234567890123456789012345678901234">>,
     InvalidLongAddressBase64 = ar_util:encode(InvalidLongAddress),
-    case catch base64_address_with_optional_checksum_to_decoded_address(<>) of
-        {'EXIT', _} -> ok
-    end,
+    {'EXIT', _} = (catch base64_address_with_optional_checksum_to_decoded_address(<>)),
     %% 100 bytes.
     InvalidLongAddress2 = <<"0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789">>,
     InvalidLongAddress2Base64 = ar_util:encode(InvalidLongAddress2),
-    case catch base64_address_with_optional_checksum_to_decoded_address(<>) of
-        {'EXIT', _} -> ok
-    end,
+    {'EXIT', _} = (catch base64_address_with_optional_checksum_to_decoded_address(<>)),
     %% 10 bytes
     InvalidShortAddress = <<"0123456789">>,
     InvalidShortAddressBase64 = ar_util:encode(InvalidShortAddress),
-    case catch base64_address_with_optional_checksum_to_decoded_address(<>) of
-        {'EXIT', _} -> ok
-    end,
+    {'EXIT', _} = (catch base64_address_with_optional_checksum_to_decoded_address(<>)),
     InvalidChecksum = ar_util:encode(<< 0:32 >>),
-    case catch base64_address_with_optional_checksum_to_decoded_address(
-            << AddrBase64/binary, ":", InvalidChecksum/binary >>) of
-        {error, invalid_address_checksum} -> ok
-    end,
-    case catch base64_address_with_optional_checksum_to_decoded_address(<<":MDA">>) of
-        {'EXIT', _} -> ok
-    end.
+    {error, invalid_address_checksum} = base64_address_with_optional_checksum_to_decoded_address(
+        << AddrBase64/binary, ":", InvalidChecksum/binary >>),
+    {'EXIT', _} = (catch base64_address_with_optional_checksum_to_decoded_address(<<":MDA">>)),
+    ok.
diff --git a/apps/arweave/src/ar_weave.erl b/apps/arweave/src/ar_weave.erl index 75e214fbb..73f56c499 100644 --- a/apps/arweave/src/ar_weave.erl +++ b/apps/arweave/src/ar_weave.erl @@ -35,8 +35,7 @@ init(WalletList, Diff, GenesisDataSize) -> TX = create_genesis_tx(Key, GenesisDataSize), WalletList2 = WalletList ++ [{ar_wallet:to_address(Key), 0, TX#tx.id}], TXs = [TX], - AccountTree = ar_patricia_tree:from_proplist([{A, {B, LTX}} - || {A, B, LTX} <- WalletList2]), + AccountTree = ar_patricia_tree:from_proplist([{A, {B, LTX}} || {A, B, LTX} <- WalletList2]), WLH = element(1, ar_block:hash_wallet_list(AccountTree)), SizeTaggedTXs = ar_block:generate_size_tagged_list_from_txs(TXs, 0), BlockSize = case SizeTaggedTXs of [] -> 0; _ -> element(2, lists:last(SizeTaggedTXs)) end, @@ -139,7 +138,7 @@ add_mainnet_v1_genesis_txs() -> SourcePath = "data/genesis_txs/" ++ F, TargetPath = Config#config.data_dir ++ "/" ++ ?TX_DIR ++ "/" ++ F, file:copy(SourcePath, TargetPath), - [ar_util:decode(hd(string:split(F, ".")))|Acc] + [ar_util:decode(hd(string:split(F, "."))) | Acc] end, [], Files diff --git a/apps/arweave/src/rsa_pss.erl b/apps/arweave/src/rsa_pss.erl index 33ddb5b64..492b9d469 100644 --- a/apps/arweave/src/rsa_pss.erl +++ b/apps/arweave/src/rsa_pss.erl @@ -19,8 +19,9 @@ -export([verify/4]). %% Types --type rsa_public_key() :: #'RSAPublicKey'{}. --type rsa_private_key() :: #'RSAPrivateKey'{}. +-export_type([rsa_public_key/0, rsa_private_key/0, rsa_digest_type/0]). +-opaque rsa_public_key() :: #'RSAPublicKey'{}. +-opaque rsa_private_key() :: #'RSAPrivateKey'{}. -type rsa_digest_type() :: 'md5' | 'sha' | 'sha224' | 'sha256' | 'sha384' | 'sha512'. -define(PSS_TRAILER_FIELD, 16#BC). diff --git a/config/elvis.config b/config/elvis.config index 715b0a26a..65f3e9905 100644 --- a/config/elvis.config +++ b/config/elvis.config @@ -9,6 +9,7 @@ rules => [ {elvis_style, consistent_variable_casing, disable}, {elvis_style, dont_repeat_yourself, disable}, + {elvis_style, god_modules, disable}, {elvis_style, nesting_level, disable}, {elvis_style, no_debug_call, disable}, {elvis_style, no_catch_expressions, disable}, @@ -18,7 +19,8 @@ {elvis_style, variable_naming_convention, disable}, {elvis_style, atom_naming_convention, disable}, {elvis_style, param_pattern_matching, disable}, - {elvis_text_style, line_length, #{ limit => 120 }}, + {elvis_style, used_ignored_variable, disable}, + {elvis_text_style, line_length, disable}, {elvis_text_style, no_tabs, disable} ] }
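For reference, the newly disabled used_ignored_variable check targets code of the following shape, assuming the usual elvis_style semantics of flagging underscore-prefixed variables that are nevertheless read (illustrative module, not part of the patch):

-module(ignored_variable_example).
-export([format/1]).

%% elvis_style's used_ignored_variable rule would normally warn here, because
%% _Peer is named as ignored but is still read in the function body.
format(_Peer) ->
	ar_util:format_peer(_Peer).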