
Commit f0d8372

jasone authored and yuslepukhin committed
Make dss operations lockless.
Rather than protecting dss operations with a mutex, use atomic operations. This has negligible impact on synchronization overhead during typical dss allocation, but is a substantial improvement for chunk_in_dss() and the newly added chunk_dss_mergeable(), which can be called multiple times during chunk deallocations. This change also has the advantage of avoiding tsd in deallocation paths associated with purging, which resolves potential deadlocks during thread exit due to attempted tsd resurrection. This resolves #425.
1 parent cdd92de · commit f0d8372
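
For orientation before the per-file diffs: the change works because the dss region is bounded by a base address fixed at boot and an upper limit that only grows, so membership can be decided from a single atomic read of that limit, and mergeability from one shared snapshot of it. The sketch below is a minimal illustration using C11 atomics and simplified names (dss_base, dss_max, in_dss), not the commit's code, which relies on jemalloc's own atomic primitives.

/*
 * Minimal illustrative sketch (not the commit's code): lockless dss
 * membership tests.  dss_base is fixed at boot; dss_max only grows as
 * sbrk() extends the dss, so one atomic snapshot of it suffices.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

static void *dss_base;              /* set once at boot, then read-only */
static _Atomic(void *) dss_max;     /* current upper limit of dss memory */

static bool
in_dss(void *chunk, void *max)
{
    return ((uintptr_t)chunk >= (uintptr_t)dss_base &&
        (uintptr_t)chunk < (uintptr_t)max);
}

bool
chunk_in_dss(void *chunk)
{
    return (in_dss(chunk, atomic_load(&dss_max)));
}

bool
chunk_dss_mergeable(void *chunk_a, void *chunk_b)
{
    /* One snapshot keeps the two membership tests mutually consistent. */
    void *max = atomic_load(&dss_max);

    return (in_dss(chunk_a, max) == in_dss(chunk_b, max));
}

Deciding mergeability against one snapshot, rather than calling chunk_in_dss() twice, also avoids the case where the limit advances between the two reads and the two answers disagree.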

11 files changed: +127 -146 lines

include/jemalloc/internal/chunk.h  (-3)

@@ -71,9 +71,6 @@ bool chunk_purge_wrapper(tsdn_t *tsdn, arena_t *arena,
     chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t offset,
     size_t length);
 bool chunk_boot(void);
-void chunk_prefork(tsdn_t *tsdn);
-void chunk_postfork_parent(tsdn_t *tsdn);
-void chunk_postfork_child(tsdn_t *tsdn);
 
 #endif /* JEMALLOC_H_EXTERNS */
 /******************************************************************************/

include/jemalloc/internal/chunk_dss.h  (+5 -7)

@@ -21,15 +21,13 @@ extern const char *dss_prec_names[];
 /******************************************************************************/
 #ifdef JEMALLOC_H_EXTERNS
 
-dss_prec_t chunk_dss_prec_get(tsdn_t *tsdn);
-bool chunk_dss_prec_set(tsdn_t *tsdn, dss_prec_t dss_prec);
+dss_prec_t chunk_dss_prec_get(void);
+bool chunk_dss_prec_set(dss_prec_t dss_prec);
 void *chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr,
     size_t size, size_t alignment, bool *zero, bool *commit);
-bool chunk_in_dss(tsdn_t *tsdn, void *chunk);
-bool chunk_dss_boot(void);
-void chunk_dss_prefork(tsdn_t *tsdn);
-void chunk_dss_postfork_parent(tsdn_t *tsdn);
-void chunk_dss_postfork_child(tsdn_t *tsdn);
+bool chunk_in_dss(void *chunk);
+bool chunk_dss_mergeable(void *chunk_a, void *chunk_b);
+void chunk_dss_boot(void);
 
 #endif /* JEMALLOC_H_EXTERNS */
 /******************************************************************************/
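
The precedence getter and setter likewise lose their tsdn parameters because the setting no longer lives behind a mutex. A minimal sketch of the idea, again with C11 atomics and illustrative names rather than the commit's exact code:

/* Illustrative sketch: the dss precedence held in a single atomic word. */
#include <stdatomic.h>
#include <stdbool.h>

typedef enum {
    dss_prec_disabled = 0,
    dss_prec_primary,
    dss_prec_secondary
} dss_prec_t;

static _Atomic unsigned dss_prec_default = dss_prec_secondary;

dss_prec_t
chunk_dss_prec_get(void)
{
    return ((dss_prec_t)atomic_load(&dss_prec_default));
}

bool
chunk_dss_prec_set(dss_prec_t dss_prec)
{
    atomic_store(&dss_prec_default, (unsigned)dss_prec);
    return (false);  /* In this sketch there is no lock to take, so no failure. */
}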

include/jemalloc/internal/huge.h  (+1 -1)

@@ -17,7 +17,7 @@ bool huge_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize,
 void *huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
     size_t usize, size_t alignment, bool zero, tcache_t *tcache);
 #ifdef JEMALLOC_JET
-typedef void (huge_dalloc_junk_t)(tsdn_t *, void *, size_t);
+typedef void (huge_dalloc_junk_t)(void *, size_t);
 extern huge_dalloc_junk_t *huge_dalloc_junk;
 #endif
 void huge_dalloc(tsdn_t *tsdn, void *ptr);

include/jemalloc/internal/private_symbols.txt  (+1 -6)

@@ -168,20 +168,15 @@ chunk_dalloc_mmap
 chunk_dalloc_wrapper
 chunk_deregister
 chunk_dss_boot
-chunk_dss_postfork_child
-chunk_dss_postfork_parent
+chunk_dss_mergeable
 chunk_dss_prec_get
 chunk_dss_prec_set
-chunk_dss_prefork
 chunk_hooks_default
 chunk_hooks_get
 chunk_hooks_set
 chunk_in_dss
 chunk_lookup
 chunk_npages
-chunk_postfork_child
-chunk_postfork_parent
-chunk_prefork
 chunk_purge_wrapper
 chunk_register
 chunks_rtree

src/arena.c  (+1 -1)

@@ -3552,7 +3552,7 @@ arena_new(tsdn_t *tsdn, unsigned ind)
     (uint64_t)(uintptr_t)arena;
 }
 
-    arena->dss_prec = chunk_dss_prec_get(tsdn);
+    arena->dss_prec = chunk_dss_prec_get();
 
     ql_new(&arena->achunks);
src/chunk.c  (+9 -37)

@@ -610,10 +610,10 @@ chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
 }
 
 static bool
-chunk_dalloc_default_impl(tsdn_t *tsdn, void *chunk, size_t size)
+chunk_dalloc_default_impl(void *chunk, size_t size)
 {
 
-    if (!have_dss || !chunk_in_dss(tsdn, chunk))
+    if (!have_dss || !chunk_in_dss(chunk))
         return (chunk_dalloc_mmap(chunk, size));
     return (true);
 }

@@ -622,11 +622,8 @@ static bool
 chunk_dalloc_default(void *chunk, size_t size, bool committed,
     unsigned arena_ind)
 {
-    tsdn_t *tsdn;
-
-    tsdn = tsdn_fetch();
 
-    return (chunk_dalloc_default_impl(tsdn, chunk, size));
+    return (chunk_dalloc_default_impl(chunk, size));
 }
 
 void

@@ -644,7 +641,7 @@ chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
     /* Try to deallocate. */
     if (chunk_hooks->dalloc == chunk_dalloc_default) {
         /* Call directly to propagate tsdn. */
-        err = chunk_dalloc_default_impl(tsdn, chunk, size);
+        err = chunk_dalloc_default_impl(chunk, size);
     } else
         err = chunk_hooks->dalloc(chunk, size, committed, arena->ind);
 

@@ -717,13 +714,12 @@ chunk_split_default(void *chunk, size_t size, size_t size_a, size_t size_b,
 }
 
 static bool
-chunk_merge_default_impl(tsdn_t *tsdn, void *chunk_a, void *chunk_b)
+chunk_merge_default_impl(void *chunk_a, void *chunk_b)
 {
 
     if (!maps_coalesce)
         return (true);
-    if (have_dss && chunk_in_dss(tsdn, chunk_a) != chunk_in_dss(tsdn,
-        chunk_b))
+    if (have_dss && !chunk_dss_mergeable(chunk_a, chunk_b))
         return (true);
 
     return (false);

@@ -733,11 +729,8 @@ static bool
 chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
     bool committed, unsigned arena_ind)
 {
-    tsdn_t *tsdn;
-
-    tsdn = tsdn_fetch();
 
-    return (chunk_merge_default_impl(tsdn, chunk_a, chunk_b));
+    return (chunk_merge_default_impl(chunk_a, chunk_b));
 }
 
 static rtree_node_elm_t *

@@ -781,32 +774,11 @@ chunk_boot(void)
     chunksize_mask = chunksize - 1;
     chunk_npages = (chunksize >> LG_PAGE);
 
-    if (have_dss && chunk_dss_boot())
-        return (true);
+    if (have_dss)
+        chunk_dss_boot();
     if (rtree_new(&chunks_rtree, (unsigned)((ZU(1) << (LG_SIZEOF_PTR+3)) -
         opt_lg_chunk), chunks_rtree_node_alloc, NULL))
         return (true);
 
     return (false);
 }
-
-void
-chunk_prefork(tsdn_t *tsdn)
-{
-
-    chunk_dss_prefork(tsdn);
-}
-
-void
-chunk_postfork_parent(tsdn_t *tsdn)
-{
-
-    chunk_dss_postfork_parent(tsdn);
-}
-
-void
-chunk_postfork_child(tsdn_t *tsdn)
-{
-
-    chunk_dss_postfork_child(tsdn);
-}
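
With no dss mutex left to initialize, chunk_dss_boot() has nothing that can fail, which is why chunk_boot() above no longer checks its return value. Continuing the illustrative variables from the earlier sketch (dss_base, dss_max; not the commit's exact code), boot reduces to recording the initial program break:

#include <unistd.h>

void
chunk_dss_boot(void)
{
    void *initial_break = sbrk(0);  /* current program break */

    dss_base = initial_break;
    atomic_store(&dss_max, initial_break);
}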
