Merge tag 'bitmap-6.0-rc1' of https://github.com/norov/linux
Pull bitmap updates from Yury Norov:

 - fix the duplicated comments on bitmap_to_arr64() (Qu Wenruo)

 - optimize out non-atomic bitops on compile-time constants (Alexander
   Lobakin); a simplified sketch of this trick follows the commit list
   below

 - cleanup bitmap-related headers (Yury Norov)

 - x86/olpc: fix 'logical not is only applied to the left hand side'
   (Alexander Lobakin)

 - lib/nodemask: inline wrappers around bitmap (Yury Norov)

* tag 'bitmap-6.0-rc1' of https://github.com/norov/linux: (26 commits)
  lib/nodemask: inline next_node_in() and node_random()
  powerpc: drop dependency on <asm/machdep.h> in archrandom.h
  x86/olpc: fix 'logical not is only applied to the left hand side'
  lib/cpumask: move some one-line wrappers to header file
  headers/deps: mm: align MANITAINERS and Docs with new gfp.h structure
  headers/deps: mm: Split <linux/gfp_types.h> out of <linux/gfp.h>
  headers/deps: mm: Optimize <linux/gfp.h> header dependencies
  lib/cpumask: move trivial wrappers around find_bit to the header
  lib/cpumask: change return types to unsigned where appropriate
  cpumask: change return types to bool where appropriate
  lib/bitmap: change type of bitmap_weight to unsigned long
  lib/bitmap: change return types to bool where appropriate
  arm: align find_bit declarations with generic kernel
  iommu/vt-d: avoid invalid memory access via node_online(NUMA_NO_NODE)
  lib/test_bitmap: test the tail after bitmap_to_arr64()
  lib/bitmap: fix off-by-one in bitmap_to_arr64()
  lib: test_bitmap: add compile-time optimization/evaluations assertions
  bitmap: don't assume compiler evaluates small mem*() builtins calls
  net/ice: fix initializing the bitmap in the switch code
  bitops: let optimize out non-atomic bitops on compile-time constants
  ...
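The optimization referenced above routes a bitop through a plain-C helper whenever the compiler can prove both operands are compile-time constants, so the whole call folds to a constant instead of emitting a (possibly asm-backed) runtime operation. Below is a minimal, self-contained sketch of the dispatch trick; it mirrors the shape of the series but is not the kernel's exact macro (the real one lives in <linux/bitops.h> and checks a few more conditions):

```c
#include <stdbool.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

/* Plain-C variant: no volatile access, no asm, so the optimizer is
 * free to evaluate it entirely at compile time. */
static inline bool const_test_bit(unsigned long nr, const unsigned long *addr)
{
	return 1UL & (addr[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG));
}

/* Stand-in for an arch implementation, which is typically volatile-
 * or asm-backed and therefore opaque to the optimizer. */
static inline bool arch_test_bit(unsigned long nr,
				 const volatile unsigned long *addr)
{
	return 1UL & (addr[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG));
}

/* Dispatch: a constant bit number over a constant bitmap word takes
 * the foldable path; anything known only at run time still reaches
 * the arch op. __builtin_constant_p() never evaluates its argument. */
#define test_bit(nr, addr)						\
	((__builtin_constant_p(nr) &&					\
	  __builtin_constant_p(*(const unsigned long *)(addr))) ?	\
	 const_test_bit(nr, (const unsigned long *)(addr)) :		\
	 arch_test_bit(nr, addr))
```

The shortlog entry "lib: test_bitmap: add compile-time optimization/evaluations assertions" adds checks that this folding really does happen for constant inputs.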
torvalds committed Aug 8, 2022 · 2 parents 3bc1bc0 + 36d4b36 · commit 4e23eee
Showing 38 changed files with 1,076 additions and 789 deletions.
8 changes: 4 additions & 4 deletions Documentation/core-api/mm-api.rst
@@ -22,16 +22,16 @@ Memory Allocation Controls
 .. kernel-doc:: include/linux/gfp.h
    :internal:
 
-.. kernel-doc:: include/linux/gfp.h
+.. kernel-doc:: include/linux/gfp_types.h
    :doc: Page mobility and placement hints
 
-.. kernel-doc:: include/linux/gfp.h
+.. kernel-doc:: include/linux/gfp_types.h
    :doc: Watermark modifiers
 
-.. kernel-doc:: include/linux/gfp.h
+.. kernel-doc:: include/linux/gfp_types.h
    :doc: Reclaim modifiers
 
-.. kernel-doc:: include/linux/gfp.h
+.. kernel-doc:: include/linux/gfp_types.h
    :doc: Useful GFP flag combinations
 
 The Slab Cache
2 changes: 1 addition & 1 deletion MAINTAINERS
@@ -3603,7 +3603,6 @@ F: lib/bitmap.c
 F: lib/cpumask.c
 F: lib/find_bit.c
 F: lib/find_bit_benchmark.c
-F: lib/nodemask.c
 F: lib/test_bitmap.c
 F: tools/include/linux/bitmap.h
 F: tools/include/linux/find.h
@@ -13136,6 +13135,7 @@ W: http://www.linux-mm.org
 T: git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
 T: quilt git://git.kernel.org/pub/scm/linux/kernel/git/akpm/25-new
 F: include/linux/gfp.h
+F: include/linux/gfp_types.h
 F: include/linux/memory_hotplug.h
 F: include/linux/mm.h
 F: include/linux/mmzone.h
32 changes: 17 additions & 15 deletions arch/alpha/include/asm/bitops.h
@@ -46,8 +46,8 @@ set_bit(unsigned long nr, volatile void * addr)
 /*
  * WARNING: non atomic version.
  */
-static inline void
-__set_bit(unsigned long nr, volatile void * addr)
+static __always_inline void
+arch___set_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	int *m = ((int *) addr) + (nr >> 5);
 
@@ -82,8 +82,8 @@ clear_bit_unlock(unsigned long nr, volatile void * addr)
 /*
  * WARNING: non atomic version.
  */
-static __inline__ void
-__clear_bit(unsigned long nr, volatile void * addr)
+static __always_inline void
+arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	int *m = ((int *) addr) + (nr >> 5);
 
@@ -94,7 +94,7 @@ static inline void
 __clear_bit_unlock(unsigned long nr, volatile void * addr)
 {
 	smp_mb();
-	__clear_bit(nr, addr);
+	arch___clear_bit(nr, addr);
 }
 
 static inline void
@@ -118,8 +118,8 @@ change_bit(unsigned long nr, volatile void * addr)
 /*
  * WARNING: non atomic version.
  */
-static __inline__ void
-__change_bit(unsigned long nr, volatile void * addr)
+static __always_inline void
+arch___change_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	int *m = ((int *) addr) + (nr >> 5);
 
@@ -186,8 +186,8 @@ test_and_set_bit_lock(unsigned long nr, volatile void *addr)
 /*
  * WARNING: non atomic version.
  */
-static inline int
-__test_and_set_bit(unsigned long nr, volatile void * addr)
+static __always_inline bool
+arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long mask = 1 << (nr & 0x1f);
 	int *m = ((int *) addr) + (nr >> 5);
@@ -230,8 +230,8 @@ test_and_clear_bit(unsigned long nr, volatile void * addr)
 /*
  * WARNING: non atomic version.
  */
-static inline int
-__test_and_clear_bit(unsigned long nr, volatile void * addr)
+static __always_inline bool
+arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long mask = 1 << (nr & 0x1f);
 	int *m = ((int *) addr) + (nr >> 5);
@@ -272,8 +272,8 @@ test_and_change_bit(unsigned long nr, volatile void * addr)
 /*
  * WARNING: non atomic version.
  */
-static __inline__ int
-__test_and_change_bit(unsigned long nr, volatile void * addr)
+static __always_inline bool
+arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long mask = 1 << (nr & 0x1f);
 	int *m = ((int *) addr) + (nr >> 5);
@@ -283,8 +283,8 @@ __test_and_change_bit(unsigned long nr, volatile void * addr)
 	return (old & mask) != 0;
 }
 
-static inline int
-test_bit(int nr, const volatile void * addr)
+static __always_inline bool
+arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
 {
 	return (1UL & (((const int *) addr)[nr >> 5] >> (nr & 31))) != 0UL;
 }
@@ -450,6 +450,8 @@ sched_find_first_bit(const unsigned long b[2])
 	return __ffs(tmp) + ofs;
 }
 
+#include <asm-generic/bitops/non-instrumented-non-atomic.h>
+
 #include <asm-generic/bitops/le.h>
 
 #include <asm-generic/bitops/ext2-atomic-setbit.h>
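The alpha conversion above is the template every other architecture in this merge follows: the non-atomic helpers are renamed with an arch_ prefix, marked __always_inline, given unsigned long/bool prototypes, and the familiar names are restored by including <asm-generic/bitops/non-instrumented-non-atomic.h>. That header is approximately the alias table sketched below (on instrumented builds, KASAN/KCSAN wrappers sit on these inner names instead, and <linux/bitops.h> layers the const-folding dispatch on top):

```c
/* Sketch of <asm-generic/bitops/non-instrumented-non-atomic.h>: map
 * the inner generic names straight onto the arch primitives. */
#define ___set_bit		arch___set_bit
#define ___clear_bit		arch___clear_bit
#define ___change_bit		arch___change_bit
#define ___test_and_set_bit	arch___test_and_set_bit
#define ___test_and_clear_bit	arch___test_and_clear_bit
#define ___test_and_change_bit	arch___test_and_change_bit
#define _test_bit		arch_test_bit
```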
18 changes: 10 additions & 8 deletions arch/arm/include/asm/bitops.h
@@ -160,18 +160,20 @@ extern int _test_and_change_bit(int nr, volatile unsigned long * p);
 /*
  * Little endian assembly bitops. nr = 0 -> byte 0 bit 0.
  */
-extern int _find_first_zero_bit_le(const unsigned long *p, unsigned size);
-extern int _find_next_zero_bit_le(const unsigned long *p, int size, int offset);
-extern int _find_first_bit_le(const unsigned long *p, unsigned size);
-extern int _find_next_bit_le(const unsigned long *p, int size, int offset);
+unsigned long _find_first_zero_bit_le(const unsigned long *p, unsigned long size);
+unsigned long _find_next_zero_bit_le(const unsigned long *p,
+				     unsigned long size, unsigned long offset);
+unsigned long _find_first_bit_le(const unsigned long *p, unsigned long size);
+unsigned long _find_next_bit_le(const unsigned long *p, unsigned long size, unsigned long offset);
 
 /*
  * Big endian assembly bitops. nr = 0 -> byte 3 bit 0.
  */
-extern int _find_first_zero_bit_be(const unsigned long *p, unsigned size);
-extern int _find_next_zero_bit_be(const unsigned long *p, int size, int offset);
-extern int _find_first_bit_be(const unsigned long *p, unsigned size);
-extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
+unsigned long _find_first_zero_bit_be(const unsigned long *p, unsigned long size);
+unsigned long _find_next_zero_bit_be(const unsigned long *p,
+				     unsigned long size, unsigned long offset);
+unsigned long _find_first_bit_be(const unsigned long *p, unsigned long size);
+unsigned long _find_next_bit_be(const unsigned long *p, unsigned long size, unsigned long offset);
 
 #ifndef CONFIG_SMP
 /*
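Widening these arm declarations is not cosmetic: the generic find-bit API in <linux/find.h> traffics exclusively in unsigned long for both the size and the returned index, and its "not found" result is the size argument itself, so an int return could truncate or sign-confuse results on very large bitmaps. A usage sketch under the generic semantics (example_caller() is illustrative, not code from this merge):

```c
#include <linux/bitmap.h>
#include <linux/find.h>

/* Illustrative kernel-style caller: the sentinel for "no bit found"
 * is the bitmap length, never -1, so the index variable must share
 * the API's unsigned long type. */
static void example_caller(const unsigned long *map, unsigned long nbits)
{
	unsigned long bit = find_first_zero_bit(map, nbits);

	if (bit >= nbits)
		return;			/* map is fully set */

	/* ... claim bit 'bit' ... */
}
```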
24 changes: 15 additions & 9 deletions arch/hexagon/include/asm/bitops.h
@@ -127,38 +127,45 @@ static inline void change_bit(int nr, volatile void *addr)
  * be atomic, particularly for things like slab_lock and slab_unlock.
  *
  */
-static inline void __clear_bit(int nr, volatile unsigned long *addr)
+static __always_inline void
+arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	test_and_clear_bit(nr, addr);
 }
 
-static inline void __set_bit(int nr, volatile unsigned long *addr)
+static __always_inline void
+arch___set_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	test_and_set_bit(nr, addr);
 }
 
-static inline void __change_bit(int nr, volatile unsigned long *addr)
+static __always_inline void
+arch___change_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	test_and_change_bit(nr, addr);
 }
 
 /* Apparently, at least some of these are allowed to be non-atomic */
-static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
+static __always_inline bool
+arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	return test_and_clear_bit(nr, addr);
 }
 
-static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
+static __always_inline bool
+arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	return test_and_set_bit(nr, addr);
 }
 
-static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
+static __always_inline bool
+arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	return test_and_change_bit(nr, addr);
 }
 
-static inline int __test_bit(int nr, const volatile unsigned long *addr)
+static __always_inline bool
+arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
 {
 	int retval;
 
@@ -172,8 +179,6 @@ static inline int __test_bit(int nr, const volatile unsigned long *addr)
 	return retval;
 }
 
-#define test_bit(nr, addr) __test_bit(nr, addr)
-
 /*
  * ffz - find first zero in word.
  * @word: The word to search
@@ -271,6 +276,7 @@ static inline unsigned long __fls(unsigned long word)
 }
 
 #include <asm-generic/bitops/lock.h>
+#include <asm-generic/bitops/non-instrumented-non-atomic.h>
 
 #include <asm-generic/bitops/fls64.h>
 #include <asm-generic/bitops/sched.h>
42 changes: 22 additions & 20 deletions arch/ia64/include/asm/bitops.h
@@ -53,16 +53,16 @@ set_bit (int nr, volatile void *addr)
 }
 
 /**
- * __set_bit - Set a bit in memory
+ * arch___set_bit - Set a bit in memory
  * @nr: the bit to set
  * @addr: the address to start counting from
  *
  * Unlike set_bit(), this function is non-atomic and may be reordered.
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-static __inline__ void
-__set_bit (int nr, volatile void *addr)
+static __always_inline void
+arch___set_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	*((__u32 *) addr + (nr >> 5)) |= (1 << (nr & 31));
 }
@@ -135,16 +135,16 @@ __clear_bit_unlock(int nr, void *addr)
 }
 
 /**
- * __clear_bit - Clears a bit in memory (non-atomic version)
+ * arch___clear_bit - Clears a bit in memory (non-atomic version)
  * @nr: the bit to clear
  * @addr: the address to start counting from
  *
  * Unlike clear_bit(), this function is non-atomic and may be reordered.
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-static __inline__ void
-__clear_bit (int nr, volatile void *addr)
+static __always_inline void
+arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	*((__u32 *) addr + (nr >> 5)) &= ~(1 << (nr & 31));
 }
@@ -175,16 +175,16 @@ change_bit (int nr, volatile void *addr)
 }
 
 /**
- * __change_bit - Toggle a bit in memory
+ * arch___change_bit - Toggle a bit in memory
  * @nr: the bit to toggle
  * @addr: the address to start counting from
  *
  * Unlike change_bit(), this function is non-atomic and may be reordered.
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-static __inline__ void
-__change_bit (int nr, volatile void *addr)
+static __always_inline void
+arch___change_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	*((__u32 *) addr + (nr >> 5)) ^= (1 << (nr & 31));
 }
@@ -224,16 +224,16 @@ test_and_set_bit (int nr, volatile void *addr)
 #define test_and_set_bit_lock test_and_set_bit
 
 /**
- * __test_and_set_bit - Set a bit and return its old value
+ * arch___test_and_set_bit - Set a bit and return its old value
  * @nr: Bit to set
  * @addr: Address to count from
  *
  * This operation is non-atomic and can be reordered.
  * If two examples of this operation race, one can appear to succeed
  * but actually fail.  You must protect multiple accesses with a lock.
 */
-static __inline__ int
-__test_and_set_bit (int nr, volatile void *addr)
+static __always_inline bool
+arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	__u32 *p = (__u32 *) addr + (nr >> 5);
 	__u32 m = 1 << (nr & 31);
@@ -269,16 +269,16 @@ test_and_clear_bit (int nr, volatile void *addr)
 }
 
 /**
- * __test_and_clear_bit - Clear a bit and return its old value
+ * arch___test_and_clear_bit - Clear a bit and return its old value
  * @nr: Bit to clear
  * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
  * If two examples of this operation race, one can appear to succeed
  * but actually fail.  You must protect multiple accesses with a lock.
  */
-static __inline__ int
-__test_and_clear_bit(int nr, volatile void * addr)
+static __always_inline bool
+arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	__u32 *p = (__u32 *) addr + (nr >> 5);
 	__u32 m = 1 << (nr & 31);
@@ -314,14 +314,14 @@ test_and_change_bit (int nr, volatile void *addr)
 }
 
 /**
- * __test_and_change_bit - Change a bit and return its old value
+ * arch___test_and_change_bit - Change a bit and return its old value
  * @nr: Bit to change
  * @addr: Address to count from
  *
  * This operation is non-atomic and can be reordered.
  */
-static __inline__ int
-__test_and_change_bit (int nr, void *addr)
+static __always_inline bool
+arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	__u32 old, bit = (1 << (nr & 31));
 	__u32 *m = (__u32 *) addr + (nr >> 5);
@@ -331,8 +331,8 @@ __test_and_change_bit (int nr, void *addr)
 	return (old & bit) != 0;
 }
 
-static __inline__ int
-test_bit (int nr, const volatile void *addr)
+static __always_inline bool
+arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
 {
 	return 1 & (((const volatile __u32 *) addr)[nr >> 5] >> (nr & 31));
 }
@@ -443,6 +443,8 @@ static __inline__ unsigned long __arch_hweight64(unsigned long x)
 
 #ifdef __KERNEL__
 
+#include <asm-generic/bitops/non-instrumented-non-atomic.h>
+
 #include <asm-generic/bitops/le.h>
 
 #include <asm-generic/bitops/ext2-atomic-setbit.h>
2 changes: 1 addition & 1 deletion arch/ia64/include/asm/processor.h
@@ -538,7 +538,7 @@ ia64_get_irr(unsigned int vector)
 {
 	unsigned int reg = vector / 64;
 	unsigned int bit = vector % 64;
-	u64 irr;
+	unsigned long irr;
 
 	switch (reg) {
 	case 0: irr = ia64_getreg(_IA64_REG_CR_IRR0); break;
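The one-line ia64_get_irr() change is a knock-on effect of the retyped bitops: ia64_get_irr() hands &irr to test_bit(), and since in-kernel u64 is unsigned long long, a u64 * no longer converts to the const volatile unsigned long * that the stricter arch_test_bit() prototype takes, even though both types are 64 bits wide on ia64. A hypothetical reduction of the mismatch (irr_bit_pending() is illustrative, not code from this merge):

```c
#include <linux/bitops.h>
#include <linux/types.h>

/* With 'u64 irr;' here, the test_bit() call would now trigger an
 * incompatible-pointer-types warning: u64 * (unsigned long long *)
 * and unsigned long * stay distinct types even at equal width. */
static bool irr_bit_pending(u64 raw, unsigned int bit)
{
	unsigned long irr = raw;	/* was: u64 irr; */

	return test_bit(bit, &irr);
}
```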
