Merge tag 'locking-core-2024-01-08' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking updates from Ingo Molnar:
 "Lock guards:

   - Use lock guards in the ptrace code

   - Introduce conditional guards to extend to conditional lock
     primitives like mutex_trylock()/mutex_lock_interruptible()/etc.

  lockdep:

   - Optimize 'struct lock_class' to be smaller

   - Update file patterns in MAINTAINERS

  mutexes:

   - Document mutex lifetime rules a bit more"

* tag 'locking-core-2024-01-08' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  locking/mutex: Clarify that mutex_unlock(), and most other sleeping locks, can still use the lock object after it's unlocked
  locking/mutex: Document that mutex_unlock() is non-atomic
  ptrace: Convert ptrace_attach() to use lock guards
  locking/lockdep: Slightly reorder 'struct lock_class' to save some memory
  MAINTAINERS: Add include/linux/lockdep*.h
  cleanup: Add conditional guard support
torvalds committed Jan 9, 2024
2 parents f0a78b3 + 2b9d9e0 commit 6cbf5b3
Showing 10 changed files with 184 additions and 77 deletions.
18 changes: 18 additions & 0 deletions Documentation/locking/mutex-design.rst
@@ -101,6 +101,24 @@ features that make lock debugging easier and faster:
- Detects multi-task circular deadlocks and prints out all affected
locks and tasks (and only those tasks).

Mutexes - and most other sleeping locks like rwsems - do not provide an
implicit reference for the memory they occupy, which reference is released
with mutex_unlock().

[ This is in contrast with spin_unlock() [or completion_done()], which
APIs can be used to guarantee that the memory is not touched by the
lock implementation after spin_unlock()/completion_done() releases
the lock. ]

mutex_unlock() may access the mutex structure even after it has internally
released the lock already - so it's not safe for another context to
acquire the mutex and assume that the mutex_unlock() context is not using
the structure anymore.

The mutex user must ensure that the mutex is not destroyed while a
release operation is still in progress - in other words, callers of
mutex_unlock() must ensure that the mutex stays alive until mutex_unlock()
has returned.
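
To make the rule concrete, here is a sketch of the anti-pattern this warns about (not part of the patch; struct my_dev, task_a() and task_b() are hypothetical):

#include <linux/mutex.h>
#include <linux/slab.h>

/* A hypothetical object that embeds a mutex. */
struct my_dev {
        struct mutex lock;
        bool going_away;
};

/* Task A: the last releasing user. */
static void task_a(struct my_dev *dev)
{
        mutex_lock(&dev->lock);
        dev->going_away = true;
        mutex_unlock(&dev->lock);       /* may still touch dev->lock internally */
}

/* Task B: BUGGY teardown. It can acquire the mutex while task A's
 * mutex_unlock() has released the lock internally but is still
 * accessing the structure. */
static void task_b(struct my_dev *dev)
{
        bool dead;

        mutex_lock(&dev->lock);
        dead = dev->going_away;
        mutex_unlock(&dev->lock);
        if (dead)
                kfree(dev);             /* potential use-after-free of dev->lock */
}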

Interfaces
----------
2 changes: 1 addition & 1 deletion MAINTAINERS
@@ -12424,7 +12424,7 @@ S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git locking/core
F: Documentation/locking/
F: arch/*/include/asm/spinlock*.h
F: include/linux/lockdep.h
F: include/linux/lockdep*.h
F: include/linux/mutex*.h
F: include/linux/rwlock*.h
F: include/linux/rwsem*.h
52 changes: 49 additions & 3 deletions include/linux/cleanup.h
@@ -125,25 +125,55 @@ static inline class_##_name##_t class_##_name##ext##_constructor(_init_args) \
* trivial wrapper around DEFINE_CLASS() above specifically
* for locks.
*
* DEFINE_GUARD_COND(name, ext, condlock)
* wrapper around EXTEND_CLASS above to add conditional lock
* variants to a base class, eg. mutex_trylock() or
* mutex_lock_interruptible().
*
* guard(name):
* an anonymous instance of the (guard) class
* an anonymous instance of the (guard) class, not recommended for
* conditional locks.
*
* scoped_guard (name, args...) { }:
* similar to CLASS(name, scope)(args), except the variable (with the
* explicit name 'scope') is declared in a for-loop such that its scope is
* bound to the next (compound) statement.
*
* for conditional locks the loop body is skipped when the lock is not
* acquired.
*
* scoped_cond_guard (name, fail, args...) { }:
* similar to scoped_guard(), except it does fail when the lock
* acquire fails.
*
*/

#define DEFINE_GUARD(_name, _type, _lock, _unlock) \
DEFINE_CLASS(_name, _type, _unlock, ({ _lock; _T; }), _type _T)
DEFINE_CLASS(_name, _type, if (_T) { _unlock; }, ({ _lock; _T; }), _type _T); \
static inline void * class_##_name##_lock_ptr(class_##_name##_t *_T) \
{ return *_T; }

#define DEFINE_GUARD_COND(_name, _ext, _condlock) \
EXTEND_CLASS(_name, _ext, \
({ void *_t = _T; if (_T && !(_condlock)) _t = NULL; _t; }), \
class_##_name##_t _T) \
static inline void * class_##_name##_ext##_lock_ptr(class_##_name##_t *_T) \
{ return class_##_name##_lock_ptr(_T); }

#define guard(_name) \
CLASS(_name, __UNIQUE_ID(guard))

#define __guard_ptr(_name) class_##_name##_lock_ptr

#define scoped_guard(_name, args...) \
for (CLASS(_name, scope)(args), \
*done = NULL; !done; done = (void *)1)
*done = NULL; __guard_ptr(_name)(&scope) && !done; done = (void *)1)

#define scoped_cond_guard(_name, _fail, args...) \
for (CLASS(_name, scope)(args), \
*done = NULL; !done; done = (void *)1) \
if (!__guard_ptr(_name)(&scope)) _fail; \
else
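
A usage sketch of the three forms described in the comment above (illustrative only; it assumes the mutex, mutex_try and mutex_intr guard classes that <linux/mutex.h> defines later in this commit):

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/mutex.h>

/* Unconditional: the mutex is held until the end of the scope. */
static void bump(struct mutex *m, int *counter)
{
        guard(mutex)(m);
        (*counter)++;
}

/* Conditional: the body is skipped when mutex_trylock() fails. */
static void bump_try(struct mutex *m, int *counter)
{
        scoped_guard(mutex_try, m)
                (*counter)++;
}

/* Conditional with an explicit failure statement, the form
 * ptrace_attach() now uses. */
static int bump_intr(struct mutex *m, int *counter)
{
        scoped_cond_guard(mutex_intr, return -EINTR, m)
                (*counter)++;
        return 0;
}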

/*
* Additional helper macros for generating lock guards with types, either for
@@ -152,6 +182,7 @@ static inline class_##_name##_t class_##_name##ext##_constructor(_init_args) \
*
* DEFINE_LOCK_GUARD_0(name, lock, unlock, ...)
* DEFINE_LOCK_GUARD_1(name, type, lock, unlock, ...)
* DEFINE_LOCK_GUARD_1_COND(name, ext, condlock)
*
* will result in the following type:
*
@@ -173,6 +204,11 @@ typedef struct { \
static inline void class_##_name##_destructor(class_##_name##_t *_T) \
{ \
if (_T->lock) { _unlock; } \
} \
\
static inline void *class_##_name##_lock_ptr(class_##_name##_t *_T) \
{ \
return _T->lock; \
}


@@ -201,4 +237,14 @@ __DEFINE_LOCK_GUARD_1(_name, _type, _lock)
__DEFINE_UNLOCK_GUARD(_name, void, _unlock, __VA_ARGS__) \
__DEFINE_LOCK_GUARD_0(_name, _lock)

#define DEFINE_LOCK_GUARD_1_COND(_name, _ext, _condlock) \
EXTEND_CLASS(_name, _ext, \
({ class_##_name##_t _t = { .lock = l }, *_T = &_t;\
if (_T->lock && !(_condlock)) _T->lock = NULL; \
_t; }), \
typeof_member(class_##_name##_t, lock) l) \
static inline void * class_##_name##_ext##_lock_ptr(class_##_name##_t *_T) \
{ return class_##_name##_lock_ptr(_T); }
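
For illustration, the _1 helpers above might be combined like this for a new lock type (mylock and its acquire/tryacquire/release functions are made up for the sketch):

struct mylock;

extern void mylock_acquire(struct mylock *l);
extern bool mylock_tryacquire(struct mylock *l);
extern void mylock_release(struct mylock *l);

DEFINE_LOCK_GUARD_1(mylock, struct mylock,
                    mylock_acquire(_T->lock),
                    mylock_release(_T->lock))

DEFINE_LOCK_GUARD_1_COND(mylock, _try, mylock_tryacquire(_T->lock))

/* usage: scoped_guard(mylock_try, &some_lock) { ... } */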


#endif /* __LINUX_GUARDS_H */
2 changes: 1 addition & 1 deletion include/linux/lockdep_types.h
@@ -127,12 +127,12 @@ struct lock_class {
unsigned long usage_mask;
const struct lock_trace *usage_traces[LOCK_TRACE_STATES];

const char *name;
/*
* Generation counter, when doing certain classes of graph walking,
* to ensure that we check one node only once:
*/
int name_version;
const char *name;

u8 wait_type_inner;
u8 wait_type_outer;
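
The saving comes from alignment padding: reordering members lets smaller fields share an 8-byte slot instead of leaving holes. A toy user-space illustration of the same effect (the struct and byte counts are made up and assume a 64-bit ABI, not the actual lock_class layout):

#include <stdio.h>

struct sparse {         /* 1 + 7 (pad) + 8 + 4 + 4 (pad) = 24 bytes */
        char tag;
        long value;
        int gen;
};

struct dense {          /* 8 + 4 + 1 + 3 (pad) = 16 bytes */
        long value;
        int gen;
        char tag;
};

int main(void)
{
        printf("sparse=%zu dense=%zu\n",
               sizeof(struct sparse), sizeof(struct dense));
        return 0;
}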
3 changes: 2 additions & 1 deletion include/linux/mutex.h
@@ -221,6 +221,7 @@ extern void mutex_unlock(struct mutex *lock);
extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);

DEFINE_GUARD(mutex, struct mutex *, mutex_lock(_T), mutex_unlock(_T))
DEFINE_FREE(mutex, struct mutex *, if (_T) mutex_unlock(_T))
DEFINE_GUARD_COND(mutex, _try, mutex_trylock(_T))
DEFINE_GUARD_COND(mutex, _intr, mutex_lock_interruptible(_T) == 0)

#endif /* __LINUX_MUTEX_H */
8 changes: 4 additions & 4 deletions include/linux/rwsem.h
@@ -203,11 +203,11 @@ extern void up_read(struct rw_semaphore *sem);
extern void up_write(struct rw_semaphore *sem);

DEFINE_GUARD(rwsem_read, struct rw_semaphore *, down_read(_T), up_read(_T))
DEFINE_GUARD(rwsem_write, struct rw_semaphore *, down_write(_T), up_write(_T))

DEFINE_FREE(up_read, struct rw_semaphore *, if (_T) up_read(_T))
DEFINE_FREE(up_write, struct rw_semaphore *, if (_T) up_write(_T))
DEFINE_GUARD_COND(rwsem_read, _try, down_read_trylock(_T))
DEFINE_GUARD_COND(rwsem_read, _intr, down_read_interruptible(_T) == 0)

DEFINE_GUARD(rwsem_write, struct rw_semaphore *, down_write(_T), up_write(_T))
DEFINE_GUARD_COND(rwsem_write, _try, down_write_trylock(_T))

/*
* downgrade write lock to read lock
2 changes: 2 additions & 0 deletions include/linux/sched/task.h
@@ -226,4 +226,6 @@ static inline void task_unlock(struct task_struct *p)
spin_unlock(&p->alloc_lock);
}

DEFINE_GUARD(task_lock, struct task_struct *, task_lock(_T), task_unlock(_T))
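
A possible use of this new guard (sketch; print_comm() is hypothetical, and ->comm is one of the fields task_lock() protects):

#include <linux/printk.h>
#include <linux/sched/task.h>

static void print_comm(struct task_struct *p)
{
        guard(task_lock)(p);            /* task_unlock(p) runs at scope exit */
        pr_info("comm=%s\n", p->comm);
}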

#endif /* _LINUX_SCHED_TASK_H */
41 changes: 41 additions & 0 deletions include/linux/spinlock.h
@@ -507,6 +507,8 @@ DEFINE_LOCK_GUARD_1(raw_spinlock, raw_spinlock_t,
raw_spin_lock(_T->lock),
raw_spin_unlock(_T->lock))

DEFINE_LOCK_GUARD_1_COND(raw_spinlock, _try, raw_spin_trylock(_T->lock))

DEFINE_LOCK_GUARD_1(raw_spinlock_nested, raw_spinlock_t,
raw_spin_lock_nested(_T->lock, SINGLE_DEPTH_NESTING),
raw_spin_unlock(_T->lock))
@@ -515,23 +517,62 @@ DEFINE_LOCK_GUARD_1(raw_spinlock_irq, raw_spinlock_t,
raw_spin_lock_irq(_T->lock),
raw_spin_unlock_irq(_T->lock))

DEFINE_LOCK_GUARD_1_COND(raw_spinlock_irq, _try, raw_spin_trylock_irq(_T->lock))

DEFINE_LOCK_GUARD_1(raw_spinlock_irqsave, raw_spinlock_t,
raw_spin_lock_irqsave(_T->lock, _T->flags),
raw_spin_unlock_irqrestore(_T->lock, _T->flags),
unsigned long flags)

DEFINE_LOCK_GUARD_1_COND(raw_spinlock_irqsave, _try,
raw_spin_trylock_irqsave(_T->lock, _T->flags))

DEFINE_LOCK_GUARD_1(spinlock, spinlock_t,
spin_lock(_T->lock),
spin_unlock(_T->lock))

DEFINE_LOCK_GUARD_1_COND(spinlock, _try, spin_trylock(_T->lock))

DEFINE_LOCK_GUARD_1(spinlock_irq, spinlock_t,
spin_lock_irq(_T->lock),
spin_unlock_irq(_T->lock))

DEFINE_LOCK_GUARD_1_COND(spinlock_irq, _try,
spin_trylock_irq(_T->lock))

DEFINE_LOCK_GUARD_1(spinlock_irqsave, spinlock_t,
spin_lock_irqsave(_T->lock, _T->flags),
spin_unlock_irqrestore(_T->lock, _T->flags),
unsigned long flags)

DEFINE_LOCK_GUARD_1_COND(spinlock_irqsave, _try,
spin_trylock_irqsave(_T->lock, _T->flags))

DEFINE_LOCK_GUARD_1(read_lock, rwlock_t,
read_lock(_T->lock),
read_unlock(_T->lock))

DEFINE_LOCK_GUARD_1(read_lock_irq, rwlock_t,
read_lock_irq(_T->lock),
read_unlock_irq(_T->lock))

DEFINE_LOCK_GUARD_1(read_lock_irqsave, rwlock_t,
read_lock_irqsave(_T->lock, _T->flags),
read_unlock_irqrestore(_T->lock, _T->flags),
unsigned long flags)

DEFINE_LOCK_GUARD_1(write_lock, rwlock_t,
write_lock(_T->lock),
write_unlock(_T->lock))

DEFINE_LOCK_GUARD_1(write_lock_irq, rwlock_t,
write_lock_irq(_T->lock),
write_unlock_irq(_T->lock))

DEFINE_LOCK_GUARD_1(write_lock_irqsave, rwlock_t,
write_lock_irqsave(_T->lock, _T->flags),
write_unlock_irqrestore(_T->lock, _T->flags),
unsigned long flags)
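
A sketch of one of the new conditional spinlock guards in use (stats_try_add() and its counters are hypothetical):

#include <linux/spinlock.h>

/* Opportunistically add to a statistic instead of spinning. The body
 * only runs when spin_trylock_irqsave() succeeds; the saved 'flags'
 * live inside the guard object. */
static bool stats_try_add(spinlock_t *lock, u64 *total, u64 delta)
{
        scoped_guard(spinlock_irqsave_try, lock) {
                *total += delta;
                return true;    /* unlock + IRQ restore run on scope exit */
        }
        return false;
}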

#undef __LINUX_INSIDE_SPINLOCK_H
#endif /* __LINUX_SPINLOCK_H */
5 changes: 5 additions & 0 deletions kernel/locking/mutex.c
@@ -532,6 +532,11 @@ static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigne
* This function must not be used in interrupt context. Unlocking
* of a not locked mutex is not allowed.
*
* The caller must ensure that the mutex stays alive until this function has
* returned - mutex_unlock() can NOT directly be used to release an object such
* that another concurrent task can free it.
* Mutexes are different from spinlocks & refcounts in this aspect.
*
* This function is similar to (but not equivalent to) up().
*/
void __sched mutex_unlock(struct mutex *lock)
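
For contrast, not part of the patch: a sketch of a teardown that honors this rule by pairing the mutex with a completion (all names hypothetical). Completions may be used this way because they are built on spinlocks, which do provide the no-access-after-release guarantee discussed above:

#include <linux/completion.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct my_obj {
        struct mutex lock;
        struct completion released;
};

/* The last locking user signals only after mutex_unlock() has returned. */
static void last_user(struct my_obj *obj)
{
        mutex_lock(&obj->lock);
        /* ... final access to the object ... */
        mutex_unlock(&obj->lock);
        complete(&obj->released);
}

/* The freeing side waits until the unlock has fully finished. */
static void destroy_obj(struct my_obj *obj)
{
        wait_for_completion(&obj->released);
        mutex_destroy(&obj->lock);
        kfree(obj);
}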