Skip to content

Commit

Permalink
drm/msm: Drop struct_mutex in shrinker path
Browse files Browse the repository at this point in the history
Now that the inactive_list is protected by mm_lock, and everything
else on per-obj basis is protected by obj->resv, we no longer depend
on struct_mutex.

Signed-off-by: Rob Clark <[email protected]>
Reviewed-by: Kristian H. Kristensen <[email protected]>
Signed-off-by: Rob Clark <[email protected]>
  • Loading branch information
robclark committed Nov 5, 2020
1 parent f92f026 commit cf11c1f
Show file tree
Hide file tree
Showing 2 changed files with 0 additions and 55 deletions.
1 change: 0 additions & 1 deletion drivers/gpu/drm/msm/msm_gem.c
Original file line number Diff line number Diff line change
Expand Up @@ -687,7 +687,6 @@ void msm_gem_purge(struct drm_gem_object *obj)
struct drm_device *dev = obj->dev;
struct msm_gem_object *msm_obj = to_msm_bo(obj);

WARN_ON(!mutex_is_locked(&dev->struct_mutex));
WARN_ON(!is_purgeable(msm_obj));
WARN_ON(obj->import_attach);

Expand Down
54 changes: 0 additions & 54 deletions drivers/gpu/drm/msm/msm_gem_shrinker.c
Original file line number Diff line number Diff line change
Expand Up @@ -8,48 +8,13 @@
#include "msm_gem.h"
#include "msm_gpu_trace.h"

/*
 * Try to take dev->struct_mutex on behalf of the shrinker.
 *
 * Returns true when the mutex is held on return: either it was acquired
 * here (*unlock = true, caller must drop it) or the current task already
 * held it (*unlock = false, caller must NOT drop it).  Returns false when
 * another task holds the mutex and it could not be acquired, in which
 * case the shrinker callback should bail out.
 */
static bool msm_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
{
	/* NOTE: we are *closer* to being able to get rid of
	 * mutex_trylock_recursive().. the msm_gem code itself does
	 * not need struct_mutex, although codepaths that can trigger
	 * shrinker are still called in code-paths that hold the
	 * struct_mutex.
	 *
	 * Also, msm_obj->madv is protected by struct_mutex.
	 *
	 * The next step is probably split out a separate lock for
	 * protecting inactive_list, so that shrinker does not need
	 * struct_mutex.
	 */
	switch (mutex_trylock_recursive(&dev->struct_mutex)) {
	case MUTEX_TRYLOCK_FAILED:
		return false;

	case MUTEX_TRYLOCK_SUCCESS:
		*unlock = true;
		return true;

	case MUTEX_TRYLOCK_RECURSIVE:
		*unlock = false;
		return true;
	}

	/* All enum values are handled above; falling through is a bug. */
	BUG();
}

static unsigned long
msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
struct msm_drm_private *priv =
container_of(shrinker, struct msm_drm_private, shrinker);
struct drm_device *dev = priv->dev;
struct msm_gem_object *msm_obj;
unsigned long count = 0;
bool unlock;

if (!msm_gem_shrinker_lock(dev, &unlock))
return 0;

mutex_lock(&priv->mm_lock);

Expand All @@ -63,9 +28,6 @@ msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)

mutex_unlock(&priv->mm_lock);

if (unlock)
mutex_unlock(&dev->struct_mutex);

return count;
}

Expand All @@ -74,13 +36,8 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
struct msm_drm_private *priv =
container_of(shrinker, struct msm_drm_private, shrinker);
struct drm_device *dev = priv->dev;
struct msm_gem_object *msm_obj;
unsigned long freed = 0;
bool unlock;

if (!msm_gem_shrinker_lock(dev, &unlock))
return SHRINK_STOP;

mutex_lock(&priv->mm_lock);

Expand All @@ -98,9 +55,6 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)

mutex_unlock(&priv->mm_lock);

if (unlock)
mutex_unlock(&dev->struct_mutex);

if (freed > 0)
trace_msm_gem_purge(freed << PAGE_SHIFT);

Expand All @@ -112,13 +66,8 @@ msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
{
struct msm_drm_private *priv =
container_of(nb, struct msm_drm_private, vmap_notifier);
struct drm_device *dev = priv->dev;
struct msm_gem_object *msm_obj;
unsigned unmapped = 0;
bool unlock;

if (!msm_gem_shrinker_lock(dev, &unlock))
return NOTIFY_DONE;

mutex_lock(&priv->mm_lock);

Expand All @@ -141,9 +90,6 @@ msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)

mutex_unlock(&priv->mm_lock);

if (unlock)
mutex_unlock(&dev->struct_mutex);

*(unsigned long *)ptr += unmapped;

if (unmapped > 0)
Expand Down

0 comments on commit cf11c1f

Please sign in to comment.