Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs

Pull vfs pile 4 from Al Viro:
 "list_lru pile, mostly"

This came out of Andrew's pile, Al ended up doing the merge work so that
Andrew didn't have to.

Additionally, a few fixes.

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs: (42 commits)
  super: fix for destroy lrus
  list_lru: dynamically adjust node arrays
  shrinker: Kill old ->shrink API.
  shrinker: convert remaining shrinkers to count/scan API
  staging/lustre/libcfs: cleanup linux-mem.h
  staging/lustre/ptlrpc: convert to new shrinker API
  staging/lustre/obdclass: convert lu_object shrinker to count/scan API
  staging/lustre/ldlm: convert to shrinkers to count/scan API
  hugepage: convert huge zero page shrinker to new shrinker API
  i915: bail out earlier when shrinker cannot acquire mutex
  drivers: convert shrinkers to new count/scan API
  fs: convert fs shrinkers to new scan/count API
  xfs: fix dquot isolation hang
  xfs-convert-dquot-cache-lru-to-list_lru-fix
  xfs: convert dquot cache lru to list_lru
  xfs: rework buffer dispose list tracking
  xfs-convert-buftarg-lru-to-generic-code-fix
  xfs: convert buftarg LRU to generic code
  fs: convert inode and dentry shrinking to be node aware
  vmscan: per-node deferred work
  ...
torvalds committed Sep 12, 2013
2 parents 3cc69b6 + bf2ba3b commit 26935fb
Showing 56 changed files with 1,777 additions and 1,161 deletions.
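Most of the pile replaces ad-hoc per-cache LRU lists with the generic list_lru structure, and converts the shrinkers that drain them to the new count/scan API seen in the hunks below. As orientation, a cache built on the 3.12-era list_lru interface looks roughly like the following sketch; the my_* names and the placeholder destructor are illustrative, not taken from any commit in this merge.

#include <linux/list_lru.h>
#include <linux/shrinker.h>
#include <linux/slab.h>

struct my_object {
	struct list_head lru;		/* linkage owned by the list_lru */
	/* ... cache payload ... */
};

static struct list_lru my_lru;		/* set up once with list_lru_init() */

/*
 * Isolate callback for list_lru_walk(): runs with the internal lru lock
 * held, so just move the victim to a private dispose list and report it
 * as removed; the actual freeing happens outside the lock.
 */
static enum lru_status my_isolate(struct list_head *item,
				  spinlock_t *lru_lock, void *arg)
{
	struct list_head *dispose = arg;

	list_move(item, dispose);
	return LRU_REMOVED;
}

/* Typical scan_objects body built on the above. */
static unsigned long my_scan(struct shrinker *s, struct shrink_control *sc)
{
	LIST_HEAD(dispose);
	unsigned long freed;

	freed = list_lru_walk(&my_lru, my_isolate, &dispose, sc->nr_to_scan);
	while (!list_empty(&dispose)) {
		struct my_object *obj;

		obj = list_first_entry(&dispose, struct my_object, lru);
		list_del_init(&obj->lru);
		kfree(obj);		/* placeholder destructor */
	}
	return freed;
}

Objects enter the LRU with list_lru_add(&my_lru, &obj->lru) when they go idle and leave it with list_lru_del() on reuse; the per-node bookkeeping that "fs: convert inode and dentry shrinking to be node aware" relies on is kept inside the structure itself.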
4 changes: 4 additions & 0 deletions Documentation/filesystems/porting
@@ -451,3 +451,7 @@ in your dentry operations instead.
 --
 [mandatory]
 	->readdir() is gone now; switch to ->iterate()
+[mandatory]
+	vfs_follow_link has been removed.  Filesystems must use nd_set_link
+	from ->follow_link for normal symlinks, or nd_jump_link for magic
+	/proc/<pid> style links.
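As an illustration of what this note asks for: a filesystem whose symlink target string lives in its in-core inode would now implement ->follow_link along these lines (a sketch against the 3.12-era VFS; my_inode_info, MY_I() and the link_target field are hypothetical).

static void *my_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	struct my_inode_info *mi = MY_I(dentry->d_inode);	/* hypothetical */

	/*
	 * Instead of calling the removed vfs_follow_link(), hand the
	 * target path to the VFS via nd_set_link() and let the caller
	 * continue the walk.
	 */
	nd_set_link(nd, mi->link_target);
	return NULL;	/* opaque cookie, passed back to ->put_link() */
}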
25 changes: 18 additions & 7 deletions arch/x86/kvm/mmu.c
@@ -4421,13 +4421,12 @@ void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm)
 	}
 }
 
-static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc)
+static unsigned long
+mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 {
 	struct kvm *kvm;
-	int nr_to_scan = sc->nr_to_scan;
-
-	if (nr_to_scan == 0)
-		goto out;
+	unsigned long freed = 0;
 
 	raw_spin_lock(&kvm_lock);
 
@@ -4462,25 +4461,37 @@ static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc)
 			goto unlock;
 		}
 
-		prepare_zap_oldest_mmu_page(kvm, &invalid_list);
+		if (prepare_zap_oldest_mmu_page(kvm, &invalid_list))
+			freed++;
 		kvm_mmu_commit_zap_page(kvm, &invalid_list);
 
 unlock:
 		spin_unlock(&kvm->mmu_lock);
 		srcu_read_unlock(&kvm->srcu, idx);
 
+		/*
+		 * unfair on small ones
+		 * per-vm shrinkers cry out
+		 * sadness comes quickly
+		 */
 		list_move_tail(&kvm->vm_list, &vm_list);
 		break;
 	}
 
 	raw_spin_unlock(&kvm_lock);
+	return freed;
 
-out:
+}
+
+static unsigned long
+mmu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+{
 	return percpu_counter_read_positive(&kvm_total_used_mmu_pages);
 }
 
 static struct shrinker mmu_shrinker = {
-	.shrink = mmu_shrink,
+	.count_objects = mmu_shrink_count,
+	.scan_objects = mmu_shrink_scan,
 	.seeks = DEFAULT_SEEKS * 10,
 };

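The kvm/mmu.c hunks above show the pattern this series applies everywhere: the old ->shrink() callback, which conflated "how many objects do you have?" with "free some now", is split into a cheap ->count_objects() and a freeing ->scan_objects(). Reduced to a skeleton (the my_* helpers are placeholders, not kernel code):

static unsigned long my_count(struct shrinker *s, struct shrink_control *sc)
{
	/* must be cheap: called on every reclaim pass just to size the job */
	return my_nr_cached_objects();		/* placeholder counter read */
}

static unsigned long my_scan(struct shrinker *s, struct shrink_control *sc)
{
	unsigned long freed;

	if (!my_trylock())		/* placeholder lock attempt */
		return SHRINK_STOP;	/* no progress possible; stop asking */

	freed = my_reclaim(sc->nr_to_scan);	/* free up to nr_to_scan */
	my_unlock();
	return freed;			/* objects actually freed this call */
}

static struct shrinker my_shrinker = {
	.count_objects	= my_count,
	.scan_objects	= my_scan,
	.seeks		= DEFAULT_SEEKS,
};

A register_shrinker(&my_shrinker) call then hooks it into vmscan, exactly as i915_gem_load() and ttm_pool_mm_shrink_init() do in the files below.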
4 changes: 2 additions & 2 deletions drivers/gpu/drm/i915/i915_dma.c
@@ -1676,7 +1676,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	return 0;
 
 out_gem_unload:
-	if (dev_priv->mm.inactive_shrinker.shrink)
+	if (dev_priv->mm.inactive_shrinker.scan_objects)
 		unregister_shrinker(&dev_priv->mm.inactive_shrinker);
 
 	if (dev->pdev->msi_enabled)
@@ -1715,7 +1715,7 @@ int i915_driver_unload(struct drm_device *dev)
 
 	i915_teardown_sysfs(dev);
 
-	if (dev_priv->mm.inactive_shrinker.shrink)
+	if (dev_priv->mm.inactive_shrinker.scan_objects)
 		unregister_shrinker(&dev_priv->mm.inactive_shrinker);
 
 	mutex_lock(&dev->struct_mutex);
82 changes: 57 additions & 25 deletions drivers/gpu/drm/i915/i915_gem.c
@@ -57,10 +57,12 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
 					 struct drm_i915_fence_reg *fence,
 					 bool enable);
 
-static int i915_gem_inactive_shrink(struct shrinker *shrinker,
-				    struct shrink_control *sc);
+static unsigned long i915_gem_inactive_count(struct shrinker *shrinker,
+					     struct shrink_control *sc);
+static unsigned long i915_gem_inactive_scan(struct shrinker *shrinker,
+					    struct shrink_control *sc);
 static long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
-static void i915_gem_shrink_all(struct drm_i915_private *dev_priv);
+static long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
 static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
 
 static bool cpu_cache_is_coherent(struct drm_device *dev,
@@ -1769,16 +1771,21 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target)
 	return __i915_gem_shrink(dev_priv, target, true);
 }
 
-static void
+static long
 i915_gem_shrink_all(struct drm_i915_private *dev_priv)
 {
 	struct drm_i915_gem_object *obj, *next;
+	long freed = 0;
 
 	i915_gem_evict_everything(dev_priv->dev);
 
 	list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
-				 global_list)
+				 global_list) {
+		if (obj->pages_pin_count == 0)
+			freed += obj->base.size >> PAGE_SHIFT;
 		i915_gem_object_put_pages(obj);
+	}
+	return freed;
 }
 
 static int
@@ -4558,7 +4565,8 @@ i915_gem_load(struct drm_device *dev)
 
 	dev_priv->mm.interruptible = true;
 
-	dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
+	dev_priv->mm.inactive_shrinker.scan_objects = i915_gem_inactive_scan;
+	dev_priv->mm.inactive_shrinker.count_objects = i915_gem_inactive_count;
 	dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
 	register_shrinker(&dev_priv->mm.inactive_shrinker);
 }
@@ -4781,54 +4789,44 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
 #endif
 }
 
-static int
-i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
+static unsigned long
+i915_gem_inactive_count(struct shrinker *shrinker, struct shrink_control *sc)
 {
 	struct drm_i915_private *dev_priv =
 		container_of(shrinker,
 			     struct drm_i915_private,
 			     mm.inactive_shrinker);
 	struct drm_device *dev = dev_priv->dev;
 	struct drm_i915_gem_object *obj;
-	int nr_to_scan = sc->nr_to_scan;
 	bool unlock = true;
-	int cnt;
+	unsigned long count;
 
 	if (!mutex_trylock(&dev->struct_mutex)) {
 		if (!mutex_is_locked_by(&dev->struct_mutex, current))
-			return 0;
+			return SHRINK_STOP;
 
 		if (dev_priv->mm.shrinker_no_lock_stealing)
-			return 0;
+			return SHRINK_STOP;
 
 		unlock = false;
 	}
 
-	if (nr_to_scan) {
-		nr_to_scan -= i915_gem_purge(dev_priv, nr_to_scan);
-		if (nr_to_scan > 0)
-			nr_to_scan -= __i915_gem_shrink(dev_priv, nr_to_scan,
-							false);
-		if (nr_to_scan > 0)
-			i915_gem_shrink_all(dev_priv);
-	}
-
-	cnt = 0;
+	count = 0;
 	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
 		if (obj->pages_pin_count == 0)
-			cnt += obj->base.size >> PAGE_SHIFT;
+			count += obj->base.size >> PAGE_SHIFT;
 
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
 		if (obj->active)
 			continue;
 
 		if (obj->pin_count == 0 && obj->pages_pin_count == 0)
-			cnt += obj->base.size >> PAGE_SHIFT;
+			count += obj->base.size >> PAGE_SHIFT;
 	}
 
 	if (unlock)
 		mutex_unlock(&dev->struct_mutex);
-	return cnt;
+	return count;
 }
 
 /* All the new VM stuff */
@@ -4892,6 +4890,40 @@ unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
 	return 0;
 }
 
+static unsigned long
+i915_gem_inactive_scan(struct shrinker *shrinker, struct shrink_control *sc)
+{
+	struct drm_i915_private *dev_priv =
+		container_of(shrinker,
+			     struct drm_i915_private,
+			     mm.inactive_shrinker);
+	struct drm_device *dev = dev_priv->dev;
+	int nr_to_scan = sc->nr_to_scan;
+	unsigned long freed;
+	bool unlock = true;
+
+	if (!mutex_trylock(&dev->struct_mutex)) {
+		if (!mutex_is_locked_by(&dev->struct_mutex, current))
+			return 0;
+
+		if (dev_priv->mm.shrinker_no_lock_stealing)
+			return 0;
+
+		unlock = false;
+	}
+
+	freed = i915_gem_purge(dev_priv, nr_to_scan);
+	if (freed < nr_to_scan)
+		freed += __i915_gem_shrink(dev_priv, nr_to_scan,
+					   false);
+	if (freed < nr_to_scan)
+		freed += i915_gem_shrink_all(dev_priv);
+
+	if (unlock)
+		mutex_unlock(&dev->struct_mutex);
+	return freed;
+}
+
 struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
 				     struct i915_address_space *vm)
 {
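The split shows up twice above: i915_gem_inactive_count() only sums reclaimable pages, i915_gem_inactive_scan() only frees them, and each bails out when struct_mutex cannot be taken. To make the caller's side of the contract concrete, here is a deliberately simplified model of how the two callbacks are driven; the real logic lives in shrink_slab() in mm/vmscan.c and additionally handles seeks, batch sizing and deferred work, so treat this as a sketch only.

static unsigned long drive_shrinker(struct shrinker *s,
				    struct shrink_control *sc,
				    unsigned long target)
{
	unsigned long freed = 0;

	if (s->count_objects(s, sc) == 0)
		return 0;			/* nothing cached, skip */

	while (target > 0) {
		unsigned long ret;

		sc->nr_to_scan = min(target, 128UL);	/* scan in batches */
		ret = s->scan_objects(s, sc);
		if (ret == SHRINK_STOP)
			break;		/* shrinker cannot progress now */
		freed += ret;
		target -= sc->nr_to_scan;
	}
	return freed;
}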
44 changes: 28 additions & 16 deletions drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -377,28 +377,26 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
 	return nr_free;
 }
 
-/* Get good estimation how many pages are free in pools */
-static int ttm_pool_get_num_unused_pages(void)
-{
-	unsigned i;
-	int total = 0;
-	for (i = 0; i < NUM_POOLS; ++i)
-		total += _manager->pools[i].npages;
-
-	return total;
-}
-
 /**
  * Callback for mm to request pool to reduce number of page held.
+ *
+ * XXX: (dchinner) Deadlock warning!
+ *
+ * ttm_page_pool_free() does memory allocation using GFP_KERNEL.  that means
+ * this can deadlock when called a sc->gfp_mask that is not equal to
+ * GFP_KERNEL.
+ *
+ * This code is crying out for a shrinker per pool....
  */
-static int ttm_pool_mm_shrink(struct shrinker *shrink,
-			      struct shrink_control *sc)
+static unsigned long
+ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 {
 	static atomic_t start_pool = ATOMIC_INIT(0);
 	unsigned i;
 	unsigned pool_offset = atomic_add_return(1, &start_pool);
 	struct ttm_page_pool *pool;
 	int shrink_pages = sc->nr_to_scan;
+	unsigned long freed = 0;
 
 	pool_offset = pool_offset % NUM_POOLS;
 	/* select start pool in round robin fashion */
@@ -408,14 +406,28 @@ static int ttm_pool_mm_shrink(struct shrinker *shrink,
 			break;
 		pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
 		shrink_pages = ttm_page_pool_free(pool, nr_free);
+		freed += nr_free - shrink_pages;
 	}
-	/* return estimated number of unused pages in pool */
-	return ttm_pool_get_num_unused_pages();
+	return freed;
+}
+
+
+static unsigned long
+ttm_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+{
+	unsigned i;
+	unsigned long count = 0;
+
+	for (i = 0; i < NUM_POOLS; ++i)
+		count += _manager->pools[i].npages;
+
+	return count;
 }
 
 static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
 {
-	manager->mm_shrink.shrink = &ttm_pool_mm_shrink;
+	manager->mm_shrink.count_objects = ttm_pool_shrink_count;
+	manager->mm_shrink.scan_objects = ttm_pool_shrink_scan;
 	manager->mm_shrink.seeks = 1;
 	register_shrinker(&manager->mm_shrink);
 }
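On the deadlock warning carried in the comment above: ttm_page_pool_free() allocates with GFP_KERNEL, so being entered from a reclaim context with a stricter sc->gfp_mask is unsafe, and this merge leaves the XXX in place rather than fixing it. A defensive guard along the following lines (hypothetical, not part of this commit) is the usual idiom for shrinkers with such constraints:

static unsigned long
ttm_pool_shrink_scan_guarded(struct shrinker *shrink,
			     struct shrink_control *sc)
{
	/*
	 * Hypothetical wrapper: refuse to shrink unless the caller's
	 * context tolerates the GFP_KERNEL allocations that
	 * ttm_page_pool_free() performs internally.
	 */
	if ((sc->gfp_mask & GFP_KERNEL) != GFP_KERNEL)
		return SHRINK_STOP;

	return ttm_pool_shrink_scan(shrink, sc);
}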