swiotlb: Export swiotlb_max_segment to users
So they can figure out the optimal number of pages that can be
contiguously stitched together without fear of bounce buffering.

We also expose a mechanism for sub-users of the SWIOTLB API, such
as Xen-SWIOTLB, to set the max segment value. Lastly, if
swiotlb=force is set (which mandates that we bounce buffer everything),
we set max_segment so that at least we can bounce buffer one 4K page
instead of a giant 512KB one for which we may not have space.

Signed-off-by: Konrad Rzeszutek Wilk <[email protected]>
Reported-and-Tested-by: Juergen Gross <[email protected]>
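
For illustration, a minimal, self-contained C sketch of the caller pattern the i915 hunk below adopts: ask swiotlb_max_segment() for the largest chunk that can be mapped without bouncing, and fall back to an effectively unlimited, page-aligned size when it returns 0. The stubbed swiotlb_max_segment(), the 4K PAGE_SIZE, and the rounddown() macro are stand-ins assumed here only so the sketch builds as userspace code; the kernel provides its own definitions.

#include <limits.h>
#include <stdio.h>

#define PAGE_SIZE 4096u                     /* assumed 4K pages for the example */
#define rounddown(x, y) ((x) - ((x) % (y))) /* same result as the kernel helper */

/* Stand-in for the exported helper: the real one returns the SWIOTLB
 * table size rounded down to pages, or 0 if no SWIOTLB is configured. */
static unsigned int swiotlb_max_segment(void)
{
	return 64u << 20;	/* pretend a 64MB bounce buffer is present */
}

int main(void)
{
	unsigned int max_segment = swiotlb_max_segment();

	/* Same fallback the i915 hunk uses when SWIOTLB reports nothing. */
	if (!max_segment)
		max_segment = rounddown(UINT_MAX, PAGE_SIZE);

	printf("coalesce contiguous pages up to %u bytes per segment\n",
	       max_segment);
	return 0;
}

Under swiotlb=force the exported helper reports 1, so such a caller ends up mapping one page at a time rather than a large contiguous segment it could not bounce.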
konradwilk committed Jan 6, 2017
1 parent fff5d99 commit 7453c54
Showing 4 changed files with 34 additions and 10 deletions.
11 changes: 1 addition & 10 deletions drivers/gpu/drm/i915/i915_gem.c
@@ -2290,15 +2290,6 @@ void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
 	mutex_unlock(&obj->mm.lock);
 }
 
-static unsigned int swiotlb_max_size(void)
-{
-#if IS_ENABLED(CONFIG_SWIOTLB)
-	return rounddown(swiotlb_nr_tbl() << IO_TLB_SHIFT, PAGE_SIZE);
-#else
-	return 0;
-#endif
-}
-
 static void i915_sg_trim(struct sg_table *orig_st)
 {
 	struct sg_table new_st;
@@ -2345,7 +2336,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 	GEM_BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
 	GEM_BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
 
-	max_segment = swiotlb_max_size();
+	max_segment = swiotlb_max_segment();
 	if (!max_segment)
 		max_segment = rounddown(UINT_MAX, PAGE_SIZE);
 
4 changes: 4 additions & 0 deletions drivers/xen/swiotlb-xen.c
@@ -275,6 +275,10 @@ int __ref xen_swiotlb_init(int verbose, bool early)
 		rc = 0;
 	} else
 		rc = swiotlb_late_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs);
+
+	if (!rc)
+		swiotlb_set_max_segment(PAGE_SIZE);
+
 	return rc;
 error:
 	if (repeat--) {

3 changes: 3 additions & 0 deletions include/linux/swiotlb.h
@@ -114,11 +114,14 @@ swiotlb_dma_supported(struct device *hwdev, u64 mask);
 
 #ifdef CONFIG_SWIOTLB
 extern void __init swiotlb_free(void);
+unsigned int swiotlb_max_segment(void);
 #else
 static inline void swiotlb_free(void) { }
+static inline unsigned int swiotlb_max_segment(void) { return 0; }
 #endif
 
 extern void swiotlb_print_info(void);
 extern int is_swiotlb_buffer(phys_addr_t paddr);
+extern void swiotlb_set_max_segment(unsigned int);
 
 #endif /* __LINUX_SWIOTLB_H */

26 changes: 26 additions & 0 deletions lib/swiotlb.c
@@ -82,6 +82,12 @@ static phys_addr_t io_tlb_overflow_buffer;
 static unsigned int *io_tlb_list;
 static unsigned int io_tlb_index;
 
+/*
+ * Max segment that we can provide which (if pages are contiguous) will
+ * not be bounced (unless SWIOTLB_FORCE is set).
+ */
+unsigned int max_segment;
+
 /*
  * We need to save away the original address corresponding to a mapped entry
  * for the sync operations.
@@ -124,6 +130,20 @@ unsigned long swiotlb_nr_tbl(void)
 }
 EXPORT_SYMBOL_GPL(swiotlb_nr_tbl);
 
+unsigned int swiotlb_max_segment(void)
+{
+	return max_segment;
+}
+EXPORT_SYMBOL_GPL(swiotlb_max_segment);
+
+void swiotlb_set_max_segment(unsigned int val)
+{
+	if (swiotlb_force == SWIOTLB_FORCE)
+		max_segment = 1;
+	else
+		max_segment = rounddown(val, PAGE_SIZE);
+}
+
 /* default to 64MB */
 #define IO_TLB_DEFAULT_SIZE (64UL<<20)
 unsigned long swiotlb_size_or_default(void)
@@ -205,6 +225,7 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
 	if (verbose)
 		swiotlb_print_info();
 
+	swiotlb_set_max_segment(io_tlb_nslabs << IO_TLB_SHIFT);
 	return 0;
 }
 
@@ -283,6 +304,7 @@ swiotlb_late_init_with_default_size(size_t default_size)
 	rc = swiotlb_late_init_with_tbl(vstart, io_tlb_nslabs);
 	if (rc)
 		free_pages((unsigned long)vstart, order);
+
 	return rc;
 }
 
@@ -337,6 +359,8 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
 
 	late_alloc = 1;
 
+	swiotlb_set_max_segment(io_tlb_nslabs << IO_TLB_SHIFT);
+
 	return 0;
 
 cleanup4:
@@ -351,6 +375,7 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
 	io_tlb_end = 0;
 	io_tlb_start = 0;
 	io_tlb_nslabs = 0;
+	max_segment = 0;
 	return -ENOMEM;
 }
 
@@ -379,6 +404,7 @@ void __init swiotlb_free(void)
 			  PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
 	}
 	io_tlb_nslabs = 0;
+	max_segment = 0;
 }
 
 int is_swiotlb_buffer(phys_addr_t paddr)
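
As a rough sanity check of the new setter (a sketch under the assumption of the 64MB IO_TLB_DEFAULT_SIZE above and 2K SWIOTLB slabs, i.e. IO_TLB_SHIFT == 11): the default table has 32768 slabs, so swiotlb_set_max_segment(32768 << 11) stores 64MB, while swiotlb=force collapses the limit to 1 so callers stop coalescing pages altogether. The enum, globals, and macros below are simplified stand-ins for the kernel's, only so the sketch runs as ordinary userspace C.

#include <stdio.h>

#define PAGE_SIZE    4096u
#define IO_TLB_SHIFT 11u	/* assumed 2K SWIOTLB slabs */
#define rounddown(x, y) ((x) - ((x) % (y)))

enum swiotlb_force { SWIOTLB_NORMAL, SWIOTLB_FORCE };

static enum swiotlb_force swiotlb_force = SWIOTLB_NORMAL;
static unsigned int max_segment;

/* Mirrors the policy added in lib/swiotlb.c: force mode collapses the
 * limit so callers fall back to mapping a single page at a time. */
static void swiotlb_set_max_segment(unsigned int val)
{
	if (swiotlb_force == SWIOTLB_FORCE)
		max_segment = 1;
	else
		max_segment = rounddown(val, PAGE_SIZE);
}

int main(void)
{
	unsigned long io_tlb_nslabs = (64ul << 20) >> IO_TLB_SHIFT;	/* 64MB default */

	swiotlb_set_max_segment(io_tlb_nslabs << IO_TLB_SHIFT);
	printf("default table: max_segment = %u bytes\n", max_segment);	/* 67108864 */

	swiotlb_force = SWIOTLB_FORCE;
	swiotlb_set_max_segment(io_tlb_nslabs << IO_TLB_SHIFT);
	printf("swiotlb=force: max_segment = %u\n", max_segment);	/* 1 */
	return 0;
}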
