swiotlb: reduce the swiotlb buffer size on allocation failure
At the moment the AMD encrypted platform reserves 6% of RAM for SWIOTLB,
or 1GB, whichever is less. However, it is possible that no block big
enough is available in low memory, which makes the SWIOTLB allocation
fail, and the kernel then continues without a usable SWIOTLB. In that
case a VM hangs on DMA.

This moves the alloc+remap code into a helper and calls it from a loop
that halves the size on each iteration (see the sketch below).

This also updates default_nslabs on successful allocation; not doing so
looks like an oversight, as a stale value should have broken callers of
swiotlb_size_or_default().
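
For illustration, here is a minimal user-space sketch of the halving
policy, not kernel code. The constants are assumed to mirror their kernel
definitions (2 KiB slabs, 128-slab segments, a 1 MiB minimum), and
fail_above is a made-up stand-in for memblock running out of contiguous
low memory:

#include <stdio.h>

#define IO_TLB_SHIFT	 11	/* 2 KiB per slab (assumed, as in swiotlb.h) */
#define IO_TLB_SEGSIZE	 128
#define IO_TLB_MIN_SLABS ((1 << 20) >> IO_TLB_SHIFT)	/* 1 MiB floor */
#define ALIGN(x, a)	 (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
	/* 1 GiB worth of slabs, the cap from the paragraph above. */
	unsigned long nslabs = (1UL << 30) >> IO_TLB_SHIFT;
	/* Hypothetical: pretend anything above 64 MiB cannot be allocated. */
	unsigned long fail_above = (64UL << 20) >> IO_TLB_SHIFT;

	while (nslabs > fail_above) {	/* models swiotlb_memblock_alloc() failing */
		printf("%lu slabs (%lu MiB): failed, halving\n",
		       nslabs, (nslabs << IO_TLB_SHIFT) >> 20);
		if (nslabs <= IO_TLB_MIN_SLABS)
			return 1;	/* give up, as the kernel loop does */
		nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
	}
	printf("%lu slabs (%lu MiB): allocated\n",
	       nslabs, (nslabs << IO_TLB_SHIFT) >> 20);
	return 0;
}

With these numbers the request shrinks 1 GiB -> 512 MiB -> 256 MiB ->
128 MiB -> 64 MiB, where the simulated allocation succeeds; a real boot
gives up only once nslabs reaches the 1 MiB floor.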

Signed-off-by: Alexey Kardashevskiy <[email protected]>
Reviewed-by: Pankaj Gupta <[email protected]>
Signed-off-by: Christoph Hellwig <[email protected]>
aik authored and Christoph Hellwig committed Nov 1, 2022
1 parent 30a0b95 commit 8d58aa4
Showing 1 changed file with 39 additions and 24 deletions.
63 changes: 39 additions & 24 deletions kernel/dma/swiotlb.c
@@ -300,6 +300,37 @@ static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start,
 	return;
 }
 
+static void *swiotlb_memblock_alloc(unsigned long nslabs, unsigned int flags,
+		int (*remap)(void *tlb, unsigned long nslabs))
+{
+	size_t bytes = PAGE_ALIGN(nslabs << IO_TLB_SHIFT);
+	void *tlb;
+
+	/*
+	 * By default allocate the bounce buffer memory from low memory, but
+	 * allow to pick a location everywhere for hypervisors with guest
+	 * memory encryption.
+	 */
+	if (flags & SWIOTLB_ANY)
+		tlb = memblock_alloc(bytes, PAGE_SIZE);
+	else
+		tlb = memblock_alloc_low(bytes, PAGE_SIZE);
+
+	if (!tlb) {
+		pr_warn("%s: Failed to allocate %zu bytes tlb structure\n",
+			__func__, bytes);
+		return NULL;
+	}
+
+	if (remap && remap(tlb, nslabs) < 0) {
+		memblock_free(tlb, PAGE_ALIGN(bytes));
+		pr_warn("%s: Failed to remap %zu bytes\n", __func__, bytes);
+		return NULL;
+	}
+
+	return tlb;
+}
+
 /*
  * Statically reserve bounce buffer space and initialize bounce buffer data
  * structures for the software IO TLB used to implement the DMA API.
@@ -310,7 +341,6 @@ void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
 	struct io_tlb_mem *mem = &io_tlb_default_mem;
 	unsigned long nslabs;
 	size_t alloc_size;
-	size_t bytes;
 	void *tlb;
 
 	if (!addressing_limit && !swiotlb_force_bounce)
@@ -326,31 +356,16 @@ void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
 		swiotlb_adjust_nareas(num_possible_cpus());
 
 	nslabs = default_nslabs;
-	/*
-	 * By default allocate the bounce buffer memory from low memory, but
-	 * allow to pick a location everywhere for hypervisors with guest
-	 * memory encryption.
-	 */
-retry:
-	bytes = PAGE_ALIGN(nslabs << IO_TLB_SHIFT);
-	if (flags & SWIOTLB_ANY)
-		tlb = memblock_alloc(bytes, PAGE_SIZE);
-	else
-		tlb = memblock_alloc_low(bytes, PAGE_SIZE);
-	if (!tlb) {
-		pr_warn("%s: failed to allocate tlb structure\n", __func__);
-		return;
-	}
-
-	if (remap && remap(tlb, nslabs) < 0) {
-		memblock_free(tlb, PAGE_ALIGN(bytes));
-
+	while ((tlb = swiotlb_memblock_alloc(nslabs, flags, remap)) == NULL) {
+		if (nslabs <= IO_TLB_MIN_SLABS)
+			return;
 		nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
-		if (nslabs >= IO_TLB_MIN_SLABS)
-			goto retry;
-	}
-
-	pr_warn("%s: Failed to remap %zu bytes\n", __func__, bytes);
-	return;
+	}
+
+	if (default_nslabs != nslabs) {
+		pr_info("SWIOTLB bounce buffer size adjusted %lu -> %lu slabs",
+			default_nslabs, nslabs);
+		default_nslabs = nslabs;
+	}
 
 	alloc_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), nslabs));
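
Assuming swiotlb.c's "software IO TLB: " pr_fmt prefix, a fallback from
the 1 GiB default (524288 slabs of 2 KiB) to 256 MiB (131072 slabs)
would log a line like the following; the numbers are illustrative, not
from a real boot:

software IO TLB: SWIOTLB bounce buffer size adjusted 524288 -> 131072 slabs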