Merge tag 'sound-fix-5.16-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound

Pull sound fixes from Takashi Iwai:
 "A collection of fixes for 5.16-rc1, notably for a few regressions that
  were found in 5.15 and pre-rc1:

   - revert of the unification of SG-buffer helper functions on x86 and
     the relevant fix

   - regression fixes for mmap after the recent code refactoring

   - two NULL dereference fixes in HD-audio controller driver

   - UAF fixes in ALSA timer core

   - a few usual HD-audio and FireWire quirks"

* tag 'sound-fix-5.16-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound:
  ALSA: fireworks: add support for Loud Onyx 1200f quirk
  ALSA: hda: fix general protection fault in azx_runtime_idle
  ALSA: hda: Free card instance properly at probe errors
  ALSA: hda/realtek: Add quirk for HP EliteBook 840 G7 mute LED
  ALSA: memalloc: Remove a stale comment
  ALSA: synth: missing check for possible NULL after the call to kstrdup
  ALSA: memalloc: Use proper SG helpers for noncontig allocations
  ALSA: pci: rme: Fix unaligned buffer addresses
  ALSA: firewire-motu: add support for MOTU Track 16
  ALSA: PCM: Fix NULL dereference at mmap checks
  ALSA: hda/realtek: Add quirk for ASUS UX550VE
  ALSA: timer: Unconditionally unlink slave instances, too
  ALSA: memalloc: Catch call with NULL snd_dma_buffer pointer
  Revert "ALSA: memalloc: Convert x86 SG-buffer handling with non-contiguous type"
  ALSA: hda/realtek: Add a quirk for Acer Spin SP513-54N
  ALSA: firewire-motu: add support for MOTU Traveler mk3
  ALSA: hda/realtek: Headset fixup for Clevo NH77HJQ
  ALSA: timer: Fix use-after-free problem
torvalds committed Nov 12, 2021
2 parents 304ac80 + 0ca3727 commit 0d5d746
Showing 15 changed files with 386 additions and 102 deletions.
14 changes: 7 additions & 7 deletions include/sound/memalloc.h
@@ -36,6 +36,13 @@ struct snd_dma_device {
#define SNDRV_DMA_TYPE_CONTINUOUS 1 /* continuous no-DMA memory */
#define SNDRV_DMA_TYPE_DEV 2 /* generic device continuous */
#define SNDRV_DMA_TYPE_DEV_WC 5 /* continuous write-combined */
#ifdef CONFIG_SND_DMA_SGBUF
#define SNDRV_DMA_TYPE_DEV_SG 3 /* generic device SG-buffer */
#define SNDRV_DMA_TYPE_DEV_WC_SG 6 /* SG write-combined */
#else
#define SNDRV_DMA_TYPE_DEV_SG SNDRV_DMA_TYPE_DEV /* no SG-buf support */
#define SNDRV_DMA_TYPE_DEV_WC_SG SNDRV_DMA_TYPE_DEV_WC
#endif
#ifdef CONFIG_GENERIC_ALLOCATOR
#define SNDRV_DMA_TYPE_DEV_IRAM 4 /* generic device iram-buffer */
#else
@@ -44,13 +51,6 @@ struct snd_dma_device {
#define SNDRV_DMA_TYPE_VMALLOC 7 /* vmalloc'ed buffer */
#define SNDRV_DMA_TYPE_NONCONTIG 8 /* non-coherent SG buffer */
#define SNDRV_DMA_TYPE_NONCOHERENT 9 /* non-coherent buffer */
#ifdef CONFIG_SND_DMA_SGBUF
#define SNDRV_DMA_TYPE_DEV_SG SNDRV_DMA_TYPE_NONCONTIG
#define SNDRV_DMA_TYPE_DEV_WC_SG 6 /* SG write-combined */
#else
#define SNDRV_DMA_TYPE_DEV_SG SNDRV_DMA_TYPE_DEV /* no SG-buf support */
#define SNDRV_DMA_TYPE_DEV_WC_SG SNDRV_DMA_TYPE_DEV_WC
#endif

/*
* info for buffer allocation
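A usage note on the reordered definitions above: a driver can request an SG buffer unconditionally and let these constants degrade gracefully. A minimal sketch, assuming a valid struct device *dev (the helper name alloc_sg_buffer is hypothetical, not part of this patch):

    #include <sound/memalloc.h>

    /* Hypothetical helper: request a scatter-gather buffer. When
     * CONFIG_SND_DMA_SGBUF is disabled, SNDRV_DMA_TYPE_DEV_SG aliases
     * SNDRV_DMA_TYPE_DEV, so the same call transparently falls back to
     * a continuous device buffer.
     */
    static int alloc_sg_buffer(struct device *dev, size_t size,
                               struct snd_dma_buffer *dmab)
    {
            return snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_SG, dev, size, dmab);
    }

On success the buffer is later released with snd_dma_free_pages(dmab).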
1 change: 1 addition & 0 deletions sound/core/Makefile
@@ -19,6 +19,7 @@ snd-$(CONFIG_SND_JACK) += ctljack.o jack.o
snd-pcm-y := pcm.o pcm_native.o pcm_lib.o pcm_misc.o \
pcm_memory.o memalloc.o
snd-pcm-$(CONFIG_SND_PCM_TIMER) += pcm_timer.o
snd-pcm-$(CONFIG_SND_DMA_SGBUF) += sgbuf.o
snd-pcm-$(CONFIG_SND_PCM_ELD) += pcm_drm_eld.o
snd-pcm-$(CONFIG_SND_PCM_IEC958) += pcm_iec958.o

105 changes: 62 additions & 43 deletions sound/core/memalloc.c
@@ -183,8 +183,11 @@ EXPORT_SYMBOL_GPL(snd_devm_alloc_dir_pages);
int snd_dma_buffer_mmap(struct snd_dma_buffer *dmab,
struct vm_area_struct *area)
{
const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);
const struct snd_malloc_ops *ops;

if (!dmab)
return -ENOENT;
ops = snd_dma_get_ops(dmab);
if (ops && ops->mmap)
return ops->mmap(dmab, area);
else
@@ -549,60 +552,73 @@ static void snd_dma_noncontig_sync(struct snd_dma_buffer *dmab,
}
}

static const struct snd_malloc_ops snd_dma_noncontig_ops = {
.alloc = snd_dma_noncontig_alloc,
.free = snd_dma_noncontig_free,
.mmap = snd_dma_noncontig_mmap,
.sync = snd_dma_noncontig_sync,
/* re-use vmalloc helpers for get_* ops */
.get_addr = snd_dma_vmalloc_get_addr,
.get_page = snd_dma_vmalloc_get_page,
.get_chunk_size = snd_dma_vmalloc_get_chunk_size,
};
static inline void snd_dma_noncontig_iter_set(struct snd_dma_buffer *dmab,
struct sg_page_iter *piter,
size_t offset)
{
struct sg_table *sgt = dmab->private_data;

/* x86-specific SG-buffer with WC pages */
#ifdef CONFIG_SND_DMA_SGBUF
#define vmalloc_to_virt(v) (unsigned long)page_to_virt(vmalloc_to_page(v))
__sg_page_iter_start(piter, sgt->sgl, sgt->orig_nents,
offset >> PAGE_SHIFT);
}

static void *snd_dma_sg_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
static dma_addr_t snd_dma_noncontig_get_addr(struct snd_dma_buffer *dmab,
size_t offset)
{
void *p = snd_dma_noncontig_alloc(dmab, size);
size_t ofs;
struct sg_dma_page_iter iter;

if (!p)
return NULL;
for (ofs = 0; ofs < size; ofs += PAGE_SIZE)
set_memory_uc(vmalloc_to_virt(p + ofs), 1);
return p;
snd_dma_noncontig_iter_set(dmab, &iter.base, offset);
__sg_page_iter_dma_next(&iter);
return sg_page_iter_dma_address(&iter) + offset % PAGE_SIZE;
}

static void snd_dma_sg_wc_free(struct snd_dma_buffer *dmab)
static struct page *snd_dma_noncontig_get_page(struct snd_dma_buffer *dmab,
size_t offset)
{
size_t ofs;
struct sg_page_iter iter;

for (ofs = 0; ofs < dmab->bytes; ofs += PAGE_SIZE)
set_memory_wb(vmalloc_to_virt(dmab->area + ofs), 1);
snd_dma_noncontig_free(dmab);
snd_dma_noncontig_iter_set(dmab, &iter, offset);
__sg_page_iter_next(&iter);
return sg_page_iter_page(&iter);
}

static int snd_dma_sg_wc_mmap(struct snd_dma_buffer *dmab,
struct vm_area_struct *area)
static unsigned int
snd_dma_noncontig_get_chunk_size(struct snd_dma_buffer *dmab,
unsigned int ofs, unsigned int size)
{
area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
/* FIXME: dma_mmap_noncontiguous() works? */
return -ENOENT; /* continue with the default mmap handler */
struct sg_dma_page_iter iter;
unsigned int start, end;
unsigned long addr;

start = ALIGN_DOWN(ofs, PAGE_SIZE);
end = ofs + size - 1; /* the last byte address */
snd_dma_noncontig_iter_set(dmab, &iter.base, start);
if (!__sg_page_iter_dma_next(&iter))
return 0;
/* check page continuity */
addr = sg_page_iter_dma_address(&iter);
for (;;) {
start += PAGE_SIZE;
if (start > end)
break;
addr += PAGE_SIZE;
if (!__sg_page_iter_dma_next(&iter) ||
sg_page_iter_dma_address(&iter) != addr)
return start - ofs;
}
/* ok, all on continuous pages */
return size;
}

const struct snd_malloc_ops snd_dma_sg_wc_ops = {
.alloc = snd_dma_sg_wc_alloc,
.free = snd_dma_sg_wc_free,
.mmap = snd_dma_sg_wc_mmap,
static const struct snd_malloc_ops snd_dma_noncontig_ops = {
.alloc = snd_dma_noncontig_alloc,
.free = snd_dma_noncontig_free,
.mmap = snd_dma_noncontig_mmap,
.sync = snd_dma_noncontig_sync,
.get_addr = snd_dma_vmalloc_get_addr,
.get_page = snd_dma_vmalloc_get_page,
.get_chunk_size = snd_dma_vmalloc_get_chunk_size,
.get_addr = snd_dma_noncontig_get_addr,
.get_page = snd_dma_noncontig_get_page,
.get_chunk_size = snd_dma_noncontig_get_chunk_size,
};
#endif /* CONFIG_SND_DMA_SGBUF */

/*
* Non-coherent pages allocator
@@ -663,17 +679,20 @@ static const struct snd_malloc_ops *dma_ops[] = {
[SNDRV_DMA_TYPE_DEV_WC] = &snd_dma_wc_ops,
[SNDRV_DMA_TYPE_NONCONTIG] = &snd_dma_noncontig_ops,
[SNDRV_DMA_TYPE_NONCOHERENT] = &snd_dma_noncoherent_ops,
#ifdef CONFIG_SND_DMA_SGBUF
[SNDRV_DMA_TYPE_DEV_WC_SG] = &snd_dma_sg_wc_ops,
#endif
#ifdef CONFIG_GENERIC_ALLOCATOR
[SNDRV_DMA_TYPE_DEV_IRAM] = &snd_dma_iram_ops,
#endif /* CONFIG_GENERIC_ALLOCATOR */
#endif /* CONFIG_HAS_DMA */
#ifdef CONFIG_SND_DMA_SGBUF
[SNDRV_DMA_TYPE_DEV_SG] = &snd_dma_sg_ops,
[SNDRV_DMA_TYPE_DEV_WC_SG] = &snd_dma_sg_ops,
#endif
};

static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab)
{
if (WARN_ON_ONCE(!dmab))
return NULL;
if (WARN_ON_ONCE(dmab->dev.type <= SNDRV_DMA_TYPE_UNKNOWN ||
dmab->dev.type >= ARRAY_SIZE(dma_ops)))
return NULL;
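The get_chunk_size callbacks in this file and in the new sound/core/sgbuf.c below share the same continuity scan: walk page by page from the aligned start and stop at the first physical discontiguity. A standalone sketch of the idea, assuming 4 KiB pages and an illustrative pfn[] array of per-page frame numbers (chunk_size() is not part of the patch; callers guarantee ofs + size stays within the buffer):

    #define PAGE_SHIFT 12

    /* Illustrative only: how many bytes starting at ofs sit on
     * physically continuous pages, capped at size. Mirrors the loop
     * in snd_dma_sg_get_chunk_size() below.
     */
    static unsigned int chunk_size(const unsigned long *pfn,
                                   unsigned int ofs, unsigned int size)
    {
            unsigned int start = ofs >> PAGE_SHIFT;
            unsigned int end = (ofs + size - 1) >> PAGE_SHIFT;
            unsigned long pg = pfn[start];

            for (;;) {
                    start++;
                    if (start > end)
                            break;
                    pg++;
                    if (pfn[start] != pg)
                            return (start << PAGE_SHIFT) - ofs;
            }
            return size;    /* all pages continuous */
    }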
201 changes: 201 additions & 0 deletions sound/core/sgbuf.c
@@ -0,0 +1,201 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Scatter-Gather buffer
*
* Copyright (c) by Takashi Iwai <[email protected]>
*/

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <sound/memalloc.h>
#include "memalloc_local.h"

struct snd_sg_page {
void *buf;
dma_addr_t addr;
};

struct snd_sg_buf {
int size; /* allocated byte size */
int pages; /* allocated pages */
int tblsize; /* allocated table size */
struct snd_sg_page *table; /* address table */
struct page **page_table; /* page table (for vmap/vunmap) */
struct device *dev;
};

/* table entries are aligned to 32 */
#define SGBUF_TBL_ALIGN 32
#define sgbuf_align_table(tbl) ALIGN((tbl), SGBUF_TBL_ALIGN)

static void snd_dma_sg_free(struct snd_dma_buffer *dmab)
{
struct snd_sg_buf *sgbuf = dmab->private_data;
struct snd_dma_buffer tmpb;
int i;

if (!sgbuf)
return;

vunmap(dmab->area);
dmab->area = NULL;

tmpb.dev.type = SNDRV_DMA_TYPE_DEV;
if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG)
tmpb.dev.type = SNDRV_DMA_TYPE_DEV_WC;
tmpb.dev.dev = sgbuf->dev;
for (i = 0; i < sgbuf->pages; i++) {
if (!(sgbuf->table[i].addr & ~PAGE_MASK))
continue; /* continuous pages */
tmpb.area = sgbuf->table[i].buf;
tmpb.addr = sgbuf->table[i].addr & PAGE_MASK;
tmpb.bytes = (sgbuf->table[i].addr & ~PAGE_MASK) << PAGE_SHIFT;
snd_dma_free_pages(&tmpb);
}

kfree(sgbuf->table);
kfree(sgbuf->page_table);
kfree(sgbuf);
dmab->private_data = NULL;
}

#define MAX_ALLOC_PAGES 32

static void *snd_dma_sg_alloc(struct snd_dma_buffer *dmab, size_t size)
{
struct snd_sg_buf *sgbuf;
unsigned int i, pages, chunk, maxpages;
struct snd_dma_buffer tmpb;
struct snd_sg_page *table;
struct page **pgtable;
int type = SNDRV_DMA_TYPE_DEV;
pgprot_t prot = PAGE_KERNEL;
void *area;

dmab->private_data = sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
if (!sgbuf)
return NULL;
if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG) {
type = SNDRV_DMA_TYPE_DEV_WC;
#ifdef pgprot_noncached
prot = pgprot_noncached(PAGE_KERNEL);
#endif
}
sgbuf->dev = dmab->dev.dev;
pages = snd_sgbuf_aligned_pages(size);
sgbuf->tblsize = sgbuf_align_table(pages);
table = kcalloc(sgbuf->tblsize, sizeof(*table), GFP_KERNEL);
if (!table)
goto _failed;
sgbuf->table = table;
pgtable = kcalloc(sgbuf->tblsize, sizeof(*pgtable), GFP_KERNEL);
if (!pgtable)
goto _failed;
sgbuf->page_table = pgtable;

/* allocate pages */
maxpages = MAX_ALLOC_PAGES;
while (pages > 0) {
chunk = pages;
/* don't be too eager to take a huge chunk */
if (chunk > maxpages)
chunk = maxpages;
chunk <<= PAGE_SHIFT;
if (snd_dma_alloc_pages_fallback(type, dmab->dev.dev,
chunk, &tmpb) < 0) {
if (!sgbuf->pages)
goto _failed;
size = sgbuf->pages * PAGE_SIZE;
break;
}
chunk = tmpb.bytes >> PAGE_SHIFT;
for (i = 0; i < chunk; i++) {
table->buf = tmpb.area;
table->addr = tmpb.addr;
if (!i)
table->addr |= chunk; /* mark head */
table++;
*pgtable++ = virt_to_page(tmpb.area);
tmpb.area += PAGE_SIZE;
tmpb.addr += PAGE_SIZE;
}
sgbuf->pages += chunk;
pages -= chunk;
if (chunk < maxpages)
maxpages = chunk;
}

sgbuf->size = size;
area = vmap(sgbuf->page_table, sgbuf->pages, VM_MAP, prot);
if (!area)
goto _failed;
return area;

_failed:
snd_dma_sg_free(dmab); /* free the table */
return NULL;
}

static dma_addr_t snd_dma_sg_get_addr(struct snd_dma_buffer *dmab,
size_t offset)
{
struct snd_sg_buf *sgbuf = dmab->private_data;
dma_addr_t addr;

addr = sgbuf->table[offset >> PAGE_SHIFT].addr;
addr &= ~((dma_addr_t)PAGE_SIZE - 1);
return addr + offset % PAGE_SIZE;
}

static struct page *snd_dma_sg_get_page(struct snd_dma_buffer *dmab,
size_t offset)
{
struct snd_sg_buf *sgbuf = dmab->private_data;
unsigned int idx = offset >> PAGE_SHIFT;

if (idx >= (unsigned int)sgbuf->pages)
return NULL;
return sgbuf->page_table[idx];
}

static unsigned int snd_dma_sg_get_chunk_size(struct snd_dma_buffer *dmab,
unsigned int ofs,
unsigned int size)
{
struct snd_sg_buf *sg = dmab->private_data;
unsigned int start, end, pg;

start = ofs >> PAGE_SHIFT;
end = (ofs + size - 1) >> PAGE_SHIFT;
/* check page continuity */
pg = sg->table[start].addr >> PAGE_SHIFT;
for (;;) {
start++;
if (start > end)
break;
pg++;
if ((sg->table[start].addr >> PAGE_SHIFT) != pg)
return (start << PAGE_SHIFT) - ofs;
}
/* ok, all on continuous pages */
return size;
}

static int snd_dma_sg_mmap(struct snd_dma_buffer *dmab,
struct vm_area_struct *area)
{
if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG)
area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
return -ENOENT; /* continue with the default mmap handler */
}

const struct snd_malloc_ops snd_dma_sg_ops = {
.alloc = snd_dma_sg_alloc,
.free = snd_dma_sg_free,
.get_addr = snd_dma_sg_get_addr,
.get_page = snd_dma_sg_get_page,
.get_chunk_size = snd_dma_sg_get_chunk_size,
.mmap = snd_dma_sg_mmap,
};
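A detail worth noting in snd_dma_sg_alloc()/snd_dma_sg_free() above: the head entry of each chunk stores the chunk length (in pages) in the low bits of its page-aligned DMA address, which is safe because chunks are capped at MAX_ALLOC_PAGES (32), well below PAGE_SIZE. A minimal sketch of the encoding, assuming 4 KiB pages (the encode/decode helpers are illustrative names, not part of the patch):

    #include <stdint.h>

    #define PAGE_SHIFT 12
    #define PAGE_MASK  (~(((uint64_t)1 << PAGE_SHIFT) - 1))

    /* A page-aligned address has zero low bits, so the first table
     * entry of each chunk can carry the chunk length there
     * ("mark head" in snd_dma_sg_alloc()).
     */
    static uint64_t encode_head(uint64_t addr, unsigned int pages)
    {
            return (addr & PAGE_MASK) | pages;
    }

    static uint64_t decode_addr(uint64_t entry)
    {
            return entry & PAGE_MASK;       /* the real DMA address */
    }

    static unsigned int decode_pages(uint64_t entry)
    {
            return entry & ~PAGE_MASK;      /* 0 for non-head entries */
    }

snd_dma_sg_free() relies on exactly this: entries with zero low bits are skipped as chunk tails, and each head entry reconstructs its chunk's byte size as (addr & ~PAGE_MASK) << PAGE_SHIFT before freeing.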