xen/grant-dma-ops: Add option to restrict memory access under Xen
Introduce a Xen grant DMA-mapping layer which contains special DMA-mapping routines for providing grant references as DMA addresses, to be used by frontends (e.g. virtio) in Xen guests. Add the needed functionality by providing a special set of DMA ops handling the required grant operations for the I/O pages.

The subsequent commit will introduce the use case for the xen-grant DMA ops layer to enable using virtio devices in Xen guests in a safe manner.

Signed-off-by: Juergen Gross <[email protected]>
Signed-off-by: Oleksandr Tyshchenko <[email protected]>
Reviewed-by: Stefano Stabellini <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Juergen Gross <[email protected]>
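In this scheme a DMA address is simply the grant reference used as a page frame number, with the top address bit set so the backend can distinguish grant-based addresses from e.g. MMIO addresses. A minimal round-trip sketch (not part of the diff), mirroring the grant_to_dma()/dma_to_grant() helpers below and assuming 4 KiB pages (PAGE_SHIFT == 12):

	#define XEN_GRANT_DMA_ADDR_OFF (1ULL << 63)

	/* grant ref 5 -> DMA address 0x8000000000005000 (bit 63 set, frame 5) */
	dma_addr_t dma = XEN_GRANT_DMA_ADDR_OFF | ((dma_addr_t)5 << 12);

	/* and back: mask off the flag bit, shift down -> grant ref 5 again */
	grant_ref_t grant = (grant_ref_t)((dma & ~XEN_GRANT_DMA_ADDR_OFF) >> 12);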
Showing 4 changed files with 325 additions and 0 deletions.
@@ -0,0 +1,312 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Xen grant DMA-mapping layer - contains special DMA-mapping routines
 * for providing grant references as DMA addresses to be used by frontends
 * (e.g. virtio) in Xen guests
 *
 * Copyright (c) 2021, Juergen Gross <[email protected]>
 */

#include <linux/module.h>
#include <linux/dma-map-ops.h>
#include <linux/of.h>
#include <linux/pfn.h>
#include <linux/xarray.h>
#include <xen/xen.h>
#include <xen/xen-ops.h>
#include <xen/grant_table.h>

struct xen_grant_dma_data {
	/* The ID of the backend domain */
	domid_t backend_domid;
	/* Is the device behaving sanely? */
	bool broken;
};

static DEFINE_XARRAY(xen_grant_dma_devices);

#define XEN_GRANT_DMA_ADDR_OFF	(1ULL << 63)

static inline dma_addr_t grant_to_dma(grant_ref_t grant)
{
	return XEN_GRANT_DMA_ADDR_OFF | ((dma_addr_t)grant << PAGE_SHIFT);
}

static inline grant_ref_t dma_to_grant(dma_addr_t dma)
{
	return (grant_ref_t)((dma & ~XEN_GRANT_DMA_ADDR_OFF) >> PAGE_SHIFT);
}

static struct xen_grant_dma_data *find_xen_grant_dma_data(struct device *dev)
{
	struct xen_grant_dma_data *data;

	xa_lock(&xen_grant_dma_devices);
	data = xa_load(&xen_grant_dma_devices, (unsigned long)dev);
	xa_unlock(&xen_grant_dma_devices);

	return data;
}

/*
 * DMA ops for Xen frontends (e.g. virtio).
 *
 * Used to act as a kind of software IOMMU for Xen guests by using grants as
 * DMA addresses.
 * Such a DMA address is formed by using the grant reference as a frame
 * number and setting the highest address bit (this bit is for the backend
 * to be able to distinguish it from e.g. an MMIO address).
 *
 * Note that for now we hard-wire dom0 to be the backend domain. In order
 * to support any domain as backend we'd need to add a way to communicate
 * the domid of this backend, e.g. via Xenstore, via the PCI device's
 * config space or DT/ACPI.
 */
static void *xen_grant_dma_alloc(struct device *dev, size_t size,
				 dma_addr_t *dma_handle, gfp_t gfp,
				 unsigned long attrs)
{
	struct xen_grant_dma_data *data;
	unsigned int i, n_pages = PFN_UP(size);
	unsigned long pfn;
	grant_ref_t grant;
	void *ret;

	data = find_xen_grant_dma_data(dev);
	if (!data)
		return NULL;

	if (unlikely(data->broken))
		return NULL;

	ret = alloc_pages_exact(n_pages * PAGE_SIZE, gfp);
	if (!ret)
		return NULL;

	pfn = virt_to_pfn(ret);

	if (gnttab_alloc_grant_reference_seq(n_pages, &grant)) {
		free_pages_exact(ret, n_pages * PAGE_SIZE);
		return NULL;
	}

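	/*
	 * Grant the backend domain access to each backing page; the last
	 * argument (0) means the grant is not read-only, since coherent
	 * buffers may be written by both frontend and backend.
	 */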
	for (i = 0; i < n_pages; i++) {
		gnttab_grant_foreign_access_ref(grant + i, data->backend_domid,
						pfn_to_gfn(pfn + i), 0);
	}

	*dma_handle = grant_to_dma(grant);

	return ret;
}

static void xen_grant_dma_free(struct device *dev, size_t size, void *vaddr,
			       dma_addr_t dma_handle, unsigned long attrs)
{
	struct xen_grant_dma_data *data;
	unsigned int i, n_pages = PFN_UP(size);
	grant_ref_t grant;

	data = find_xen_grant_dma_data(dev);
	if (!data)
		return;

	if (unlikely(data->broken))
		return;

	grant = dma_to_grant(dma_handle);

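	/*
	 * If the backend still holds a grant, the page must not be reused;
	 * mark the device broken and leak the allocation rather than free
	 * memory the backend can still access.
	 */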
	for (i = 0; i < n_pages; i++) {
		if (unlikely(!gnttab_end_foreign_access_ref(grant + i))) {
			dev_alert(dev, "Grant still in use by backend domain, disabled for further use\n");
			data->broken = true;
			return;
		}
	}

	gnttab_free_grant_reference_seq(grant, n_pages);

	free_pages_exact(vaddr, n_pages * PAGE_SIZE);
}

static struct page *xen_grant_dma_alloc_pages(struct device *dev, size_t size,
					      dma_addr_t *dma_handle,
					      enum dma_data_direction dir,
					      gfp_t gfp)
{
	void *vaddr;

	vaddr = xen_grant_dma_alloc(dev, size, dma_handle, gfp, 0);
	if (!vaddr)
		return NULL;

	return virt_to_page(vaddr);
}

static void xen_grant_dma_free_pages(struct device *dev, size_t size,
				     struct page *vaddr, dma_addr_t dma_handle,
				     enum dma_data_direction dir)
{
	xen_grant_dma_free(dev, size, page_to_virt(vaddr), dma_handle, 0);
}

static dma_addr_t xen_grant_dma_map_page(struct device *dev, struct page *page,
					 unsigned long offset, size_t size,
					 enum dma_data_direction dir,
					 unsigned long attrs)
{
	struct xen_grant_dma_data *data;
	unsigned int i, n_pages = PFN_UP(size);
	grant_ref_t grant;
	dma_addr_t dma_handle;

	if (WARN_ON(dir == DMA_NONE))
		return DMA_MAPPING_ERROR;

	data = find_xen_grant_dma_data(dev);
	if (!data)
		return DMA_MAPPING_ERROR;

	if (unlikely(data->broken))
		return DMA_MAPPING_ERROR;

	if (gnttab_alloc_grant_reference_seq(n_pages, &grant))
		return DMA_MAPPING_ERROR;

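	/*
	 * For DMA_TO_DEVICE the backend only needs to read the pages, so the
	 * grants are issued read-only (last argument of the call below).
	 */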
	for (i = 0; i < n_pages; i++) {
		gnttab_grant_foreign_access_ref(grant + i, data->backend_domid,
						xen_page_to_gfn(page) + i, dir == DMA_TO_DEVICE);
	}

	dma_handle = grant_to_dma(grant) + offset;

	return dma_handle;
}

static void xen_grant_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
				     size_t size, enum dma_data_direction dir,
				     unsigned long attrs)
{
	struct xen_grant_dma_data *data;
	unsigned int i, n_pages = PFN_UP(size);
	grant_ref_t grant;

	if (WARN_ON(dir == DMA_NONE))
		return;

	data = find_xen_grant_dma_data(dev);
	if (!data)
		return;

	if (unlikely(data->broken))
		return;

	grant = dma_to_grant(dma_handle);

	for (i = 0; i < n_pages; i++) {
		if (unlikely(!gnttab_end_foreign_access_ref(grant + i))) {
			dev_alert(dev, "Grant still in use by backend domain, disabled for further use\n");
			data->broken = true;
			return;
		}
	}

	gnttab_free_grant_reference_seq(grant, n_pages);
}

static void xen_grant_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir,
				   unsigned long attrs)
{
	struct scatterlist *s;
	unsigned int i;

	if (WARN_ON(dir == DMA_NONE))
		return;

	for_each_sg(sg, s, nents, i)
		xen_grant_dma_unmap_page(dev, s->dma_address, sg_dma_len(s), dir,
				attrs);
}

static int xen_grant_dma_map_sg(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction dir,
				unsigned long attrs)
{
	struct scatterlist *s;
	unsigned int i;

	if (WARN_ON(dir == DMA_NONE))
		return -EINVAL;

	for_each_sg(sg, s, nents, i) {
		s->dma_address = xen_grant_dma_map_page(dev, sg_page(s), s->offset,
				s->length, dir, attrs);
		if (s->dma_address == DMA_MAPPING_ERROR)
			goto out;

		sg_dma_len(s) = s->length;
	}

	return nents;

out:
	xen_grant_dma_unmap_sg(dev, sg, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
	sg_dma_len(sg) = 0;

	return -EIO;
}

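	/*
	 * Bit 63 of the DMA address carries the grant flag, so only devices
	 * capable of addressing a full 64-bit mask can use this layer.
	 */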
static int xen_grant_dma_supported(struct device *dev, u64 mask)
{
	return mask == DMA_BIT_MASK(64);
}

static const struct dma_map_ops xen_grant_dma_ops = {
	.alloc = xen_grant_dma_alloc,
	.free = xen_grant_dma_free,
	.alloc_pages = xen_grant_dma_alloc_pages,
	.free_pages = xen_grant_dma_free_pages,
	.mmap = dma_common_mmap,
	.get_sgtable = dma_common_get_sgtable,
	.map_page = xen_grant_dma_map_page,
	.unmap_page = xen_grant_dma_unmap_page,
	.map_sg = xen_grant_dma_map_sg,
	.unmap_sg = xen_grant_dma_unmap_sg,
	.dma_supported = xen_grant_dma_supported,
};

void xen_grant_setup_dma_ops(struct device *dev)
{
	struct xen_grant_dma_data *data;

	data = find_xen_grant_dma_data(dev);
	if (data) {
		dev_err(dev, "Xen grant DMA data is already created\n");
		return;
	}

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		goto err;

	/* XXX The dom0 is hardcoded as the backend domain for now */
	data->backend_domid = 0;

	if (xa_err(xa_store(&xen_grant_dma_devices, (unsigned long)dev, data,
			GFP_KERNEL))) {
		dev_err(dev, "Cannot store Xen grant DMA data\n");
		goto err;
	}

	dev->dma_ops = &xen_grant_dma_ops;

	return;

err:
	dev_err(dev, "Cannot set up Xen grant DMA ops, retain platform DMA ops\n");
}

MODULE_DESCRIPTION("Xen grant DMA-mapping layer");
MODULE_AUTHOR("Juergen Gross <[email protected]>");
MODULE_LICENSE("GPL");
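For orientation (not part of the commit): once xen_grant_setup_dma_ops() has installed these ops on a device, the generic DMA API routes through them. A hypothetical sketch of what a frontend driver's buffer handling would then look like, assuming a device `dev` that already has the grant DMA ops installed:

	/* Hypothetical illustration only, not from this commit. */
	void *buf;
	dma_addr_t dma;

	/*
	 * Lands in xen_grant_dma_alloc(): pages are allocated, one grant per
	 * page is issued to the backend, and bit 63 is set in the handle.
	 */
	buf = dma_alloc_coherent(dev, 4096, &dma, GFP_KERNEL);

	/* ... hand 'dma' to the backend, e.g. by placing it in a virtio ring ... */

	/*
	 * Lands in xen_grant_dma_free(): the grants are revoked before the
	 * pages are returned, so the backend can never touch freed memory.
	 */
	dma_free_coherent(dev, 4096, buf, dma);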