Skip to content

Commit

Permalink
virtiofs: set up virtio_fs dax_device
Browse files Browse the repository at this point in the history
Set up a DAX device.

Use the shm capability to find the cache entry and map it.

The DAX window is accessed by the fs/dax.c infrastructure and must have
struct pages (at least on x86).  Use devm_memremap_pages() to map the
DAX window PCI BAR and allocate struct page.

Signed-off-by: Stefan Hajnoczi <[email protected]>
Signed-off-by: Dr. David Alan Gilbert <[email protected]>
Signed-off-by: Vivek Goyal <[email protected]>
Signed-off-by: Sebastien Boeuf <[email protected]>
Signed-off-by: Liu Bo <[email protected]>
Signed-off-by: Miklos Szeredi <[email protected]>
  • Loading branch information
stefanhaRH authored and Miklos Szeredi committed Sep 10, 2020
1 parent f4fd4ae commit 22f3787
Show file tree
Hide file tree
Showing 2 changed files with 141 additions and 0 deletions.
138 changes: 138 additions & 0 deletions fs/fuse/virtio_fs.c
Original file line number Diff line number Diff line change
Expand Up @@ -5,12 +5,16 @@
*/

#include <linux/fs.h>
#include <linux/dax.h>
#include <linux/pci.h>
#include <linux/pfn_t.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_fs.h>
#include <linux/delay.h>
#include <linux/fs_context.h>
#include <linux/highmem.h>
#include <linux/uio.h>
#include "fuse_i.h"

/* List of virtio-fs device instances and a lock for the list. Also provides
Expand Down Expand Up @@ -49,6 +53,12 @@ struct virtio_fs {
struct virtio_fs_vq *vqs;
unsigned int nvqs; /* number of virtqueues */
unsigned int num_request_queues; /* number of request queues */
struct dax_device *dax_dev;

/* DAX memory window where file contents are mapped */
void *window_kaddr;
phys_addr_t window_phys_addr;
size_t window_len;
};

struct virtio_fs_forget_req {
Expand Down Expand Up @@ -686,6 +696,130 @@ static void virtio_fs_cleanup_vqs(struct virtio_device *vdev,
vdev->config->del_vqs(vdev);
}

/*
 * Translate a DAX window offset (produced by .iomap_begin(), which maps a
 * file offset to a window offset) into a kernel virtual address and a page
 * frame number inside the device's DAX memory window.
 *
 * Returns the number of pages available at @pgoff, capped at @nr_pages.
 */
static long virtio_fs_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
				    long nr_pages, void **kaddr, pfn_t *pfn)
{
	struct virtio_fs *fs = dax_get_private(dax_dev);
	phys_addr_t off = PFN_PHYS(pgoff);
	size_t pages_left = fs->window_len / PAGE_SIZE - pgoff;

	if (kaddr)
		*kaddr = fs->window_kaddr + off;
	if (pfn)
		*pfn = phys_to_pfn_t(fs->window_phys_addr + off,
				     PFN_DEV | PFN_MAP);

	if (nr_pages > pages_left)
		return pages_left;
	return nr_pages;
}

/*
 * DAX op: copy @bytes from the iov_iter @i into the window at @addr.
 * The window is directly addressable, so a plain iter copy suffices.
 */
static size_t virtio_fs_copy_from_iter(struct dax_device *dax_dev,
				       pgoff_t pgoff, void *addr,
				       size_t bytes, struct iov_iter *i)
{
	size_t copied = copy_from_iter(addr, bytes, i);

	return copied;
}

/*
 * DAX op: copy @bytes from the window at @addr out to the iov_iter @i.
 * The window is directly addressable, so a plain iter copy suffices.
 */
static size_t virtio_fs_copy_to_iter(struct dax_device *dax_dev,
				     pgoff_t pgoff, void *addr,
				     size_t bytes, struct iov_iter *i)
{
	size_t copied = copy_to_iter(addr, bytes, i);

	return copied;
}

/*
 * DAX op: zero @nr_pages whole pages starting at page offset @pgoff.
 * Resolves the window address via dax_direct_access(), zeroes it, then
 * flushes the range.
 */
static int virtio_fs_zero_page_range(struct dax_device *dax_dev,
				     pgoff_t pgoff, size_t nr_pages)
{
	size_t len = nr_pages << PAGE_SHIFT;
	void *kaddr;
	long ret;

	ret = dax_direct_access(dax_dev, pgoff, nr_pages, &kaddr, NULL);
	if (ret < 0)
		return ret;

	memset(kaddr, 0, len);
	dax_flush(dax_dev, kaddr, len);
	return 0;
}

/* DAX operations wired into the dax_device created in virtio_fs_setup_dax() */
static const struct dax_operations virtio_fs_dax_ops = {
.direct_access = virtio_fs_direct_access,
.copy_from_iter = virtio_fs_copy_from_iter,
.copy_to_iter = virtio_fs_copy_to_iter,
.zero_page_range = virtio_fs_zero_page_range,
};

/*
 * devm action callback: tear down the dax_device registered by
 * virtio_fs_setup_dax().  @data is the struct dax_device pointer.
 */
static void virtio_fs_cleanup_dax(void *data)
{
	kill_dax(data);
	put_dax(data);
}

/*
 * Set up the DAX window for a virtio-fs device: locate the shared memory
 * cache region advertised via the virtio shm capability, map it with
 * struct pages (required by the fs/dax.c infrastructure, at least on x86),
 * and create a dax_device backed by virtio_fs_dax_ops.
 *
 * Returns 0 on success — including when DAX is compiled out or the device
 * has no cache region (DAX simply stays disabled) — or a negative errno.
 * Cleanup of the dax_device is registered as a devm action.
 */
static int virtio_fs_setup_dax(struct virtio_device *vdev, struct virtio_fs *fs)
{
	struct virtio_shm_region cache_reg;
	struct dev_pagemap *pgmap;
	bool have_cache;

	if (!IS_ENABLED(CONFIG_FUSE_DAX))
		return 0;

	/* Get cache region */
	have_cache = virtio_get_shm_region(vdev, &cache_reg,
					   (u8)VIRTIO_FS_SHMCAP_ID_CACHE);
	if (!have_cache) {
		/* Not an error: the device just doesn't offer a DAX window */
		dev_notice(&vdev->dev, "%s: No cache capability\n", __func__);
		return 0;
	}

	if (!devm_request_mem_region(&vdev->dev, cache_reg.addr, cache_reg.len,
				     dev_name(&vdev->dev))) {
		dev_warn(&vdev->dev, "could not reserve region addr=0x%llx len=0x%llx\n",
			 cache_reg.addr, cache_reg.len);
		return -EBUSY;
	}

	dev_notice(&vdev->dev, "Cache len: 0x%llx @ 0x%llx\n", cache_reg.len,
		   cache_reg.addr);

	pgmap = devm_kzalloc(&vdev->dev, sizeof(*pgmap), GFP_KERNEL);
	if (!pgmap)
		return -ENOMEM;

	pgmap->type = MEMORY_DEVICE_FS_DAX;

	/* Ideally we would directly use the PCI BAR resource but
	 * devm_memremap_pages() wants its own copy in pgmap.  So
	 * initialize a struct resource from scratch (only the start
	 * and end fields will be used).
	 */
	pgmap->res = (struct resource){
		.name = "virtio-fs dax window",
		.start = (phys_addr_t) cache_reg.addr,
		.end = (phys_addr_t) cache_reg.addr + cache_reg.len - 1,
	};

	/* Map the window and allocate struct pages for it */
	fs->window_kaddr = devm_memremap_pages(&vdev->dev, pgmap);
	if (IS_ERR(fs->window_kaddr))
		return PTR_ERR(fs->window_kaddr);

	fs->window_phys_addr = (phys_addr_t) cache_reg.addr;
	/* window_len is a size_t; the previous (phys_addr_t) cast here was
	 * the wrong type and only obscured the implicit conversion.
	 */
	fs->window_len = cache_reg.len;

	dev_dbg(&vdev->dev, "%s: window kaddr 0x%px phys_addr 0x%llx len 0x%llx\n",
		__func__, fs->window_kaddr, cache_reg.addr, cache_reg.len);

	fs->dax_dev = alloc_dax(fs, NULL, &virtio_fs_dax_ops, 0);
	if (IS_ERR(fs->dax_dev))
		return PTR_ERR(fs->dax_dev);

	/* Ensure kill_dax()/put_dax() run on driver detach or probe failure */
	return devm_add_action_or_reset(&vdev->dev, virtio_fs_cleanup_dax,
					fs->dax_dev);
}

static int virtio_fs_probe(struct virtio_device *vdev)
{
struct virtio_fs *fs;
Expand All @@ -707,6 +841,10 @@ static int virtio_fs_probe(struct virtio_device *vdev)

/* TODO vq affinity */

ret = virtio_fs_setup_dax(vdev, fs);
if (ret < 0)
goto out_vqs;

/* Bring the device online in case the filesystem is mounted and
* requests need to be sent before we return.
*/
Expand Down
3 changes: 3 additions & 0 deletions include/uapi/linux/virtio_fs.h
Original file line number Diff line number Diff line change
Expand Up @@ -16,4 +16,7 @@ struct virtio_fs_config {
__le32 num_request_queues;
} __attribute__((packed));

/* For the id field in virtio_pci_shm_cap */
#define VIRTIO_FS_SHMCAP_ID_CACHE 0

#endif /* _UAPI_LINUX_VIRTIO_FS_H */

0 comments on commit 22f3787

Please sign in to comment.