Merge tag 'misc-habanalabs-next-2020-09-22' of git://people.freedesktop.org/~gabbayo/linux into char-misc-next

Oded writes:

This tag contains the following changes for kernel 5.10-rc1:

- Stop using the DRM's dma-fence module and instead use kernel completions.
- Support PCIe AER
- Use dma_mmap_coherent for memory allocated using dma_alloc_coherent
- Use smallest possible alignment when allocating virtual addresses in our
  MMU driver.
- Refactor MMU driver code to be device-oriented
- Allow user to check CS status without any sleep
- Add an option to map a Command Buffer to the Device's MMU
- Expose sync manager resource allocation to user through INFO IOCTL
- Convert code to use standard BIT(), GENMASK() and FIELD_PREP() (see the
  sketch after this list)
- Many small fixes (casting, better error messages, remove unused
  defines, h/w configuration fixes, etc.)
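
As a hedged illustration of the macro-conversion item above, the usual kernel pattern looks like the sketch below. The QM_GLBL_CFG_* register fields are invented for this example and are not taken from the habanalabs headers.

#include <linux/bits.h>		/* BIT(), GENMASK() */
#include <linux/bitfield.h>	/* FIELD_PREP(), FIELD_GET() */
#include <linux/types.h>

/* Hypothetical register layout, for illustration only */
#define QM_GLBL_CFG_ENABLE	BIT(0)		/* single enable bit   */
#define QM_GLBL_CFG_ARB_MASK	GENMASK(7, 4)	/* 4-bit arbiter field */

static u32 qm_build_cfg(u32 arbiter_id)
{
	/* open-coded form this replaces: (1 << 0) | ((arbiter_id & 0xf) << 4) */
	return QM_GLBL_CFG_ENABLE | FIELD_PREP(QM_GLBL_CFG_ARB_MASK, arbiter_id);
}

static u32 qm_get_arbiter(u32 cfg)
{
	/* open-coded form this replaces: (cfg >> 4) & 0xf */
	return FIELD_GET(QM_GLBL_CFG_ARB_MASK, cfg);
}

FIELD_PREP()/FIELD_GET() derive the shift from the mask at compile time, so the open-coded shift-and-mask arithmetic, and the off-by-one-bit mistakes it invites, goes away.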

* tag 'misc-habanalabs-next-2020-09-22' of git://people.freedesktop.org/~gabbayo/linux: (46 commits)
  habanalabs: update scratchpad register map
  habanalabs: add indication of security-enabled F/W
  habanalabs/gaudi: fix DMA completions max outstanding to 15
  habanalabs/gaudi: remove axi drain support
  habanalabs: update firmware interface file
  habanalabs: Add an option to map CB to device MMU
  habanalabs: Save context in a command buffer object
  habanalabs: no need for DMA_SHARED_BUFFER
  habanalabs: allow to wait on CS without sleep
  habanalabs/gaudi: increase timeout for boot fit load
  habanalabs: add debugfs support for MMU with 6 HOPs
  habanalabs: add num_hops to hl_mmu_properties
  habanalabs: refactor MMU as device-oriented
  habanalabs: rename mmu.c to mmu_v1.c
  habanalabs: use smallest possible alignment for virtual addresses
  habanalabs: check flag before reset because of f/w event
  habanalabs: increase PQ COMP_OFFSET by one nibble
  habanalabs: Fix alignment issue in cpucp_info structure
  habanalabs: remove unused define
  habanalabs: remove unused ASIC function pointer
  ...
gregkh committed Sep 22, 2020
2 parents e82ed73 + f279e5c commit 9e07279
Showing 33 changed files with 8,662 additions and 7,704 deletions.
18 changes: 17 additions & 1 deletion Documentation/ABI/testing/sysfs-driver-habanalabs
@@ -2,13 +2,17 @@ What: /sys/class/habanalabs/hl<n>/armcp_kernel_ver
Date: Jan 2019
KernelVersion: 5.1
Contact: [email protected]
Description: Version of the Linux kernel running on the device's CPU
Description: Version of the Linux kernel running on the device's CPU.
Will be DEPRECATED in Linux kernel version 5.10, and be
replaced with cpucp_kernel_ver

What: /sys/class/habanalabs/hl<n>/armcp_ver
Date: Jan 2019
KernelVersion: 5.1
Contact: [email protected]
Description: Version of the application running on the device's CPU
Will be DEPRECATED in Linux kernel version 5.10, and be
replaced with cpucp_ver

What: /sys/class/habanalabs/hl<n>/clk_max_freq_mhz
Date: Jun 2019
@@ -33,6 +37,18 @@ KernelVersion: 5.1
Contact: [email protected]
Description: Version of the Device's CPLD F/W

What: /sys/class/habanalabs/hl<n>/cpucp_kernel_ver
Date: Oct 2020
KernelVersion: 5.10
Contact: [email protected]
Description: Version of the Linux kernel running on the device's CPU

What: /sys/class/habanalabs/hl<n>/cpucp_ver
Date: Oct 2020
KernelVersion: 5.10
Contact: [email protected]
Description: Version of the application running on the device's CPU
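
These attributes are plain text sysfs files; a minimal userspace sketch that reads the new cpucp_ver attribute is shown below (it assumes the first device node is hl0, which is not stated in this hunk).

#include <stdio.h>

int main(void)
{
	char ver[256] = "";
	/* Read the CPU-CP application version of the first device */
	FILE *f = fopen("/sys/class/habanalabs/hl0/cpucp_ver", "r");

	if (!f)
		return 1;
	if (!fgets(ver, sizeof(ver), f)) {
		fclose(f);
		return 1;
	}
	fclose(f);
	printf("CPU-CP application version: %s", ver);
	return 0;
}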

What: /sys/class/habanalabs/hl<n>/device_type
Date: Jan 2019
KernelVersion: 5.1
1 change: 0 additions & 1 deletion drivers/misc/habanalabs/Kconfig
@@ -7,7 +7,6 @@ config HABANA_AI
tristate "HabanaAI accelerators (habanalabs)"
depends on PCI && HAS_IOMEM
select FRAME_VECTOR
select DMA_SHARED_BUFFER
select GENERIC_ALLOCATOR
select HWMON
help
4 changes: 2 additions & 2 deletions drivers/misc/habanalabs/common/Makefile
@@ -3,5 +3,5 @@ HL_COMMON_FILES := common/habanalabs_drv.o common/device.o common/context.o \
common/asid.o common/habanalabs_ioctl.o \
common/command_buffer.o common/hw_queue.o common/irq.o \
common/sysfs.o common/hwmon.o common/memory.o \
common/command_submission.o common/mmu.o common/firmware_if.o \
common/pci.o
common/command_submission.o common/mmu.o common/mmu_v1.o \
common/firmware_if.o common/pci.o
229 changes: 210 additions & 19 deletions drivers/misc/habanalabs/common/command_buffer.c
@@ -13,6 +13,131 @@
#include <linux/uaccess.h>
#include <linux/genalloc.h>

/*
 * cb_map_mem() - map a command buffer to the device's MMU.
 *
 * Allocate device virtual addresses for the CB from the context's CB VA pool,
 * one page at a time, and map them to the CB's bus (DMA) addresses. On
 * failure, any partial mappings and VA blocks are rolled back.
 */
static int cb_map_mem(struct hl_ctx *ctx, struct hl_cb *cb)
{
struct hl_device *hdev = ctx->hdev;
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct hl_vm_va_block *va_block, *tmp;
dma_addr_t bus_addr;
u64 virt_addr;
u32 page_size = prop->pmmu.page_size;
s32 offset;
int rc;

if (!hdev->supports_cb_mapping) {
dev_err_ratelimited(hdev->dev,
"Cannot map CB because no VA range is allocated for CB mapping\n");
return -EINVAL;
}

if (!hdev->mmu_enable) {
dev_err_ratelimited(hdev->dev,
"Cannot map CB because MMU is disabled\n");
return -EINVAL;
}

INIT_LIST_HEAD(&cb->va_block_list);

for (bus_addr = cb->bus_address;
bus_addr < cb->bus_address + cb->size;
bus_addr += page_size) {

virt_addr = (u64) gen_pool_alloc(ctx->cb_va_pool, page_size);
if (!virt_addr) {
dev_err(hdev->dev,
"Failed to allocate device virtual address for CB\n");
rc = -ENOMEM;
goto err_va_pool_free;
}

va_block = kzalloc(sizeof(*va_block), GFP_KERNEL);
if (!va_block) {
rc = -ENOMEM;
gen_pool_free(ctx->cb_va_pool, virt_addr, page_size);
goto err_va_pool_free;
}

va_block->start = virt_addr;
va_block->end = virt_addr + page_size;
va_block->size = page_size;
list_add_tail(&va_block->node, &cb->va_block_list);
}

mutex_lock(&ctx->mmu_lock);

bus_addr = cb->bus_address;
offset = 0;
list_for_each_entry(va_block, &cb->va_block_list, node) {
rc = hl_mmu_map(ctx, va_block->start, bus_addr, va_block->size,
list_is_last(&va_block->node,
&cb->va_block_list));
if (rc) {
dev_err(hdev->dev, "Failed to map VA %#llx to CB\n",
va_block->start);
goto err_va_umap;
}

bus_addr += va_block->size;
offset += va_block->size;
}

hdev->asic_funcs->mmu_invalidate_cache(hdev, false, VM_TYPE_USERPTR);

mutex_unlock(&ctx->mmu_lock);

cb->is_mmu_mapped = true;

return 0;

err_va_umap:
list_for_each_entry(va_block, &cb->va_block_list, node) {
if (offset <= 0)
break;
hl_mmu_unmap(ctx, va_block->start, va_block->size,
offset <= va_block->size);
offset -= va_block->size;
}

hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_USERPTR);

mutex_unlock(&ctx->mmu_lock);

err_va_pool_free:
list_for_each_entry_safe(va_block, tmp, &cb->va_block_list, node) {
gen_pool_free(ctx->cb_va_pool, va_block->start, va_block->size);
list_del(&va_block->node);
kfree(va_block);
}

return rc;
}

/*
 * cb_unmap_mem() - inverse of cb_map_mem(): unmap the CB's pages from the
 * device's MMU and return the virtual address blocks to the context's CB VA
 * pool.
 */
static void cb_unmap_mem(struct hl_ctx *ctx, struct hl_cb *cb)
{
struct hl_device *hdev = ctx->hdev;
struct hl_vm_va_block *va_block, *tmp;

mutex_lock(&ctx->mmu_lock);

list_for_each_entry(va_block, &cb->va_block_list, node)
if (hl_mmu_unmap(ctx, va_block->start, va_block->size,
list_is_last(&va_block->node,
&cb->va_block_list)))
dev_warn_ratelimited(hdev->dev,
"Failed to unmap CB's va 0x%llx\n",
va_block->start);

hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_USERPTR);

mutex_unlock(&ctx->mmu_lock);

list_for_each_entry_safe(va_block, tmp, &cb->va_block_list, node) {
gen_pool_free(ctx->cb_va_pool, va_block->start, va_block->size);
list_del(&va_block->node);
kfree(va_block);
}
}

static void cb_fini(struct hl_device *hdev, struct hl_cb *cb)
{
if (cb->is_internal)
@@ -47,6 +172,11 @@ static void cb_release(struct kref *ref)

hl_debugfs_remove_cb(cb);

if (cb->is_mmu_mapped)
cb_unmap_mem(cb->ctx, cb);

hl_ctx_put(cb->ctx);

cb_do_release(hdev, cb);
}

@@ -107,11 +237,12 @@ static struct hl_cb *hl_cb_alloc(struct hl_device *hdev, u32 cb_size,
}

int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr,
u32 cb_size, u64 *handle, int ctx_id, bool internal_cb)
struct hl_ctx *ctx, u32 cb_size, bool internal_cb,
bool map_cb, u64 *handle)
{
struct hl_cb *cb;
bool alloc_new_cb = true;
int rc;
int rc, ctx_id = ctx->asid;

/*
* Can't use generic function to check this because of special case
@@ -163,18 +294,32 @@ int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr,
}

cb->hdev = hdev;
cb->ctx_id = ctx_id;
cb->ctx = ctx;
hl_ctx_get(hdev, cb->ctx);

if (map_cb) {
if (ctx_id == HL_KERNEL_ASID_ID) {
dev_err(hdev->dev,
"CB mapping is not supported for kernel context\n");
rc = -EINVAL;
goto release_cb;
}

rc = cb_map_mem(ctx, cb);
if (rc)
goto release_cb;
}

spin_lock(&mgr->cb_lock);
rc = idr_alloc(&mgr->cb_handles, cb, 1, 0, GFP_ATOMIC);
spin_unlock(&mgr->cb_lock);

if (rc < 0) {
dev_err(hdev->dev, "Failed to allocate IDR for a new CB\n");
goto release_cb;
goto unmap_mem;
}

cb->id = rc;
cb->id = (u64) rc;

kref_init(&cb->refcount);
spin_lock_init(&cb->lock);
@@ -183,14 +328,18 @@ int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr,
* idr is 32-bit so we can safely OR it with a mask that is above
* 32 bit
*/
*handle = cb->id | HL_MMAP_CB_MASK;
*handle = cb->id | HL_MMAP_TYPE_CB;
*handle <<= PAGE_SHIFT;

hl_debugfs_add_cb(cb);

return 0;

unmap_mem:
if (cb->is_mmu_mapped)
cb_unmap_mem(cb->ctx, cb);
release_cb:
hl_ctx_put(cb->ctx);
cb_do_release(hdev, cb);
out_err:
*handle = 0;
@@ -250,9 +399,10 @@ int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data)
args->in.cb_size, HL_MAX_CB_SIZE);
rc = -EINVAL;
} else {
rc = hl_cb_create(hdev, &hpriv->cb_mgr,
args->in.cb_size, &handle,
hpriv->ctx->asid, false);
rc = hl_cb_create(hdev, &hpriv->cb_mgr, hpriv->ctx,
args->in.cb_size, false,
!!(args->in.flags & HL_CB_FLAGS_MAP),
&handle);
}

memset(args, 0, sizeof(*args));
@@ -300,11 +450,14 @@ int hl_cb_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma)
{
struct hl_device *hdev = hpriv->hdev;
struct hl_cb *cb;
phys_addr_t address;
u32 handle, user_cb_size;
int rc;

/* We use the page offset to hold the idr and thus we need to clear
* it before doing the mmap itself
*/
handle = vma->vm_pgoff;
vma->vm_pgoff = 0;

/* reference was taken here */
cb = hl_cb_get(hdev, &hpriv->cb_mgr, handle);
@@ -356,12 +509,8 @@ int hl_cb_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma)

vma->vm_private_data = cb;

/* Calculate address for CB */
address = virt_to_phys((void *) (uintptr_t) cb->kernel_address);

rc = hdev->asic_funcs->cb_mmap(hdev, vma, cb->kernel_address,
address, cb->size);

rc = hdev->asic_funcs->cb_mmap(hdev, vma, (void *) cb->kernel_address,
cb->bus_address, cb->size);
if (rc) {
spin_lock(&cb->lock);
cb->mmap = false;
@@ -425,7 +574,7 @@ void hl_cb_mgr_fini(struct hl_device *hdev, struct hl_cb_mgr *mgr)
if (kref_put(&cb->refcount, cb_release) != 1)
dev_err(hdev->dev,
"CB %d for CTX ID %d is still alive\n",
id, cb->ctx_id);
id, cb->ctx->asid);
}

idr_destroy(&mgr->cb_handles);
Expand All @@ -438,8 +587,8 @@ struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size,
struct hl_cb *cb;
int rc;

rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, cb_size, &cb_handle,
HL_KERNEL_ASID_ID, internal_cb);
rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, hdev->kernel_ctx, cb_size,
internal_cb, false, &cb_handle);
if (rc) {
dev_err(hdev->dev,
"Failed to allocate CB for the kernel driver %d\n", rc);
@@ -495,3 +644,45 @@ int hl_cb_pool_fini(struct hl_device *hdev)

return 0;
}

/*
 * hl_cb_va_pool_init() - create the per-context pool of device virtual
 * addresses used for mapping command buffers to the device's MMU. No-op on
 * ASICs that don't support CB mapping.
 */
int hl_cb_va_pool_init(struct hl_ctx *ctx)
{
struct hl_device *hdev = ctx->hdev;
struct asic_fixed_properties *prop = &hdev->asic_prop;
int rc;

if (!hdev->supports_cb_mapping)
return 0;

ctx->cb_va_pool = gen_pool_create(__ffs(prop->pmmu.page_size), -1);
if (!ctx->cb_va_pool) {
dev_err(hdev->dev,
"Failed to create VA gen pool for CB mapping\n");
return -ENOMEM;
}

rc = gen_pool_add(ctx->cb_va_pool, prop->cb_va_start_addr,
prop->cb_va_end_addr - prop->cb_va_start_addr, -1);
if (rc) {
dev_err(hdev->dev,
"Failed to add memory to VA gen pool for CB mapping\n");
goto err_pool_destroy;
}

return 0;

err_pool_destroy:
gen_pool_destroy(ctx->cb_va_pool);

return rc;
}

/* hl_cb_va_pool_fini() - destroy the per-context CB VA pool, if one exists. */
void hl_cb_va_pool_fini(struct hl_ctx *ctx)
{
struct hl_device *hdev = ctx->hdev;

if (!hdev->supports_cb_mapping)
return;

gen_pool_destroy(ctx->cb_va_pool);
}
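
To put the new HL_CB_FLAGS_MAP flag in context, here is a rough userspace sketch of creating a CB through the CB IOCTL with mapping requested. HL_IOCTL_CB, HL_CB_OP_CREATE, HL_CB_FLAGS_MAP and the in.cb_size/in.flags fields appear in this series; the out.cb_handle field name and the /dev/hl0 device node are assumptions that should be checked against include/uapi/misc/habanalabs.h.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <misc/habanalabs.h>	/* uapi: union hl_cb_args, HL_IOCTL_CB, HL_CB_FLAGS_MAP */

/* Create a 4 KB command buffer and ask the driver to map it to the
 * device's MMU (assumed uapi layout, see note above). */
static int create_mapped_cb(int fd, __u64 *handle)
{
	union hl_cb_args args;
	int rc;

	memset(&args, 0, sizeof(args));
	args.in.op = HL_CB_OP_CREATE;
	args.in.cb_size = 4096;
	args.in.flags = HL_CB_FLAGS_MAP;	/* new in this series */

	rc = ioctl(fd, HL_IOCTL_CB, &args);
	if (rc)
		return rc;

	*handle = args.out.cb_handle;
	return 0;
}

int main(void)
{
	__u64 handle;
	int fd = open("/dev/hl0", O_RDWR);	/* assumed first device node */

	if (fd < 0 || create_mapped_cb(fd, &handle))
		return 1;

	printf("CB handle: 0x%llx\n", (unsigned long long)handle);
	return 0;
}

As the kernel side above shows, a request with HL_CB_FLAGS_MAP fails with -EINVAL when the ASIC has no VA range reserved for CB mapping or when the MMU is disabled.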