iommufd/viommu: Add IOMMU_VIOMMU_ALLOC ioctl
Add a new ioctl for user space to do a vIOMMU allocation. It must be based on a nesting parent HWPT, so take its refcount. An IOMMU driver wanting to support vIOMMUs must define its IOMMU_VIOMMU_TYPE_* in the uAPI header and implement a viommu_alloc op in its iommu_ops. Link: https://patch.msgid.link/r/dc2b8ba9ac935007beff07c1761c31cd097ed780.1730836219.git.nicolinc@nvidia.com Reviewed-by: Jason Gunthorpe <jgg@nvidia.com> Reviewed-by: Kevin Tian <kevin.tian@intel.com> Signed-off-by: Nicolin Chen <nicolinc@nvidia.com> Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
This commit is contained in:
committed by
Jason Gunthorpe
parent
d56d1e8405
commit
4db97c21ed
@@ -7,7 +7,8 @@ iommufd-y := \
|
||||
ioas.o \
|
||||
main.o \
|
||||
pages.o \
|
||||
vfio_compat.o
|
||||
vfio_compat.o \
|
||||
viommu.o
|
||||
|
||||
iommufd-$(CONFIG_IOMMUFD_TEST) += selftest.o
|
||||
|
||||
|
||||
@@ -506,6 +506,9 @@ static inline int iommufd_hwpt_replace_device(struct iommufd_device *idev,
|
||||
return iommu_group_replace_domain(idev->igroup->group, hwpt->domain);
|
||||
}
|
||||
|
||||
int iommufd_viommu_alloc_ioctl(struct iommufd_ucmd *ucmd);
|
||||
void iommufd_viommu_destroy(struct iommufd_object *obj);
|
||||
|
||||
#ifdef CONFIG_IOMMUFD_TEST
|
||||
int iommufd_test(struct iommufd_ucmd *ucmd);
|
||||
void iommufd_selftest_destroy(struct iommufd_object *obj);
|
||||
|
||||
@@ -307,6 +307,7 @@ union ucmd_buffer {
|
||||
struct iommu_ioas_unmap unmap;
|
||||
struct iommu_option option;
|
||||
struct iommu_vfio_ioas vfio_ioas;
|
||||
struct iommu_viommu_alloc viommu;
|
||||
#ifdef CONFIG_IOMMUFD_TEST
|
||||
struct iommu_test_cmd test;
|
||||
#endif
|
||||
@@ -360,6 +361,8 @@ static const struct iommufd_ioctl_op iommufd_ioctl_ops[] = {
|
||||
val64),
|
||||
IOCTL_OP(IOMMU_VFIO_IOAS, iommufd_vfio_ioas, struct iommu_vfio_ioas,
|
||||
__reserved),
|
||||
IOCTL_OP(IOMMU_VIOMMU_ALLOC, iommufd_viommu_alloc_ioctl,
|
||||
struct iommu_viommu_alloc, out_viommu_id),
|
||||
#ifdef CONFIG_IOMMUFD_TEST
|
||||
IOCTL_OP(IOMMU_TEST_CMD, iommufd_test, struct iommu_test_cmd, last),
|
||||
#endif
|
||||
@@ -495,6 +498,9 @@ static const struct iommufd_object_ops iommufd_object_ops[] = {
|
||||
[IOMMUFD_OBJ_FAULT] = {
|
||||
.destroy = iommufd_fault_destroy,
|
||||
},
|
||||
[IOMMUFD_OBJ_VIOMMU] = {
|
||||
.destroy = iommufd_viommu_destroy,
|
||||
},
|
||||
#ifdef CONFIG_IOMMUFD_TEST
|
||||
[IOMMUFD_OBJ_SELFTEST] = {
|
||||
.destroy = iommufd_selftest_destroy,
|
||||
|
||||
81
drivers/iommu/iommufd/viommu.c
Normal file
81
drivers/iommu/iommufd/viommu.c
Normal file
@@ -0,0 +1,81 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/* Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES
|
||||
*/
|
||||
#include "iommufd_private.h"
|
||||
|
||||
/*
 * Destroy callback for IOMMUFD_OBJ_VIOMMU objects (wired up via
 * iommufd_object_ops in main.c).
 *
 * Gives the driver a chance to tear down its private vIOMMU state first,
 * then drops the reference on the nesting parent HWPT that was taken by
 * iommufd_viommu_alloc_ioctl() when the vIOMMU was created.
 */
void iommufd_viommu_destroy(struct iommufd_object *obj)
{
	struct iommufd_viommu *viommu =
		container_of(obj, struct iommufd_viommu, obj);

	/* ops->destroy is optional; drivers without private state omit it */
	if (viommu->ops && viommu->ops->destroy)
		viommu->ops->destroy(viommu);
	/* Pairs with refcount_inc() in iommufd_viommu_alloc_ioctl() */
	refcount_dec(&viommu->hwpt->common.obj.users);
}
|
||||
|
||||
/*
 * Handler for ioctl(IOMMU_VIOMMU_ALLOC).
 *
 * Allocates a vIOMMU object via the device's driver (ops->viommu_alloc),
 * bound to a nesting parent HWPT identified by cmd->hwpt_id. On success the
 * new object ID is returned in cmd->out_viommu_id.
 *
 * Returns 0 on success; -EOPNOTSUPP for unsupported flags/type or a driver
 * without viommu_alloc; -EINVAL if the HWPT is not a nesting parent; or the
 * error from object lookup / driver allocation / responding to user space.
 */
int iommufd_viommu_alloc_ioctl(struct iommufd_ucmd *ucmd)
{
	struct iommu_viommu_alloc *cmd = ucmd->cmd;
	struct iommufd_hwpt_paging *hwpt_paging;
	struct iommufd_viommu *viommu;
	struct iommufd_device *idev;
	const struct iommu_ops *ops;
	int rc;

	/*
	 * No flags are defined yet, and IOMMU_VIOMMU_TYPE_DEFAULT is reserved:
	 * user space must request a concrete driver-defined vIOMMU type.
	 */
	if (cmd->flags || cmd->type == IOMMU_VIOMMU_TYPE_DEFAULT)
		return -EOPNOTSUPP;

	idev = iommufd_get_device(ucmd, cmd->dev_id);
	if (IS_ERR(idev))
		return PTR_ERR(idev);

	/* The driver opts into vIOMMU support by providing viommu_alloc */
	ops = dev_iommu_ops(idev->dev);
	if (!ops->viommu_alloc) {
		rc = -EOPNOTSUPP;
		goto out_put_idev;
	}

	hwpt_paging = iommufd_get_hwpt_paging(ucmd, cmd->hwpt_id);
	if (IS_ERR(hwpt_paging)) {
		rc = PTR_ERR(hwpt_paging);
		goto out_put_idev;
	}

	/* A vIOMMU must sit on top of a nesting parent HWPT */
	if (!hwpt_paging->nest_parent) {
		rc = -EINVAL;
		goto out_put_hwpt;
	}

	viommu = ops->viommu_alloc(idev->dev, hwpt_paging->common.domain,
				   ucmd->ictx, cmd->type);
	if (IS_ERR(viommu)) {
		rc = PTR_ERR(viommu);
		goto out_put_hwpt;
	}

	viommu->type = cmd->type;
	viommu->ictx = ucmd->ictx;
	viommu->hwpt = hwpt_paging;
	/* Pin the nesting parent; released in iommufd_viommu_destroy() */
	refcount_inc(&viommu->hwpt->common.obj.users);
	/*
	 * A physical IOMMU is normally not hot-unpluggable, so no reference is
	 * taken on iommu_dev here. If a hot-pluggable IOMMU instance exists,
	 * it is responsible for its own refcounting.
	 */
	viommu->iommu_dev = __iommu_get_iommu_dev(idev->dev);

	cmd->out_viommu_id = viommu->obj.id;
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
	if (rc)
		goto out_abort;
	/* Make the object visible to other ioctls only after responding */
	iommufd_object_finalize(ucmd->ictx, &viommu->obj);
	goto out_put_hwpt;

out_abort:
	iommufd_object_abort_and_destroy(ucmd->ictx, &viommu->obj);
out_put_hwpt:
	iommufd_put_object(ucmd->ictx, &hwpt_paging->common.obj);
out_put_idev:
	iommufd_put_object(ucmd->ictx, &idev->obj);
	return rc;
}
|
||||
@@ -52,6 +52,7 @@ enum {
|
||||
IOMMUFD_CMD_HWPT_INVALIDATE = 0x8d,
|
||||
IOMMUFD_CMD_FAULT_QUEUE_ALLOC = 0x8e,
|
||||
IOMMUFD_CMD_IOAS_MAP_FILE = 0x8f,
|
||||
IOMMUFD_CMD_VIOMMU_ALLOC = 0x90,
|
||||
};
|
||||
|
||||
/**
|
||||
@@ -822,4 +823,43 @@ struct iommu_fault_alloc {
|
||||
__u32 out_fault_fd;
|
||||
};
|
||||
#define IOMMU_FAULT_QUEUE_ALLOC _IO(IOMMUFD_TYPE, IOMMUFD_CMD_FAULT_QUEUE_ALLOC)
|
||||
|
||||
/**
 * enum iommu_viommu_type - Virtual IOMMU Type
 * @IOMMU_VIOMMU_TYPE_DEFAULT: Reserved for future use
 *
 * Drivers add their own IOMMU_VIOMMU_TYPE_* values here; the DEFAULT value
 * is rejected by IOMMU_VIOMMU_ALLOC.
 */
enum iommu_viommu_type {
	IOMMU_VIOMMU_TYPE_DEFAULT = 0,
};
|
||||
|
||||
/**
 * struct iommu_viommu_alloc - ioctl(IOMMU_VIOMMU_ALLOC)
 * @size: sizeof(struct iommu_viommu_alloc)
 * @flags: Must be 0
 * @type: Type of the virtual IOMMU. Must be defined in enum iommu_viommu_type
 * @dev_id: The device's physical IOMMU will be used to back the virtual IOMMU
 * @hwpt_id: ID of a nesting parent HWPT to associate to
 * @out_viommu_id: Output virtual IOMMU ID for the allocated object
 *
 * Allocate a virtual IOMMU object, representing the underlying physical IOMMU's
 * virtualization support that is a security-isolated slice of the real IOMMU HW
 * that is unique to a specific VM. Operations global to the IOMMU are connected
 * to the vIOMMU, such as:
 * - Security namespace for guest owned ID, e.g. guest-controlled cache tags
 * - Non-device-affiliated event reporting, e.g. invalidation queue errors
 * - Access to a sharable nesting parent pagetable across physical IOMMUs
 * - Virtualization of various platform IDs, e.g. RIDs and others
 * - Delivery of paravirtualized invalidation
 * - Direct assigned invalidation queues
 * - Direct assigned interrupts
 */
struct iommu_viommu_alloc {
	__u32 size;
	__u32 flags;
	__u32 type;
	__u32 dev_id;
	__u32 hwpt_id;
	__u32 out_viommu_id;
};
#define IOMMU_VIOMMU_ALLOC _IO(IOMMUFD_TYPE, IOMMUFD_CMD_VIOMMU_ALLOC)
|
||||
#endif
|
||||
|
||||
Reference in New Issue
Block a user