v2: - function xe_device_is_vf_enabled has been refactored to xe_sriov_pf_has_vfs_enabled and moved to xe_sriov_pf_helpers.h. - The code now distinctly checks for SR-IOV VF mode and SR-IOV PF with VFs enabled. - Log messages have been updated to explicitly state the current mode. - The function xe_multi_ccs_mode_enabled is moved to xe_device.h
v3: Added missing argument documentation for xe_sriov_pf_has_vfs_enabled v4: - sysfs interface for CCS mode is not initialized when operating in SRIOV VF Mode. - xe_sriov_pf_has_vfs_enabled() check is sufficient while CCS mode enablement. - remove unnecessary comments as flow is self explanatory. v5:(review comments from Michal) - Add xe device level CCS mode block with mutex lock and CCS mode state - necessary functions to manage ccs mode state to provide strict mutual exclusive support b/w CCS mode & SRIOV VF enabling v6: - Remodeled implementation based on lockdown the PF using custom guard supported functions by Michal Signed-off-by: Nareshkumar Gollakoti <[email protected]> --- drivers/gpu/drm/xe/xe_gt_ccs_mode.c | 47 ++++++++++++++++++++++++----- 1 file changed, 39 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_gt_ccs_mode.c b/drivers/gpu/drm/xe/xe_gt_ccs_mode.c index 50fffc9ebf62..495bf517a6d3 100644 --- a/drivers/gpu/drm/xe/xe_gt_ccs_mode.c +++ b/drivers/gpu/drm/xe/xe_gt_ccs_mode.c @@ -13,6 +13,7 @@ #include "xe_gt_sysfs.h" #include "xe_mmio.h" #include "xe_sriov.h" +#include "xe_sriov_pf_helpers.h" static void __xe_gt_apply_ccs_mode(struct xe_gt *gt, u32 num_engines) { @@ -108,6 +109,30 @@ ccs_mode_show(struct device *kdev, return sysfs_emit(buf, "%u\n", gt->ccs_mode); } +static int xe_gt_prepare_ccs_mode_enabling(struct xe_device *xe, + struct xe_gt *gt) +{ + /* + * The arm guard is only activated during CCS mode enabling, + * and this should happen when CCS mode is in default mode. + * lockdown arm guard ensures there is no VFs enabling + * as CCS mode enabling in progress/enabled. 
+ */ + if (!(gt->ccs_mode > 1)) + return xe_sriov_pf_arm_guard(xe, &xe->sriov.pf.guard_vfs_enabling, + true, NULL); + return 0; +} + +static void xe_gt_finish_ccs_mode_enabling(struct xe_device *xe, + struct xe_gt *gt) +{ + /* disarm the guard, if CCS mode is reverted to default */ + if (!(gt->ccs_mode > 1)) + xe_sriov_pf_disarm_guard(xe, &xe->sriov.pf.guard_vfs_enabling, + true, NULL); +} + static ssize_t ccs_mode_store(struct device *kdev, struct device_attribute *attr, const char *buff, size_t count) @@ -117,15 +142,13 @@ ccs_mode_store(struct device *kdev, struct device_attribute *attr, u32 num_engines, num_slices; int ret; - if (IS_SRIOV(xe)) { - xe_gt_dbg(gt, "Can't change compute mode when running as %s\n", - xe_sriov_mode_to_string(xe_device_sriov_mode(xe))); - return -EOPNOTSUPP; - } + ret = xe_gt_prepare_ccs_mode_enabling(xe, gt); + if (ret) + return ret; ret = kstrtou32(buff, 0, &num_engines); if (ret) - return ret; + goto err; /* * Ensure number of engines specified is valid and there is an @@ -135,7 +158,8 @@ ccs_mode_store(struct device *kdev, struct device_attribute *attr, if (!num_engines || num_engines > num_slices || num_slices % num_engines) { xe_gt_dbg(gt, "Invalid compute config, %d engines %d slices\n", num_engines, num_slices); - return -EINVAL; + ret = -EINVAL; + goto err; } /* CCS mode can only be updated when there are no drm clients */ @@ -143,7 +167,8 @@ ccs_mode_store(struct device *kdev, struct device_attribute *attr, if (!list_empty(&xe->drm.filelist)) { mutex_unlock(&xe->drm.filelist_mutex); xe_gt_dbg(gt, "Rejecting compute mode change as there are active drm clients\n"); - return -EBUSY; + ret = -EBUSY; + goto err; } if (gt->ccs_mode != num_engines) { @@ -155,7 +180,13 @@ ccs_mode_store(struct device *kdev, struct device_attribute *attr, mutex_unlock(&xe->drm.filelist_mutex); + xe_gt_finish_ccs_mode_enabling(xe, gt); + return count; +err: + xe_gt_finish_ccs_mode_enabling(xe, gt); + + return ret; } static DEVICE_ATTR_RW(ccs_mode); -- 
2.43.0
