From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Jianfeng Liu <[email protected]>
Date: Sun, 28 Sep 2025 14:31:09 +0800
Subject: Revert "drm/xe/guc: Set RCS/CCS yield policy"

This reverts commit dd1a415dcfd5984bf83abd804c3cd9e0ff9dde30.
---
 drivers/gpu/drm/xe/abi/guc_actions_abi.h | 1 -
 drivers/gpu/drm/xe/abi/guc_klvs_abi.h | 25 ----
 drivers/gpu/drm/xe/xe_gt.c | 3 +-
 drivers/gpu/drm/xe/xe_guc.c | 6 +-
 drivers/gpu/drm/xe/xe_guc_submit.c | 66 ----------
 drivers/gpu/drm/xe/xe_guc_submit.h | 2 -
 6 files changed, 5 insertions(+), 98 deletions(-)

diff --git a/drivers/gpu/drm/xe/abi/guc_actions_abi.h b/drivers/gpu/drm/xe/abi/guc_actions_abi.h
index 111111111111..222222222222 100644
--- a/drivers/gpu/drm/xe/abi/guc_actions_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_actions_abi.h
@@ -117,7 +117,6 @@ enum xe_guc_action {
 XE_GUC_ACTION_ENTER_S_STATE = 0x501,
 XE_GUC_ACTION_EXIT_S_STATE = 0x502,
 XE_GUC_ACTION_GLOBAL_SCHED_POLICY_CHANGE = 0x506,
- XE_GUC_ACTION_UPDATE_SCHEDULING_POLICIES_KLV = 0x509,
 XE_GUC_ACTION_SCHED_CONTEXT = 0x1000,
 XE_GUC_ACTION_SCHED_CONTEXT_MODE_SET = 0x1001,
 XE_GUC_ACTION_SCHED_CONTEXT_MODE_DONE = 0x1002,
diff --git a/drivers/gpu/drm/xe/abi/guc_klvs_abi.h b/drivers/gpu/drm/xe/abi/guc_klvs_abi.h
index 111111111111..222222222222 100644
--- a/drivers/gpu/drm/xe/abi/guc_klvs_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_klvs_abi.h
@@ -17,7 +17,6 @@
 * | 0 | 31:16 | **KEY** - KLV key identifier |
 * | | | - `GuC Self Config KLVs`_ |
 * | | | - `GuC Opt In Feature KLVs`_ |
- * | | | - `GuC Scheduling Policies KLVs`_ |
 * | | | - `GuC VGT Policy KLVs`_ |
 * | | | - `GuC VF Configuration KLVs`_ |
 * | | | |
@@ -140,30 +139,6 @@ enum {
 #define GUC_KLV_OPT_IN_FEATURE_EXT_CAT_ERR_TYPE_KEY 0x4001
 #define GUC_KLV_OPT_IN_FEATURE_EXT_CAT_ERR_TYPE_LEN 0u
 
-/**
- * DOC: GuC Scheduling Policies KLVs
- *
- * `GuC KLV`_ keys available for use with UPDATE_SCHEDULING_POLICIES_KLV.
- *
- * _`GUC_KLV_SCHEDULING_POLICIES_RENDER_COMPUTE_YIELD` : 0x1001
- * Some platforms do not allow concurrent execution of RCS and CCS
- * workloads from different address spaces. By default, the GuC prioritizes
- * RCS submissions over CCS ones, which can lead to CCS workloads being
- * significantly (or completely) starved of execution time. This KLV allows
- * the driver to specify a quantum (in ms) and a ratio (percentage value
- * between 0 and 100), and the GuC will prioritize the CCS for that
- * percentage of each quantum. For example, specifying 100ms and 30% will
- * make the GuC prioritize the CCS for 30ms of every 100ms.
- * Note that this does not necessarly mean that RCS and CCS engines will
- * only be active for their percentage of the quantum, as the restriction
- * only kicks in if both classes are fully busy with non-compatible address
- * spaces; i.e., if one engine is idle or running the same address space,
- * a pending job on the other engine will still be submitted to the HW no
- * matter what the ratio is
- */
-#define GUC_KLV_SCHEDULING_POLICIES_RENDER_COMPUTE_YIELD_KEY 0x1001
-#define GUC_KLV_SCHEDULING_POLICIES_RENDER_COMPUTE_YIELD_LEN 2u
-
 /**
 * DOC: GuC VGT Policy KLVs
 *
diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
index 111111111111..222222222222 100644
--- a/drivers/gpu/drm/xe/xe_gt.c
+++ b/drivers/gpu/drm/xe/xe_gt.c
@@ -41,7 +41,6 @@
 #include "xe_gt_topology.h"
 #include "xe_guc_exec_queue_types.h"
 #include "xe_guc_pc.h"
-#include "xe_guc_submit.h"
 #include "xe_hw_fence.h"
 #include "xe_hw_engine_class_sysfs.h"
 #include "xe_irq.h"
@@ -98,7 +97,7 @@ void xe_gt_sanitize(struct xe_gt *gt)
 * FIXME: if xe_uc_sanitize is called here, on TGL driver will not
 * reload
 */
- xe_guc_submit_disable(&gt->uc.guc);
+ gt->uc.guc.submission_state.enabled = false;
 }
 
 static void xe_gt_enable_host_l2_vram(struct xe_gt *gt)
diff --git a/drivers/gpu/drm/xe/xe_guc.c b/drivers/gpu/drm/xe/xe_guc.c
index 111111111111..222222222222 100644
--- a/drivers/gpu/drm/xe/xe_guc.c
+++ b/drivers/gpu/drm/xe/xe_guc.c
@@ -825,7 +825,9 @@ int xe_guc_post_load_init(struct xe_guc *guc)
 return ret;
 }
 
- return xe_guc_submit_enable(guc);
+ guc->submission_state.enabled = true;
+
+ return 0;
 }
 
 int xe_guc_reset(struct xe_guc *guc)
@@ -1519,7 +1521,7 @@ void xe_guc_sanitize(struct xe_guc *guc)
 {
 xe_uc_fw_sanitize(&guc->fw);
 xe_guc_ct_disable(&guc->ct);
- xe_guc_submit_disable(guc);
+ guc->submission_state.enabled = false;
 }
 
 int xe_guc_reset_prepare(struct xe_guc *guc)
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index 111111111111..222222222222 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -32,7 +32,6 @@
 #include "xe_guc_ct.h"
 #include "xe_guc_exec_queue_types.h"
 #include "xe_guc_id_mgr.h"
-#include "xe_guc_klv_helpers.h"
 #include "xe_guc_submit_types.h"
 #include "xe_hw_engine.h"
 #include "xe_hw_fence.h"
@@ -317,71 +316,6 @@ int xe_guc_submit_init(struct xe_guc *guc, unsigned int num_ids)
 return drmm_add_action_or_reset(&xe->drm, guc_submit_fini, guc);
 }
 
-/*
- * Given that we want to guarantee enough RCS throughput to avoid missing
- * frames, we set the yield policy to 20% of each 80ms interval.
- */
-#define RC_YIELD_DURATION 80 /* in ms */
-#define RC_YIELD_RATIO 20 /* in percent */
-static u32 *emit_render_compute_yield_klv(u32 *emit)
-{
- *emit++ = PREP_GUC_KLV_TAG(SCHEDULING_POLICIES_RENDER_COMPUTE_YIELD);
- *emit++ = RC_YIELD_DURATION;
- *emit++ = RC_YIELD_RATIO;
-
- return emit;
-}
-
-#define SCHEDULING_POLICY_MAX_DWORDS 16
-static int guc_init_global_schedule_policy(struct xe_guc *guc)
-{
- u32 data[SCHEDULING_POLICY_MAX_DWORDS];
- u32 *emit = data;
- u32 count = 0;
- int ret;
-
- if (GUC_SUBMIT_VER(guc) < MAKE_GUC_VER(1, 1, 0))
- return 0;
-
- *emit++ = XE_GUC_ACTION_UPDATE_SCHEDULING_POLICIES_KLV;
-
- if (CCS_MASK(guc_to_gt(guc)))
- emit = emit_render_compute_yield_klv(emit);
-
- count = emit - data;
- if (count > 1) {
- xe_assert(guc_to_xe(guc), count <= SCHEDULING_POLICY_MAX_DWORDS);
-
- ret = xe_guc_ct_send_block(&guc->ct, data, count);
- if (ret < 0) {
- xe_gt_err(guc_to_gt(guc),
- "failed to enable GuC sheduling policies: %pe\n",
- ERR_PTR(ret));
- return ret;
- }
- }
-
- return 0;
-}
-
-int xe_guc_submit_enable(struct xe_guc *guc)
-{
- int ret;
-
- ret = guc_init_global_schedule_policy(guc);
- if (ret)
- return ret;
-
- guc->submission_state.enabled = true;
-
- return 0;
-}
-
-void xe_guc_submit_disable(struct xe_guc *guc)
-{
- guc->submission_state.enabled = false;
-}
-
 static void __release_guc_id(struct xe_guc *guc, struct xe_exec_queue *q, u32 xa_count)
 {
 int i;
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.h b/drivers/gpu/drm/xe/xe_guc_submit.h
index 111111111111..222222222222 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.h
+++ b/drivers/gpu/drm/xe/xe_guc_submit.h
@@ -13,8 +13,6 @@ struct xe_exec_queue;
 struct xe_guc;
 
 int xe_guc_submit_init(struct xe_guc *guc, unsigned int num_ids);
-int xe_guc_submit_enable(struct xe_guc *guc);
-void xe_guc_submit_disable(struct xe_guc *guc);
 
 int xe_guc_submit_reset_prepare(struct xe_guc *guc);
 void xe_guc_submit_reset_wait(struct xe_guc *guc);
-- 
Armbian
