Skip to content

Commit 6bac4c4

Browse files
author
CKI KWF Bot
committed
Merge: RHEL 10.1: DRM Stable Backport (v6.15.5)
MR: https://gitlab.com/redhat/centos-stream/src/kernel/centos-stream-10/-/merge_requests/1162 ## Overview The DRM backport's goal is to backport all the changes in the DRM subsystem to the kernel target version, with the biggest value being that we get the upstream hardware enablement (and bug fixes) into RHEL. After the main DRM Backport, this MR backports as many fixes as possible from the linux-stable tree for the target version: v6.15.5. ## Dependencies: This MR depends on the DRM Backport for 6.15: Depends: https://gitlab.com/redhat/centos-stream/src/kernel/centos-stream-10/-/merge_requests/834 Depends: https://gitlab.com/redhat/centos-stream/src/kernel/centos-stream-10/-/merge_requests/1124 And on an ACPI fix required for AMD Kraken and AMD Strix: Depends: https://gitlab.com/redhat/centos-stream/src/kernel/centos-stream-10/-/merge_requests/1179 ## Other: JIRA: https://issues.redhat.com/browse/RHEL-75958 Signed-off-by: José Expósito <jexposit@redhat.com> Approved-by: Jan Stancek <jstancek@redhat.com> Approved-by: Eric Chanudet <echanude@redhat.com> Approved-by: Jocelyn Falempe <jfalempe@redhat.com> Approved-by: David Airlie <airlied@redhat.com> Approved-by: CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com> Merged-by: CKI GitLab Kmaint Pipeline Bot <26919896-cki-kmaint-pipeline-bot@users.noreply.gitlab.com>
2 parents bbfd96f + 9b3467f commit 6bac4c4

File tree

200 files changed

+3185
-1935
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the searchbox below for content that may be hidden.

200 files changed

+3185
-1935
lines changed

Documentation/gpu/xe/index.rst

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -16,6 +16,7 @@ DG2, etc is provided to prototype the driver.
1616
xe_migrate
1717
xe_cs
1818
xe_pm
19+
xe_gt_freq
1920
xe_pcode
2021
xe_gt_mcr
2122
xe_wa
Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,14 @@
1+
.. SPDX-License-Identifier: (GPL-2.0+ OR MIT)
2+
3+
==========================
4+
Xe GT Frequency Management
5+
==========================
6+
7+
.. kernel-doc:: drivers/gpu/drm/xe/xe_gt_freq.c
8+
:doc: Xe GT Frequency Management
9+
10+
Internal API
11+
============
12+
13+
.. kernel-doc:: drivers/gpu/drm/xe/xe_gt_freq.c
14+
:internal:

Makefile

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@ NAME = Baby Opossum Posse
99
#
1010
RHEL_DRM_VERSION = 6
1111
RHEL_DRM_PATCHLEVEL = 15
12-
RHEL_DRM_SUBLEVEL =
12+
RHEL_DRM_SUBLEVEL = 5
1313

1414
# *DOCUMENTATION*
1515
# To see a list of typical targets execute "make help"

drivers/accel/ivpu/ivpu_fw.c

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -55,18 +55,18 @@ static struct {
5555
int gen;
5656
const char *name;
5757
} fw_names[] = {
58-
{ IVPU_HW_IP_37XX, "vpu_37xx.bin" },
58+
{ IVPU_HW_IP_37XX, "intel/vpu/vpu_37xx_v1.bin" },
5959
{ IVPU_HW_IP_37XX, "intel/vpu/vpu_37xx_v0.0.bin" },
60-
{ IVPU_HW_IP_40XX, "vpu_40xx.bin" },
60+
{ IVPU_HW_IP_40XX, "intel/vpu/vpu_40xx_v1.bin" },
6161
{ IVPU_HW_IP_40XX, "intel/vpu/vpu_40xx_v0.0.bin" },
62-
{ IVPU_HW_IP_50XX, "vpu_50xx.bin" },
62+
{ IVPU_HW_IP_50XX, "intel/vpu/vpu_50xx_v1.bin" },
6363
{ IVPU_HW_IP_50XX, "intel/vpu/vpu_50xx_v0.0.bin" },
6464
};
6565

6666
/* Production fw_names from the table above */
67-
MODULE_FIRMWARE("intel/vpu/vpu_37xx_v0.0.bin");
68-
MODULE_FIRMWARE("intel/vpu/vpu_40xx_v0.0.bin");
69-
MODULE_FIRMWARE("intel/vpu/vpu_50xx_v0.0.bin");
67+
MODULE_FIRMWARE("intel/vpu/vpu_37xx_v1.bin");
68+
MODULE_FIRMWARE("intel/vpu/vpu_40xx_v1.bin");
69+
MODULE_FIRMWARE("intel/vpu/vpu_50xx_v1.bin");
7070

7171
static int ivpu_fw_request(struct ivpu_device *vdev)
7272
{

drivers/accel/ivpu/ivpu_gem.c

Lines changed: 52 additions & 37 deletions
Original file line numberDiff line numberDiff line change
@@ -28,11 +28,21 @@ static inline void ivpu_dbg_bo(struct ivpu_device *vdev, struct ivpu_bo *bo, con
2828
{
2929
ivpu_dbg(vdev, BO,
3030
"%6s: bo %8p vpu_addr %9llx size %8zu ctx %d has_pages %d dma_mapped %d mmu_mapped %d wc %d imported %d\n",
31-
action, bo, bo->vpu_addr, ivpu_bo_size(bo), bo->ctx ? bo->ctx->id : 0,
31+
action, bo, bo->vpu_addr, ivpu_bo_size(bo), bo->ctx_id,
3232
(bool)bo->base.pages, (bool)bo->base.sgt, bo->mmu_mapped, bo->base.map_wc,
3333
(bool)bo->base.base.import_attach);
3434
}
3535

36+
static inline int ivpu_bo_lock(struct ivpu_bo *bo)
37+
{
38+
return dma_resv_lock(bo->base.base.resv, NULL);
39+
}
40+
41+
static inline void ivpu_bo_unlock(struct ivpu_bo *bo)
42+
{
43+
dma_resv_unlock(bo->base.base.resv);
44+
}
45+
3646
/*
3747
* ivpu_bo_pin() - pin the backing physical pages and map them to VPU.
3848
*
@@ -43,22 +53,22 @@ static inline void ivpu_dbg_bo(struct ivpu_device *vdev, struct ivpu_bo *bo, con
4353
int __must_check ivpu_bo_pin(struct ivpu_bo *bo)
4454
{
4555
struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
56+
struct sg_table *sgt;
4657
int ret = 0;
4758

48-
mutex_lock(&bo->lock);
49-
5059
ivpu_dbg_bo(vdev, bo, "pin");
51-
drm_WARN_ON(&vdev->drm, !bo->ctx);
5260

53-
if (!bo->mmu_mapped) {
54-
struct sg_table *sgt = drm_gem_shmem_get_pages_sgt(&bo->base);
61+
sgt = drm_gem_shmem_get_pages_sgt(&bo->base);
62+
if (IS_ERR(sgt)) {
63+
ret = PTR_ERR(sgt);
64+
ivpu_err(vdev, "Failed to map BO in IOMMU: %d\n", ret);
65+
return ret;
66+
}
5567

56-
if (IS_ERR(sgt)) {
57-
ret = PTR_ERR(sgt);
58-
ivpu_err(vdev, "Failed to map BO in IOMMU: %d\n", ret);
59-
goto unlock;
60-
}
68+
ivpu_bo_lock(bo);
6169

70+
if (!bo->mmu_mapped) {
71+
drm_WARN_ON(&vdev->drm, !bo->ctx);
6272
ret = ivpu_mmu_context_map_sgt(vdev, bo->ctx, bo->vpu_addr, sgt,
6373
ivpu_bo_is_snooped(bo));
6474
if (ret) {
@@ -69,7 +79,7 @@ int __must_check ivpu_bo_pin(struct ivpu_bo *bo)
6979
}
7080

7181
unlock:
72-
mutex_unlock(&bo->lock);
82+
ivpu_bo_unlock(bo);
7383

7484
return ret;
7585
}
@@ -84,7 +94,7 @@ ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx,
8494
if (!drm_dev_enter(&vdev->drm, &idx))
8595
return -ENODEV;
8696

87-
mutex_lock(&bo->lock);
97+
ivpu_bo_lock(bo);
8898

8999
ret = ivpu_mmu_context_insert_node(ctx, range, ivpu_bo_size(bo), &bo->mm_node);
90100
if (!ret) {
@@ -94,9 +104,7 @@ ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx,
94104
ivpu_err(vdev, "Failed to add BO to context %u: %d\n", ctx->id, ret);
95105
}
96106

97-
ivpu_dbg_bo(vdev, bo, "alloc");
98-
99-
mutex_unlock(&bo->lock);
107+
ivpu_bo_unlock(bo);
100108

101109
drm_dev_exit(idx);
102110

@@ -107,7 +115,7 @@ static void ivpu_bo_unbind_locked(struct ivpu_bo *bo)
107115
{
108116
struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
109117

110-
lockdep_assert(lockdep_is_held(&bo->lock) || !kref_read(&bo->base.base.refcount));
118+
lockdep_assert(dma_resv_held(bo->base.base.resv) || !kref_read(&bo->base.base.refcount));
111119

112120
if (bo->mmu_mapped) {
113121
drm_WARN_ON(&vdev->drm, !bo->ctx);
@@ -125,14 +133,12 @@ static void ivpu_bo_unbind_locked(struct ivpu_bo *bo)
125133
if (bo->base.base.import_attach)
126134
return;
127135

128-
dma_resv_lock(bo->base.base.resv, NULL);
129136
if (bo->base.sgt) {
130137
dma_unmap_sgtable(vdev->drm.dev, bo->base.sgt, DMA_BIDIRECTIONAL, 0);
131138
sg_free_table(bo->base.sgt);
132139
kfree(bo->base.sgt);
133140
bo->base.sgt = NULL;
134141
}
135-
dma_resv_unlock(bo->base.base.resv);
136142
}
137143

138144
void ivpu_bo_unbind_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
@@ -144,12 +150,12 @@ void ivpu_bo_unbind_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_m
144150

145151
mutex_lock(&vdev->bo_list_lock);
146152
list_for_each_entry(bo, &vdev->bo_list, bo_list_node) {
147-
mutex_lock(&bo->lock);
153+
ivpu_bo_lock(bo);
148154
if (bo->ctx == ctx) {
149155
ivpu_dbg_bo(vdev, bo, "unbind");
150156
ivpu_bo_unbind_locked(bo);
151157
}
152-
mutex_unlock(&bo->lock);
158+
ivpu_bo_unlock(bo);
153159
}
154160
mutex_unlock(&vdev->bo_list_lock);
155161
}
@@ -169,7 +175,6 @@ struct drm_gem_object *ivpu_gem_create_object(struct drm_device *dev, size_t siz
169175
bo->base.pages_mark_dirty_on_put = true; /* VPU can dirty a BO anytime */
170176

171177
INIT_LIST_HEAD(&bo->bo_list_node);
172-
mutex_init(&bo->lock);
173178

174179
return &bo->base.base;
175180
}
@@ -215,7 +220,7 @@ struct drm_gem_object *ivpu_gem_prime_import(struct drm_device *dev,
215220
return ERR_PTR(ret);
216221
}
217222

218-
static struct ivpu_bo *ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 flags)
223+
static struct ivpu_bo *ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 flags, u32 ctx_id)
219224
{
220225
struct drm_gem_shmem_object *shmem;
221226
struct ivpu_bo *bo;
@@ -233,13 +238,16 @@ static struct ivpu_bo *ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 fla
233238
return ERR_CAST(shmem);
234239

235240
bo = to_ivpu_bo(&shmem->base);
241+
bo->ctx_id = ctx_id;
236242
bo->base.map_wc = flags & DRM_IVPU_BO_WC;
237243
bo->flags = flags;
238244

239245
mutex_lock(&vdev->bo_list_lock);
240246
list_add_tail(&bo->bo_list_node, &vdev->bo_list);
241247
mutex_unlock(&vdev->bo_list_lock);
242248

249+
ivpu_dbg_bo(vdev, bo, "alloc");
250+
243251
return bo;
244252
}
245253

@@ -277,10 +285,14 @@ static void ivpu_gem_bo_free(struct drm_gem_object *obj)
277285
list_del(&bo->bo_list_node);
278286
mutex_unlock(&vdev->bo_list_lock);
279287

280-
drm_WARN_ON(&vdev->drm, !dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ));
288+
drm_WARN_ON(&vdev->drm, !drm_gem_is_imported(&bo->base.base) &&
289+
!dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ));
290+
drm_WARN_ON(&vdev->drm, ivpu_bo_size(bo) == 0);
291+
drm_WARN_ON(&vdev->drm, bo->base.vaddr);
281292

282293
ivpu_bo_unbind_locked(bo);
283-
mutex_destroy(&bo->lock);
294+
drm_WARN_ON(&vdev->drm, bo->mmu_mapped);
295+
drm_WARN_ON(&vdev->drm, bo->ctx);
284296

285297
drm_WARN_ON(obj->dev, bo->base.pages_use_count > 1);
286298
drm_gem_shmem_free(&bo->base);
@@ -314,15 +326,18 @@ int ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
314326
if (size == 0)
315327
return -EINVAL;
316328

317-
bo = ivpu_bo_alloc(vdev, size, args->flags);
329+
bo = ivpu_bo_alloc(vdev, size, args->flags, file_priv->ctx.id);
318330
if (IS_ERR(bo)) {
319331
ivpu_err(vdev, "Failed to allocate BO: %pe (ctx %u size %llu flags 0x%x)",
320332
bo, file_priv->ctx.id, args->size, args->flags);
321333
return PTR_ERR(bo);
322334
}
323335

324336
ret = drm_gem_handle_create(file, &bo->base.base, &args->handle);
325-
if (!ret)
337+
if (ret)
338+
ivpu_err(vdev, "Failed to create handle for BO: %pe (ctx %u size %llu flags 0x%x)",
339+
bo, file_priv->ctx.id, args->size, args->flags);
340+
else
326341
args->vpu_addr = bo->vpu_addr;
327342

328343
drm_gem_object_put(&bo->base.base);
@@ -345,7 +360,7 @@ ivpu_bo_create(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
345360
drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(range->end));
346361
drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(size));
347362

348-
bo = ivpu_bo_alloc(vdev, size, flags);
363+
bo = ivpu_bo_alloc(vdev, size, flags, IVPU_GLOBAL_CONTEXT_MMU_SSID);
349364
if (IS_ERR(bo)) {
350365
ivpu_err(vdev, "Failed to allocate BO: %pe (vpu_addr 0x%llx size %llu flags 0x%x)",
351366
bo, range->start, size, flags);
@@ -361,9 +376,9 @@ ivpu_bo_create(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
361376
goto err_put;
362377

363378
if (flags & DRM_IVPU_BO_MAPPABLE) {
364-
dma_resv_lock(bo->base.base.resv, NULL);
379+
ivpu_bo_lock(bo);
365380
ret = drm_gem_shmem_vmap(&bo->base, &map);
366-
dma_resv_unlock(bo->base.base.resv);
381+
ivpu_bo_unlock(bo);
367382

368383
if (ret)
369384
goto err_put;
@@ -386,9 +401,9 @@ void ivpu_bo_free(struct ivpu_bo *bo)
386401
struct iosys_map map = IOSYS_MAP_INIT_VADDR(bo->base.vaddr);
387402

388403
if (bo->flags & DRM_IVPU_BO_MAPPABLE) {
389-
dma_resv_lock(bo->base.base.resv, NULL);
404+
ivpu_bo_lock(bo);
390405
drm_gem_shmem_vunmap(&bo->base, &map);
391-
dma_resv_unlock(bo->base.base.resv);
406+
ivpu_bo_unlock(bo);
392407
}
393408

394409
drm_gem_object_put(&bo->base.base);
@@ -407,12 +422,12 @@ int ivpu_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file
407422

408423
bo = to_ivpu_bo(obj);
409424

410-
mutex_lock(&bo->lock);
425+
ivpu_bo_lock(bo);
411426
args->flags = bo->flags;
412427
args->mmap_offset = drm_vma_node_offset_addr(&obj->vma_node);
413428
args->vpu_addr = bo->vpu_addr;
414429
args->size = obj->size;
415-
mutex_unlock(&bo->lock);
430+
ivpu_bo_unlock(bo);
416431

417432
drm_gem_object_put(obj);
418433
return ret;
@@ -449,10 +464,10 @@ int ivpu_bo_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file
449464

450465
static void ivpu_bo_print_info(struct ivpu_bo *bo, struct drm_printer *p)
451466
{
452-
mutex_lock(&bo->lock);
467+
ivpu_bo_lock(bo);
453468

454469
drm_printf(p, "%-9p %-3u 0x%-12llx %-10lu 0x%-8x %-4u",
455-
bo, bo->ctx ? bo->ctx->id : 0, bo->vpu_addr, bo->base.base.size,
470+
bo, bo->ctx_id, bo->vpu_addr, bo->base.base.size,
456471
bo->flags, kref_read(&bo->base.base.refcount));
457472

458473
if (bo->base.pages)
@@ -466,7 +481,7 @@ static void ivpu_bo_print_info(struct ivpu_bo *bo, struct drm_printer *p)
466481

467482
drm_printf(p, "\n");
468483

469-
mutex_unlock(&bo->lock);
484+
ivpu_bo_unlock(bo);
470485
}
471486

472487
void ivpu_bo_list(struct drm_device *dev, struct drm_printer *p)

drivers/accel/ivpu/ivpu_gem.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -17,10 +17,10 @@ struct ivpu_bo {
1717
struct list_head bo_list_node;
1818
struct drm_mm_node mm_node;
1919

20-
struct mutex lock; /* Protects: ctx, mmu_mapped, vpu_addr */
2120
u64 vpu_addr;
2221
u32 flags;
2322
u32 job_status; /* Valid only for command buffer */
23+
u32 ctx_id;
2424
bool mmu_mapped;
2525
};
2626

drivers/accel/ivpu/ivpu_job.c

Lines changed: 8 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -247,17 +247,17 @@ static int ivpu_cmdq_unregister(struct ivpu_file_priv *file_priv, struct ivpu_cm
247247
if (!cmdq->db_id)
248248
return 0;
249249

250+
ret = ivpu_jsm_unregister_db(vdev, cmdq->db_id);
251+
if (!ret)
252+
ivpu_dbg(vdev, JOB, "DB %d unregistered\n", cmdq->db_id);
253+
250254
if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) {
251255
ret = ivpu_jsm_hws_destroy_cmdq(vdev, file_priv->ctx.id, cmdq->id);
252256
if (!ret)
253257
ivpu_dbg(vdev, JOB, "Command queue %d destroyed, ctx %d\n",
254258
cmdq->id, file_priv->ctx.id);
255259
}
256260

257-
ret = ivpu_jsm_unregister_db(vdev, cmdq->db_id);
258-
if (!ret)
259-
ivpu_dbg(vdev, JOB, "DB %d unregistered\n", cmdq->db_id);
260-
261261
xa_erase(&file_priv->vdev->db_xa, cmdq->db_id);
262262
cmdq->db_id = 0;
263263

@@ -986,7 +986,8 @@ void ivpu_context_abort_work_fn(struct work_struct *work)
986986
return;
987987

988988
if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW)
989-
ivpu_jsm_reset_engine(vdev, 0);
989+
if (ivpu_jsm_reset_engine(vdev, 0))
990+
return;
990991

991992
mutex_lock(&vdev->context_list_lock);
992993
xa_for_each(&vdev->context_xa, ctx_id, file_priv) {
@@ -1009,7 +1010,8 @@ void ivpu_context_abort_work_fn(struct work_struct *work)
10091010
if (vdev->fw->sched_mode != VPU_SCHEDULING_MODE_HW)
10101011
goto runtime_put;
10111012

1012-
ivpu_jsm_hws_resume_engine(vdev, 0);
1013+
if (ivpu_jsm_hws_resume_engine(vdev, 0))
1014+
return;
10131015
/*
10141016
* In hardware scheduling mode NPU already has stopped processing jobs
10151017
* and won't send us any further notifications, thus we have to free job related resources

0 commit comments

Comments
 (0)