Commit eba6a36

Merge: DRM Stable Backport 9.6 from v6.12.9

MR: https://gitlab.com/redhat/centos-stream/src/kernel/centos-stream-9/-/merge_requests/6194
JIRA: https://issues.redhat.com/browse/RHEL-53571

I didn't backport these two patches:
  8e86e99 udmabuf: udmabuf_create pin folio codestyle cleanup
  93f08e5 udmabuf: fix racy memfd sealing check
(They depend on commit c6a3194 "udmabuf: pin the pages using memfd_pin_folios() API",
which is not in centos-stream 9.)

Signed-off-by: Jocelyn Falempe <jfalempe@redhat.com>
Approved-by: Mika Penttilä <mpenttil@redhat.com>
Approved-by: Lyude Paul <lyude@redhat.com>
Approved-by: CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com>
Merged-by: Patrick Talbert <ptalbert@redhat.com>
2 parents 733db7c + e848bf6 commit eba6a36

File tree: 175 files changed (+2908 / -1102 lines)

drivers/dma-buf/Kconfig

Lines changed: 1 addition & 0 deletions
@@ -36,6 +36,7 @@ config UDMABUF
 	depends on DMA_SHARED_BUFFER
 	depends on MEMFD_CREATE || COMPILE_TEST
 	depends on MMU
+	select VMAP_PFN
 	help
 	  A driver to let userspace turn memfd regions into dma-bufs.
 	  Qemu can use this to create host dmabufs for guest framebuffers.

drivers/dma-buf/dma-buf.c

Lines changed: 1 addition & 1 deletion
@@ -60,7 +60,7 @@ static void __dma_buf_debugfs_list_add(struct dma_buf *dmabuf)
 {
 }
 
-static void __dma_buf_debugfs_list_del(struct file *file)
+static void __dma_buf_debugfs_list_del(struct dma_buf *dmabuf)
 {
 }
 #endif

drivers/dma-buf/dma-fence-array.c

Lines changed: 27 additions & 1 deletion
@@ -103,10 +103,36 @@ static bool dma_fence_array_enable_signaling(struct dma_fence *fence)
 static bool dma_fence_array_signaled(struct dma_fence *fence)
 {
 	struct dma_fence_array *array = to_dma_fence_array(fence);
+	int num_pending;
+	unsigned int i;
 
-	if (atomic_read(&array->num_pending) > 0)
+	/*
+	 * We need to read num_pending before checking the enable_signal bit
+	 * to avoid racing with the enable_signaling() implementation, which
+	 * might decrement the counter, and cause a partial check.
+	 * atomic_read_acquire() pairs with atomic_dec_and_test() in
+	 * dma_fence_array_enable_signaling()
+	 *
+	 * The !--num_pending check is here to account for the any_signaled case
+	 * if we race with enable_signaling(), that means the !num_pending check
+	 * in the is_signalling_enabled branch might be outdated (num_pending
+	 * might have been decremented), but that's fine. The user will get the
+	 * right value when testing again later.
+	 */
+	num_pending = atomic_read_acquire(&array->num_pending);
+	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &array->base.flags)) {
+		if (num_pending <= 0)
+			goto signal;
 		return false;
+	}
+
+	for (i = 0; i < array->num_fences; ++i) {
+		if (dma_fence_is_signaled(array->fences[i]) && !--num_pending)
+			goto signal;
+	}
+	return false;
 
+signal:
 	dma_fence_array_clear_pending_error(array);
 	return true;
 }
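
Note on the ordering above: the acquire read of num_pending pairs with the fully ordered atomic_dec_and_test() in dma_fence_array_enable_signaling(), so a reader that observes the published state also observes the earlier decrement. Below is a user-space C11 sketch of that generic acquire/release guarantee (illustration only; the names pending and enable_signal and the thread scaffolding are invented for this sketch, and it is not the kernel code path):

/*
 * Illustration of acquire/release pairing: if the acquire load sees the
 * flag set, it is also guaranteed to see the decrement published before
 * the release store.  Build with: cc -std=c11 -pthread demo.c
 */
#include <assert.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int pending = 2;            /* fences not yet signaled */
static atomic_bool enable_signal = false; /* "signaling enabled" flag */

/* Writer: account for one already-signaled fence, then publish the flag. */
static void *writer(void *arg)
{
	(void)arg;
	atomic_fetch_sub_explicit(&pending, 1, memory_order_relaxed);
	atomic_store_explicit(&enable_signal, true, memory_order_release);
	return NULL;
}

/* Reader: an acquire load that sees the flag also sees the decrement. */
static void *reader(void *arg)
{
	(void)arg;
	if (atomic_load_explicit(&enable_signal, memory_order_acquire)) {
		int n = atomic_load_explicit(&pending, memory_order_relaxed);

		assert(n <= 1); /* the decrement cannot be missed */
		printf("flag observed, pending = %d\n", n);
	}
	return NULL;
}

int main(void)
{
	pthread_t w, r;

	pthread_create(&w, NULL, writer, NULL);
	pthread_create(&r, NULL, reader, NULL);
	pthread_join(w, NULL);
	pthread_join(r, NULL);
	return 0;
}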

drivers/dma-buf/dma-fence-unwrap.c

Lines changed: 61 additions & 65 deletions
@@ -12,6 +12,7 @@
 #include <linux/dma-fence-chain.h>
 #include <linux/dma-fence-unwrap.h>
 #include <linux/slab.h>
+#include <linux/sort.h>
 
 /* Internal helper to start new array iteration, don't use directly */
 static struct dma_fence *
@@ -59,6 +60,25 @@ struct dma_fence *dma_fence_unwrap_next(struct dma_fence_unwrap *cursor)
 }
 EXPORT_SYMBOL_GPL(dma_fence_unwrap_next);
 
+
+static int fence_cmp(const void *_a, const void *_b)
+{
+	struct dma_fence *a = *(struct dma_fence **)_a;
+	struct dma_fence *b = *(struct dma_fence **)_b;
+
+	if (a->context < b->context)
+		return -1;
+	else if (a->context > b->context)
+		return 1;
+
+	if (dma_fence_is_later(b, a))
+		return 1;
+	else if (dma_fence_is_later(a, b))
+		return -1;
+
+	return 0;
+}
+
 /* Implementation for the dma_fence_merge() marco, don't use directly */
 struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
 					   struct dma_fence **fences,
@@ -67,8 +87,7 @@ struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
 	struct dma_fence_array *result;
 	struct dma_fence *tmp, **array;
 	ktime_t timestamp;
-	unsigned int i;
-	size_t count;
+	int i, j, count;
 
 	count = 0;
 	timestamp = ns_to_ktime(0);
@@ -96,78 +115,55 @@ struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
 	if (!array)
 		return NULL;
 
-	/*
-	 * This trashes the input fence array and uses it as position for the
-	 * following merge loop. This works because the dma_fence_merge()
-	 * wrapper macro is creating this temporary array on the stack together
-	 * with the iterators.
-	 */
-	for (i = 0; i < num_fences; ++i)
-		fences[i] = dma_fence_unwrap_first(fences[i], &iter[i]);
-
 	count = 0;
-	do {
-		unsigned int sel;
-
-restart:
-		tmp = NULL;
-		for (i = 0; i < num_fences; ++i) {
-			struct dma_fence *next;
-
-			while (fences[i] && dma_fence_is_signaled(fences[i]))
-				fences[i] = dma_fence_unwrap_next(&iter[i]);
-
-			next = fences[i];
-			if (!next)
-				continue;
-
-			/*
-			 * We can't guarantee that inpute fences are ordered by
-			 * context, but it is still quite likely when this
-			 * function is used multiple times. So attempt to order
-			 * the fences by context as we pass over them and merge
-			 * fences with the same context.
-			 */
-			if (!tmp || tmp->context > next->context) {
-				tmp = next;
-				sel = i;
-
-			} else if (tmp->context < next->context) {
-				continue;
-
-			} else if (dma_fence_is_later(tmp, next)) {
-				fences[i] = dma_fence_unwrap_next(&iter[i]);
-				goto restart;
+	for (i = 0; i < num_fences; ++i) {
+		dma_fence_unwrap_for_each(tmp, &iter[i], fences[i]) {
+			if (!dma_fence_is_signaled(tmp)) {
+				array[count++] = dma_fence_get(tmp);
 			} else {
-				fences[sel] = dma_fence_unwrap_next(&iter[sel]);
-				goto restart;
+				ktime_t t = dma_fence_timestamp(tmp);
+
+				if (ktime_after(t, timestamp))
+					timestamp = t;
 			}
 		}
+	}
 
-		if (tmp) {
-			array[count++] = dma_fence_get(tmp);
-			fences[sel] = dma_fence_unwrap_next(&iter[sel]);
-		}
-	} while (tmp);
+	if (count == 0 || count == 1)
+		goto return_fastpath;
 
-	if (count == 0) {
-		tmp = dma_fence_allocate_private_stub(ktime_get());
-		goto return_tmp;
-	}
+	sort(array, count, sizeof(*array), fence_cmp, NULL);
 
-	if (count == 1) {
-		tmp = array[0];
-		goto return_tmp;
+	/*
+	 * Only keep the most recent fence for each context.
+	 */
+	j = 0;
+	for (i = 1; i < count; i++) {
+		if (array[i]->context == array[j]->context)
+			dma_fence_put(array[i]);
+		else
+			array[++j] = array[i];
 	}
-
-	result = dma_fence_array_create(count, array,
-					dma_fence_context_alloc(1),
-					1, false);
-	if (!result) {
-		tmp = NULL;
-		goto return_tmp;
+	count = ++j;
+
+	if (count > 1) {
+		result = dma_fence_array_create(count, array,
+						dma_fence_context_alloc(1),
+						1, false);
+		if (!result) {
+			for (i = 0; i < count; i++)
+				dma_fence_put(array[i]);
+			tmp = NULL;
+			goto return_tmp;
+		}
+		return &result->base;
 	}
-	return &result->base;
+
+return_fastpath:
+	if (count == 0)
+		tmp = dma_fence_allocate_private_stub(timestamp);
	else
+		tmp = array[0];
 
 return_tmp:
 	kfree(array);
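
The rewritten __dma_fence_unwrap_merge() replaces the restart-driven selection loop with a simpler scheme: collect the unsignaled fences, sort them by context (newest first within a context), then keep only the first fence of each context run. Below is a stand-alone C sketch of that sort-and-compact idiom (illustration only; struct fence with ctx/seqno fields and qsort() stand in for the kernel's dma_fence pointers, dma_fence_is_later() and sort()):

/*
 * Sort-then-compact: keep the newest entry per context, mirroring the
 * fence_cmp()/sort() logic in the patch.  The types here are invented
 * for the sketch.  Build with: cc -std=c11 demo.c
 */
#include <stdio.h>
#include <stdlib.h>

struct fence {
	unsigned long long ctx;   /* like dma_fence->context            */
	unsigned long long seqno; /* stands in for dma_fence_is_later() */
};

static int fence_cmp(const void *_a, const void *_b)
{
	const struct fence *a = _a, *b = _b;

	if (a->ctx != b->ctx)
		return a->ctx < b->ctx ? -1 : 1;
	if (a->seqno != b->seqno)
		return a->seqno < b->seqno ? 1 : -1; /* newest first per ctx */
	return 0;
}

int main(void)
{
	struct fence array[] = {
		{ .ctx = 2, .seqno = 5 }, { .ctx = 1, .seqno = 9 },
		{ .ctx = 2, .seqno = 7 }, { .ctx = 1, .seqno = 3 },
	};
	int count = 4, i, j = 0;

	qsort(array, count, sizeof(*array), fence_cmp);

	/* Only keep the most recent fence for each context. */
	for (i = 1; i < count; i++) {
		if (array[i].ctx != array[j].ctx)
			array[++j] = array[i];
	}
	count = j + 1;

	for (i = 0; i < count; i++)
		printf("ctx %llu -> seqno %llu\n", array[i].ctx, array[i].seqno);
	return 0;
}

Because each context run starts with its newest fence, the compaction pass only has to keep the first element of every run; the kernel version additionally drops the duplicates' references with dma_fence_put().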

drivers/dma-buf/udmabuf.c

Lines changed: 41 additions & 27 deletions
@@ -68,21 +68,29 @@ static int mmap_udmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
 static int vmap_udmabuf(struct dma_buf *buf, struct iosys_map *map)
 {
 	struct udmabuf *ubuf = buf->priv;
-	struct page **pages;
+	unsigned long *pfns;
 	void *vaddr;
 	pgoff_t pg;
 
 	dma_resv_assert_held(buf->resv);
 
-	pages = kmalloc_array(ubuf->pagecount, sizeof(*pages), GFP_KERNEL);
-	if (!pages)
+	/**
+	 * HVO may free tail pages, so just use pfn to map each folio
+	 * into vmalloc area.
+	 */
+	pfns = kvmalloc_array(ubuf->pagecount, sizeof(*pfns), GFP_KERNEL);
+	if (!pfns)
 		return -ENOMEM;
 
-	for (pg = 0; pg < ubuf->pagecount; pg++)
-		pages[pg] = &ubuf->folios[pg]->page;
+	for (pg = 0; pg < ubuf->pagecount; pg++) {
+		unsigned long pfn = folio_pfn(ubuf->folios[pg]);
+
+		pfn += ubuf->offsets[pg] >> PAGE_SHIFT;
+		pfns[pg] = pfn;
+	}
 
-	vaddr = vm_map_ram(pages, ubuf->pagecount, -1);
-	kfree(pages);
+	vaddr = vmap_pfn(pfns, ubuf->pagecount, PAGE_KERNEL);
+	kvfree(pfns);
 	if (!vaddr)
 		return -EINVAL;
 
@@ -164,8 +172,8 @@ static void release_udmabuf(struct dma_buf *buf)
 
 	for (pg = 0; pg < ubuf->pagecount; pg++)
 		folio_put(ubuf->folios[pg]);
-	kfree(ubuf->offsets);
-	kfree(ubuf->folios);
+	kvfree(ubuf->offsets);
+	kvfree(ubuf->folios);
 	kfree(ubuf);
 }
 
@@ -216,7 +224,7 @@ static const struct dma_buf_ops udmabuf_ops = {
 };
 
 #define SEALS_WANTED (F_SEAL_SHRINK)
-#define SEALS_DENIED (F_SEAL_WRITE)
+#define SEALS_DENIED (F_SEAL_WRITE|F_SEAL_FUTURE_WRITE)
 
 static int handle_hugetlb_pages(struct udmabuf *ubuf, struct file *memfd,
 				pgoff_t offset, pgoff_t pgcnt,
@@ -297,24 +305,18 @@ static int check_memfd_seals(struct file *memfd)
 	return 0;
 }
 
-static int export_udmabuf(struct udmabuf *ubuf,
-			  struct miscdevice *device,
-			  u32 flags)
+static struct dma_buf *export_udmabuf(struct udmabuf *ubuf,
+				      struct miscdevice *device)
 {
 	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
-	struct dma_buf *buf;
 
 	ubuf->device = device;
 	exp_info.ops = &udmabuf_ops;
 	exp_info.size = ubuf->pagecount << PAGE_SHIFT;
 	exp_info.priv = ubuf;
 	exp_info.flags = O_RDWR;
 
-	buf = dma_buf_export(&exp_info);
-	if (IS_ERR(buf))
-		return PTR_ERR(buf);
-
-	return dma_buf_fd(buf, flags);
+	return dma_buf_export(&exp_info);
 }
 
 static long udmabuf_create(struct miscdevice *device,
@@ -324,6 +326,7 @@ static long udmabuf_create(struct miscdevice *device,
 	pgoff_t pgcnt, pgbuf = 0, pglimit;
 	struct file *memfd = NULL;
 	struct udmabuf *ubuf;
+	struct dma_buf *dmabuf;
 	int ret = -EINVAL;
 	u32 i, flags;
 
@@ -345,14 +348,14 @@ static long udmabuf_create(struct miscdevice *device,
 	if (!ubuf->pagecount)
 		goto err;
 
-	ubuf->folios = kmalloc_array(ubuf->pagecount, sizeof(*ubuf->folios),
-				     GFP_KERNEL);
+	ubuf->folios = kvmalloc_array(ubuf->pagecount, sizeof(*ubuf->folios),
+				      GFP_KERNEL);
 	if (!ubuf->folios) {
 		ret = -ENOMEM;
 		goto err;
 	}
-	ubuf->offsets = kcalloc(ubuf->pagecount, sizeof(*ubuf->offsets),
-				GFP_KERNEL);
+	ubuf->offsets = kvcalloc(ubuf->pagecount, sizeof(*ubuf->offsets),
+				 GFP_KERNEL);
 	if (!ubuf->offsets) {
 		ret = -ENOMEM;
 		goto err;
@@ -382,9 +385,20 @@ static long udmabuf_create(struct miscdevice *device,
 	}
 
 	flags = head->flags & UDMABUF_FLAGS_CLOEXEC ? O_CLOEXEC : 0;
-	ret = export_udmabuf(ubuf, device, flags);
-	if (ret < 0)
+	dmabuf = export_udmabuf(ubuf, device);
+	if (IS_ERR(dmabuf)) {
+		ret = PTR_ERR(dmabuf);
 		goto err;
+	}
+	/*
+	 * Ownership of ubuf is held by the dmabuf from here.
+	 * If the following dma_buf_fd() fails, dma_buf_put() cleans up both the
+	 * dmabuf and the ubuf (through udmabuf_ops.release).
+	 */
+
+	ret = dma_buf_fd(dmabuf, flags);
+	if (ret < 0)
+		dma_buf_put(dmabuf);
 
 	return ret;
 
@@ -393,8 +407,8 @@ static long udmabuf_create(struct miscdevice *device,
 		folio_put(ubuf->folios[--pgbuf]);
 	if (memfd)
 		fput(memfd);
-	kfree(ubuf->offsets);
-	kfree(ubuf->folios);
+	kvfree(ubuf->offsets);
+	kvfree(ubuf->folios);
 	kfree(ubuf);
 	return ret;
 }
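
The udmabuf_create() rework also makes the ownership hand-off explicit: once dma_buf_export() succeeds, the dma-buf owns the udmabuf, so a later dma_buf_fd() failure is unwound with dma_buf_put() (which ends up in udmabuf_ops.release) rather than by jumping to the err path. A condensed sketch of that export-then-install-fd pattern, assuming a kernel-module context and not buildable on its own; my_obj, my_ops and my_export_fd() are placeholder names, only the dma-buf calls are the ones used in the patch:

/*
 * Sketch only (kernel context assumed): export a dma-buf, then install
 * the fd, dropping the dma-buf reference if the fd cannot be installed.
 * my_obj and my_ops are placeholders; my_ops.release() is expected to
 * free the my_obj, as udmabuf_ops.release does for the udmabuf.
 */
#include <linux/dma-buf.h>
#include <linux/err.h>
#include <linux/fcntl.h>
#include <linux/types.h>

struct my_obj {
	size_t size;
	/* ... backing storage ... */
};

static const struct dma_buf_ops my_ops; /* .release must free the my_obj */

static int my_export_fd(struct my_obj *obj, u32 fd_flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct dma_buf *dmabuf;
	int fd;

	exp_info.ops = &my_ops;
	exp_info.size = obj->size;
	exp_info.priv = obj;
	exp_info.flags = O_RDWR;

	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf); /* obj is still the caller's to free */

	/*
	 * From here the dmabuf owns obj: if installing the fd fails,
	 * dropping the reference releases both through my_ops.release.
	 */
	fd = dma_buf_fd(dmabuf, fd_flags);
	if (fd < 0)
		dma_buf_put(dmabuf);

	return fd;
}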

drivers/gpu/drm/Kconfig

Lines changed: 2 additions & 1 deletion
@@ -152,6 +152,7 @@ config DRM_PANIC_SCREEN
 config DRM_PANIC_SCREEN_QR_CODE
 	bool "Add a panic screen with a QR code"
 	depends on DRM_PANIC && RUST
+	select ZLIB_DEFLATE
 	help
 	  This option adds a QR code generator, and a panic screen with a QR
 	  code. The QR code will contain the last lines of kmsg and other debug
@@ -331,7 +332,7 @@ config DRM_TTM_HELPER
 config DRM_GEM_DMA_HELPER
 	tristate
 	depends on DRM
-	select FB_DMAMEM_HELPERS if DRM_FBDEV_EMULATION
+	select FB_DMAMEM_HELPERS_DEFERRED if DRM_FBDEV_EMULATION
 	help
 	  Choose this if you need the GEM DMA helper functions
 

drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c

Lines changed: 1 addition & 1 deletion
@@ -158,7 +158,7 @@ static int aca_smu_get_valid_aca_banks(struct amdgpu_device *adev, enum aca_smu_
 		return -EINVAL;
 	}
 
-	if (start + count >= max_count)
+	if (start + count > max_count)
 		return -EINVAL;
 
 	count = min_t(int, count, max_count);
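
The aca_smu_get_valid_aca_banks() change fixes an off-by-one bound: a request for count banks starting at start fits exactly when start + count <= max_count, so only start + count > max_count should be rejected. A tiny stand-alone C check of the boundary case (range_ok() and the values are illustrative, not driver code):

/* Illustration of the boundary the one-character fix addresses. */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Reject the request only when it would run past the end of the banks. */
static bool range_ok(int start, int count, int max_count)
{
	return start + count <= max_count; /* i.e. fail on '>', not '>=' */
}

int main(void)
{
	assert(range_ok(0, 4, 4));  /* reading all max_count banks is valid */
	assert(!range_ok(1, 4, 4)); /* spilling past the end is still rejected */
	printf("boundary checks pass\n");
	return 0;
}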

drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c

Lines changed: 1 addition & 0 deletions
@@ -800,6 +800,7 @@ int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
 		return -EIO;
 	}
 
+	kfree(info);
 	return 0;
 }
 
