Skip to content

Commit b891562

Browse files
jgunthorpe
authored and heftig committed
udmabuf: Do not create malformed scatterlists
Using a sg_set_folio() loop for every 4K results in a malformed scatterlist because sg_set_folio() has an issue with offsets > PAGE_SIZE and because scatterlist expects the creator to build a list which consolidates any physical contiguity. sg_alloc_table_from_pages() creates a valid scatterlist directly from a struct page array, so go back to that. Remove the offsets allocation and just store an array of tail pages as it did before the below commit. Everything wants that anyhow. Fixes: 0c8b91e ("udmabuf: add back support for mapping hugetlb pages") Reported-by: Julian Orth <ju.orth@gmail.com> Closes: https://lore.kernel.org/all/20260308-scatterlist-v1-1-39c4566b0bba@gmail.com/ Signed-off-by: Jason Gunthorpe <jgg@nvidia.com> Cherry-picked-for: https://gitlab.archlinux.org/archlinux/packaging/packages/linux/-/issues/183
1 parent a42e392 commit b891562

File tree

1 file changed

+13
-36
lines changed

1 file changed

+13
-36
lines changed

drivers/dma-buf/udmabuf.c

Lines changed: 13 additions & 36 deletions
Original file line numberDiff line numberDiff line change
@@ -26,10 +26,10 @@ MODULE_PARM_DESC(size_limit_mb, "Max size of a dmabuf, in megabytes. Default is
2626

2727
struct udmabuf {
2828
pgoff_t pagecount;
29-
struct folio **folios;
29+
struct page **pages;
3030

3131
/**
32-
* Unlike folios, pinned_folios is only used for unpin.
32+
* Unlike pages, pinned_folios is only used for unpin.
3333
* So, nr_pinned is not the same to pagecount, the pinned_folios
3434
* only set each folio which already pinned when udmabuf_create.
3535
* Note that, since a folio may be pinned multiple times, each folio
@@ -41,7 +41,6 @@ struct udmabuf {
4141

4242
struct sg_table *sg;
4343
struct miscdevice *device;
44-
pgoff_t *offsets;
4544
};
4645

4746
static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf)
@@ -55,8 +54,7 @@ static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf)
5554
if (pgoff >= ubuf->pagecount)
5655
return VM_FAULT_SIGBUS;
5756

58-
pfn = folio_pfn(ubuf->folios[pgoff]);
59-
pfn += ubuf->offsets[pgoff] >> PAGE_SHIFT;
57+
pfn = page_to_pfn(ubuf->pages[pgoff]);
6058

6159
ret = vmf_insert_pfn(vma, vmf->address, pfn);
6260
if (ret & VM_FAULT_ERROR)
@@ -73,8 +71,7 @@ static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf)
7371
if (WARN_ON(pgoff >= ubuf->pagecount))
7472
break;
7573

76-
pfn = folio_pfn(ubuf->folios[pgoff]);
77-
pfn += ubuf->offsets[pgoff] >> PAGE_SHIFT;
74+
pfn = page_to_pfn(ubuf->pages[pgoff]);
7875

7976
/**
8077
* If the below vmf_insert_pfn() fails, we do not return an
@@ -109,22 +106,11 @@ static int mmap_udmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
109106
static int vmap_udmabuf(struct dma_buf *buf, struct iosys_map *map)
110107
{
111108
struct udmabuf *ubuf = buf->priv;
112-
struct page **pages;
113109
void *vaddr;
114-
pgoff_t pg;
115110

116111
dma_resv_assert_held(buf->resv);
117112

118-
pages = kvmalloc_array(ubuf->pagecount, sizeof(*pages), GFP_KERNEL);
119-
if (!pages)
120-
return -ENOMEM;
121-
122-
for (pg = 0; pg < ubuf->pagecount; pg++)
123-
pages[pg] = folio_page(ubuf->folios[pg],
124-
ubuf->offsets[pg] >> PAGE_SHIFT);
125-
126-
vaddr = vm_map_ram(pages, ubuf->pagecount, -1);
127-
kvfree(pages);
113+
vaddr = vm_map_ram(ubuf->pages, ubuf->pagecount, -1);
128114
if (!vaddr)
129115
return -EINVAL;
130116

@@ -146,22 +132,18 @@ static struct sg_table *get_sg_table(struct device *dev, struct dma_buf *buf,
146132
{
147133
struct udmabuf *ubuf = buf->priv;
148134
struct sg_table *sg;
149-
struct scatterlist *sgl;
150-
unsigned int i = 0;
151135
int ret;
152136

153137
sg = kzalloc(sizeof(*sg), GFP_KERNEL);
154138
if (!sg)
155139
return ERR_PTR(-ENOMEM);
156140

157-
ret = sg_alloc_table(sg, ubuf->pagecount, GFP_KERNEL);
141+
ret = sg_alloc_table_from_pages(sg, ubuf->pages, ubuf->pagecount, 0,
142+
ubuf->pagecount << PAGE_SHIFT,
143+
GFP_KERNEL);
158144
if (ret < 0)
159145
goto err_alloc;
160146

161-
for_each_sg(sg->sgl, sgl, ubuf->pagecount, i)
162-
sg_set_folio(sgl, ubuf->folios[i], PAGE_SIZE,
163-
ubuf->offsets[i]);
164-
165147
ret = dma_map_sgtable(dev, sg, direction, 0);
166148
if (ret < 0)
167149
goto err_map;
@@ -207,12 +189,8 @@ static void unpin_all_folios(struct udmabuf *ubuf)
207189

208190
static __always_inline int init_udmabuf(struct udmabuf *ubuf, pgoff_t pgcnt)
209191
{
210-
ubuf->folios = kvmalloc_array(pgcnt, sizeof(*ubuf->folios), GFP_KERNEL);
211-
if (!ubuf->folios)
212-
return -ENOMEM;
213-
214-
ubuf->offsets = kvcalloc(pgcnt, sizeof(*ubuf->offsets), GFP_KERNEL);
215-
if (!ubuf->offsets)
192+
ubuf->pages = kvmalloc_array(pgcnt, sizeof(*ubuf->pages), GFP_KERNEL);
193+
if (!ubuf->pages)
216194
return -ENOMEM;
217195

218196
ubuf->pinned_folios = kvmalloc_array(pgcnt,
@@ -227,8 +205,7 @@ static __always_inline int init_udmabuf(struct udmabuf *ubuf, pgoff_t pgcnt)
227205
static __always_inline void deinit_udmabuf(struct udmabuf *ubuf)
228206
{
229207
unpin_all_folios(ubuf);
230-
kvfree(ubuf->offsets);
231-
kvfree(ubuf->folios);
208+
kvfree(ubuf->pages);
232209
}
233210

234211
static void release_udmabuf(struct dma_buf *buf)
@@ -346,8 +323,8 @@ static long udmabuf_pin_folios(struct udmabuf *ubuf, struct file *memfd,
346323
ubuf->pinned_folios[nr_pinned++] = folios[cur_folio];
347324

348325
for (; subpgoff < fsize; subpgoff += PAGE_SIZE) {
349-
ubuf->folios[upgcnt] = folios[cur_folio];
350-
ubuf->offsets[upgcnt] = subpgoff;
326+
ubuf->pages[upgcnt] = folio_page(folios[cur_folio],
327+
subpgoff >> PAGE_SHIFT);
351328
++upgcnt;
352329

353330
if (++cur_pgcnt >= pgcnt)

0 commit comments

Comments
 (0)