author      Maor Gottlieb <maorg@nvidia.com>        2021-08-24 17:25:29 +0300
committer   Jason Gunthorpe <jgg@nvidia.com>        2021-08-24 15:21:14 -0300
commit      90e7a6de62781c27d6a111fccfb19b807f9b6887 (patch)
tree        3d6375338bcc2032b928772f59bbbd3b410389f3 /drivers/gpu/drm/drm_prime.c
parent      7c60610d476766e128cc4284bb6349732cbd6606 (diff)
lib/scatterlist: Provide a dedicated function to support table append
RDMA is the only in-kernel user that uses __sg_alloc_table_from_pages to
append pages dynamically. In the next patch that mode will be extended and
the function will get more parameters, so separate it into a dedicated
function to make that change clearer.
Link: https://lore.kernel.org/r/20210824142531.3877007-2-maorg@nvidia.com
Signed-off-by: Maor Gottlieb <maorg@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
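
The drm_prime.c hunk below converts drm_prime_pages_to_sg() from the old
__sg_alloc_table_from_pages() (which returned a struct scatterlist * and took
append-oriented arguments) to the new non-append wrapper introduced by this
patch. As a reference only, here is a sketch of that wrapper's interface as
it can be inferred from the call site below; parameter names are approximate,
and the dedicated append variant added on the lib/scatterlist side is not
visible in this drm_prime.c-limited view:

/* Sketch inferred from the caller below; returns 0 on success or a negative errno. */
int sg_alloc_table_from_pages_segment(struct sg_table *sgt, struct page **pages,
                                      unsigned int n_pages, unsigned int offset,
                                      unsigned long size, unsigned int max_segment,
                                      gfp_t gfp_mask);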
Diffstat (limited to 'drivers/gpu/drm/drm_prime.c')
-rw-r--r--    drivers/gpu/drm/drm_prime.c    13
1 file changed, 6 insertions(+), 7 deletions(-)
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 2a54f86856af..cf3278041f9c 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -807,8 +807,8 @@ struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev,
                                        struct page **pages, unsigned int nr_pages)
 {
         struct sg_table *sg;
-        struct scatterlist *sge;
         size_t max_segment = 0;
+        int err;
 
         sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
         if (!sg)
@@ -818,13 +818,12 @@ struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev,
                 max_segment = dma_max_mapping_size(dev->dev);
         if (max_segment == 0)
                 max_segment = UINT_MAX;
-        sge = __sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
-                                          nr_pages << PAGE_SHIFT,
-                                          max_segment,
-                                          NULL, 0, GFP_KERNEL);
-        if (IS_ERR(sge)) {
+        err = sg_alloc_table_from_pages_segment(sg, pages, nr_pages, 0,
+                                                nr_pages << PAGE_SHIFT,
+                                                max_segment, GFP_KERNEL);
+        if (err) {
                 kfree(sg);
-                sg = ERR_CAST(sge);
+                sg = ERR_PTR(err);
         }
         return sg;
 }
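
For illustration only (not part of the patch), here is a minimal, self-contained
sketch of a caller that follows the same pattern as the converted
drm_prime_pages_to_sg() above. The function name example_pages_to_sgt() is
hypothetical; the calls to dma_max_mapping_size() and
sg_alloc_table_from_pages_segment() mirror the hunk:

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/limits.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

/*
 * Hypothetical example: build an sg_table from an array of pages while
 * honouring the device's maximum DMA mapping size, mirroring the pattern
 * used in drm_prime_pages_to_sg() after this patch.
 */
static struct sg_table *example_pages_to_sgt(struct device *dev,
                                             struct page **pages,
                                             unsigned int nr_pages)
{
        struct sg_table *sgt;
        size_t max_segment = 0;
        int err;

        sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt)
                return ERR_PTR(-ENOMEM);

        /* Cap segment size at what the DMA layer can map, if it reports a limit. */
        max_segment = dma_max_mapping_size(dev);
        if (max_segment == 0)
                max_segment = UINT_MAX;

        /* The new helper returns 0 or a negative errno, not a scatterlist pointer. */
        err = sg_alloc_table_from_pages_segment(sgt, pages, nr_pages, 0,
                                                (unsigned long)nr_pages << PAGE_SHIFT,
                                                max_segment, GFP_KERNEL);
        if (err) {
                kfree(sgt);
                return ERR_PTR(err);
        }
        return sgt;
}

A table built this way would later be released with sg_free_table() followed by
kfree() on the sg_table itself.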