78 files changed, 160 insertions, 116 deletions
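The pattern applied across these files replaces open-coded size arithmetic such as vmalloc(count * size) with vmalloc(array_size(count, size)); the array_size() helper from include/linux/overflow.h performs the multiplication with overflow checking and saturates to SIZE_MAX on overflow, so the allocation fails cleanly instead of silently returning an undersized buffer. The snippet below is a minimal user-space sketch of that behaviour, not the kernel implementation; array_size_model() and the sample values are illustrative stand-ins.

#include <stdio.h>
#include <stdint.h>

/* Simplified model of the kernel's array_size() helper: multiply with
 * overflow checking and saturate to SIZE_MAX so the allocator refuses
 * the request instead of handing back a too-small buffer. */
static size_t array_size_model(size_t n, size_t size)
{
	size_t bytes;

	if (__builtin_mul_overflow(n, size, &bytes))
		return SIZE_MAX;
	return bytes;
}

int main(void)
{
	size_t n = SIZE_MAX / 2 + 2, size = 4;	/* deliberately overflowing */

	printf("naive n * size:     %zu\n", n * size);                  /* wraps to a tiny value */
	printf("saturated multiply: %zu\n", array_size_model(n, size)); /* SIZE_MAX */
	return 0;
}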
diff --git a/arch/powerpc/kernel/rtasd.c b/arch/powerpc/kernel/rtasd.c
index f915db93cd42..44d66c33d59d 100644
--- a/arch/powerpc/kernel/rtasd.c
+++ b/arch/powerpc/kernel/rtasd.c
@@ -559,7 +559,8 @@ static int __init rtas_event_scan_init(void)
 	rtas_error_log_max = rtas_get_error_log_max();
 	rtas_error_log_buffer_max = rtas_error_log_max + sizeof(int);
 
-	rtas_log_buf = vmalloc(rtas_error_log_buffer_max*LOG_NUMBER);
+	rtas_log_buf = vmalloc(array_size(LOG_NUMBER,
+					  rtas_error_log_buffer_max));
 	if (!rtas_log_buf) {
 		printk(KERN_ERR "rtasd: no memory\n");
 		return -ENOMEM;
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index a670fa5fbe50..1b3fcafc685e 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -108,7 +108,7 @@ int kvmppc_allocate_hpt(struct kvm_hpt_info *info, u32 order)
 	npte = 1ul << (order - 4);
 
 	/* Allocate reverse map array */
-	rev = vmalloc(sizeof(struct revmap_entry) * npte);
+	rev = vmalloc(array_size(npte, sizeof(struct revmap_entry)));
 	if (!rev) {
 		if (cma)
 			kvm_free_hpt_cma(page, 1 << (order - PAGE_SHIFT));
diff --git a/arch/s390/hypfs/hypfs_diag.c b/arch/s390/hypfs/hypfs_diag.c
index be8cc53204b5..a2945b289a29 100644
--- a/arch/s390/hypfs/hypfs_diag.c
+++ b/arch/s390/hypfs/hypfs_diag.c
@@ -239,7 +239,7 @@ static void *page_align_ptr(void *ptr)
 static void *diag204_alloc_vbuf(int pages)
 {
 	/* The buffer has to be page aligned! */
-	diag204_buf_vmalloc = vmalloc(PAGE_SIZE * (pages + 1));
+	diag204_buf_vmalloc = vmalloc(array_size(PAGE_SIZE, (pages + 1)));
 	if (!diag204_buf_vmalloc)
 		return ERR_PTR(-ENOMEM);
 	diag204_buf = page_align_ptr(diag204_buf_vmalloc);
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index 0dc8ac8548ee..d298d3cb46d0 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -123,8 +123,8 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
 
 	/* Allocate one syminfo structure per symbol. */
 	me->arch.nsyms = symtab->sh_size / sizeof(Elf_Sym);
-	me->arch.syminfo = vmalloc(me->arch.nsyms *
-				   sizeof(struct mod_arch_syminfo));
+	me->arch.syminfo = vmalloc(array_size(sizeof(struct mod_arch_syminfo),
+					      me->arch.nsyms));
 	if (!me->arch.syminfo)
 		return -ENOMEM;
 	symbols = (void *) hdr + symtab->sh_offset;
diff --git a/arch/s390/kernel/sthyi.c b/arch/s390/kernel/sthyi.c
index 80b862e9c53c..0859cde36f75 100644
--- a/arch/s390/kernel/sthyi.c
+++ b/arch/s390/kernel/sthyi.c
@@ -315,7 +315,7 @@ static void fill_diag(struct sthyi_sctns *sctns)
 	if (pages <= 0)
 		return;
 
-	diag204_buf = vmalloc(PAGE_SIZE * pages);
+	diag204_buf = vmalloc(array_size(pages, PAGE_SIZE));
 	if (!diag204_buf)
 		return;
 
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
index 8e2b8647ee12..07d30ffcfa41 100644
--- a/arch/s390/kvm/gaccess.c
+++ b/arch/s390/kvm/gaccess.c
@@ -847,7 +847,7 @@ int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar, void *data,
 	nr_pages = (((ga & ~PAGE_MASK) + len - 1) >> PAGE_SHIFT) + 1;
 	pages = pages_array;
 	if (nr_pages > ARRAY_SIZE(pages_array))
-		pages = vmalloc(nr_pages * sizeof(unsigned long));
+		pages = vmalloc(array_size(nr_pages, sizeof(unsigned long)));
 	if (!pages)
 		return -ENOMEM;
 	need_ipte_lock = psw_bits(*psw).dat && !asce.r;
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 64c986243018..3f6625c64341 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -1725,7 +1725,7 @@ static int kvm_s390_set_cmma_bits(struct kvm *kvm,
 	if (args->count == 0)
 		return 0;
 
-	bits = vmalloc(sizeof(*bits) * args->count);
+	bits = vmalloc(array_size(sizeof(*bits), args->count));
 	if (!bits)
 		return -ENOMEM;
 
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index f4f30d0c25c4..66fc27b92c59 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -203,8 +203,9 @@ int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
 		goto out;
 	r = -ENOMEM;
 	if (cpuid->nent) {
-		cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) *
-					cpuid->nent);
+		cpuid_entries =
+			vmalloc(array_size(sizeof(struct kvm_cpuid_entry),
+					   cpuid->nent));
 		if (!cpuid_entries)
 			goto out;
 		r = -EFAULT;
diff --git a/drivers/base/firmware_loader/fallback.c b/drivers/base/firmware_loader/fallback.c
index b676a99c469c..7f732744f0d3 100644
--- a/drivers/base/firmware_loader/fallback.c
+++ b/drivers/base/firmware_loader/fallback.c
@@ -403,7 +403,7 @@ static int fw_realloc_pages(struct fw_sysfs *fw_sysfs, int min_size)
 					 fw_priv->page_array_size * 2);
 		struct page **new_pages;
 
-		new_pages = vmalloc(new_array_size * sizeof(void *));
+		new_pages = vmalloc(array_size(new_array_size, sizeof(void *)));
 		if (!new_pages) {
 			fw_load_abort(fw_sysfs);
 			return -ENOMEM;
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index ed76044ce4b9..bbff52be4f0f 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -910,7 +910,8 @@ out:
 /* Called with ichan->chan_mutex held */
 static int idmac_desc_alloc(struct idmac_channel *ichan, int n)
 {
-	struct idmac_tx_desc *desc = vmalloc(n * sizeof(struct idmac_tx_desc));
+	struct idmac_tx_desc *desc =
+		vmalloc(array_size(n, sizeof(struct idmac_tx_desc)));
 	struct idmac *idmac = to_idmac(ichan->dma_chan.device);
 
 	if (!desc)
diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c
index 3c54044214db..d69e4fc1ee77 100644
--- a/drivers/gpu/drm/drm_memory.c
+++ b/drivers/gpu/drm/drm_memory.c
@@ -80,7 +80,7 @@ static void *agp_remap(unsigned long offset, unsigned long size,
 	 * page-table instead (that's probably faster anyhow...).
 	 */
 	/* note: use vmalloc() because num_pages could be large... */
-	page_map = vmalloc(num_pages * sizeof(struct page *));
+	page_map = vmalloc(array_size(num_pages, sizeof(struct page *)));
 	if (!page_map)
 		return NULL;
 
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
index 090664899247..e721bb2163a0 100644
--- a/drivers/gpu/drm/nouveau/nv84_fence.c
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -141,7 +141,7 @@ nv84_fence_suspend(struct nouveau_drm *drm)
 	struct nv84_fence_priv *priv = drm->fence;
 	int i;
 
-	priv->suspend = vmalloc(drm->chan.nr * sizeof(u32));
+	priv->suspend = vmalloc(array_size(sizeof(u32), drm->chan.nr));
 	if (priv->suspend) {
 		for (i = 0; i < drm->chan.nr; i++)
 			priv->suspend[i] = nouveau_bo_rd32(priv->bo, i*4);
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
index 9a6752606079..ca465c0d49fa 100644
--- a/drivers/gpu/drm/qxl/qxl_fb.c
+++ b/drivers/gpu/drm/qxl/qxl_fb.c
@@ -241,7 +241,7 @@ static int qxlfb_create(struct qxl_fbdev *qfbdev,
 	DRM_DEBUG_DRIVER("%dx%d %d\n", mode_cmd.width,
 			 mode_cmd.height, mode_cmd.pitches[0]);
 
-	shadow = vmalloc(mode_cmd.pitches[0] * mode_cmd.height);
+	shadow = vmalloc(array_size(mode_cmd.pitches[0], mode_cmd.height));
 	/* TODO: what's the usual response to memory allocation errors? */
 	BUG_ON(!shadow);
 	DRM_DEBUG_DRIVER("surface0 at gpu offset %lld, mmap_offset %lld (virt %p, shadow %p)\n",
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 0b3ec35515f3..66149eaba78c 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -352,8 +352,8 @@ int radeon_gart_init(struct radeon_device *rdev)
 		radeon_gart_fini(rdev);
 		return -ENOMEM;
 	}
-	rdev->gart.pages_entry = vmalloc(sizeof(uint64_t) *
-					 rdev->gart.num_gpu_pages);
+	rdev->gart.pages_entry = vmalloc(array_size(sizeof(uint64_t),
+						    rdev->gart.num_gpu_pages));
 	if (rdev->gart.pages_entry == NULL) {
 		radeon_gart_fini(rdev);
 		return -ENOMEM;
diff --git a/drivers/gpu/drm/selftests/test-drm_mm.c b/drivers/gpu/drm/selftests/test-drm_mm.c
index ab6c6c9c5b5c..7027a6739845 100644
--- a/drivers/gpu/drm/selftests/test-drm_mm.c
+++ b/drivers/gpu/drm/selftests/test-drm_mm.c
@@ -579,7 +579,7 @@ static int __igt_insert(unsigned int count, u64 size, bool replace)
 	DRM_MM_BUG_ON(!size);
 
 	ret = -ENOMEM;
-	nodes = vmalloc(count * sizeof(*nodes));
+	nodes = vmalloc(array_size(count, sizeof(*nodes)));
 	if (!nodes)
 		goto err;
 
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
index 89ec24c6952c..a004f6da35f2 100644
--- a/drivers/iommu/tegra-gart.c
+++ b/drivers/iommu/tegra-gart.c
@@ -465,7 +465,7 @@ static int tegra_gart_probe(struct platform_device *pdev)
 	gart->iovmm_base = (dma_addr_t)res_remap->start;
 	gart->page_count = (resource_size(res_remap) >> GART_PAGE_SHIFT);
 
-	gart->savedata = vmalloc(sizeof(u32) * gart->page_count);
+	gart->savedata = vmalloc(array_size(sizeof(u32), gart->page_count));
 	if (!gart->savedata) {
 		dev_err(dev, "failed to allocate context save area\n");
 		return -ENOMEM;
diff --git a/drivers/isdn/i4l/isdn_bsdcomp.c b/drivers/isdn/i4l/isdn_bsdcomp.c
index 99012c047751..7f28b967ed19 100644
--- a/drivers/isdn/i4l/isdn_bsdcomp.c
+++ b/drivers/isdn/i4l/isdn_bsdcomp.c
@@ -340,7 +340,7 @@ static void *bsd_alloc(struct isdn_ppp_comp_data *data)
 	 * Allocate space for the dictionary. This may be more than one page in
 	 * length.
 	 */
-	db->dict = vmalloc(hsize * sizeof(struct bsd_dict));
+	db->dict = vmalloc(array_size(hsize, sizeof(struct bsd_dict)));
 	if (!db->dict) {
 		bsd_free(db);
 		return NULL;
@@ -353,7 +353,8 @@ static void *bsd_alloc(struct isdn_ppp_comp_data *data)
 	if (!decomp)
 		db->lens = NULL;
 	else {
-		db->lens = vmalloc((maxmaxcode + 1) * sizeof(db->lens[0]));
+		db->lens = vmalloc(array_size(sizeof(db->lens[0]),
+					      maxmaxcode + 1));
 		if (!db->lens) {
 			bsd_free(db);
 			return (NULL);
diff --git a/drivers/lightnvm/pblk-gc.c b/drivers/lightnvm/pblk-gc.c
index 6a4883e40cc0..080469d90b40 100644
--- a/drivers/lightnvm/pblk-gc.c
+++ b/drivers/lightnvm/pblk-gc.c
@@ -88,7 +88,7 @@ static void pblk_gc_line_ws(struct work_struct *work)
 
 	up(&gc->gc_sem);
 
-	gc_rq->data = vmalloc(gc_rq->nr_secs * geo->csecs);
+	gc_rq->data = vmalloc(array_size(gc_rq->nr_secs, geo->csecs));
 	if (!gc_rq->data) {
 		pr_err("pblk: could not GC line:%d (%d/%d)\n",
 		       line->id, *line->vsc, gc_rq->nr_secs);
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 8ccbc8f3b3af..225b15aa0340 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -881,7 +881,8 @@ SHOW(__bch_cache)
 		uint16_t q[31], *p, *cached;
 		ssize_t ret;
 
-		cached = p = vmalloc(ca->sb.nbuckets * sizeof(uint16_t));
+		cached = p = vmalloc(array_size(sizeof(uint16_t),
+						ca->sb.nbuckets));
 		if (!p)
 			return -ENOMEM;
 
diff --git a/drivers/md/dm-cache-policy-smq.c b/drivers/md/dm-cache-policy-smq.c
index 4ab23d0075f6..4d69b6f4129e 100644
--- a/drivers/md/dm-cache-policy-smq.c
+++ b/drivers/md/dm-cache-policy-smq.c
@@ -588,7 +588,7 @@ static int h_init(struct smq_hash_table *ht, struct entry_space *es, unsigned nr
 	nr_buckets = roundup_pow_of_two(max(nr_entries / 4u, 16u));
 	ht->hash_bits = __ffs(nr_buckets);
 
-	ht->buckets = vmalloc(sizeof(*ht->buckets) * nr_buckets);
+	ht->buckets = vmalloc(array_size(nr_buckets, sizeof(*ht->buckets)));
 	if (!ht->buckets)
 		return -ENOMEM;
 
diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c
index abf3521b80a8..bc7795095dd9 100644
--- a/drivers/md/dm-region-hash.c
+++ b/drivers/md/dm-region-hash.c
@@ -202,7 +202,7 @@ struct dm_region_hash *dm_region_hash_create(
 	rh->shift = RH_HASH_SHIFT;
 	rh->prime = RH_HASH_MULT;
 
-	rh->buckets = vmalloc(nr_buckets * sizeof(*rh->buckets));
+	rh->buckets = vmalloc(array_size(nr_buckets, sizeof(*rh->buckets)));
 	if (!rh->buckets) {
 		DMERR("unable to allocate region hash bucket memory");
 		kfree(rh);
diff --git a/drivers/md/dm-switch.c b/drivers/md/dm-switch.c
index 7924a6a33ddc..fae35caf3672 100644
--- a/drivers/md/dm-switch.c
+++ b/drivers/md/dm-switch.c
@@ -114,7 +114,8 @@ static int alloc_region_table(struct dm_target *ti, unsigned nr_paths)
 		return -EINVAL;
 	}
 
-	sctx->region_table = vmalloc(nr_slots * sizeof(region_table_slot_t));
+	sctx->region_table = vmalloc(array_size(nr_slots,
+						sizeof(region_table_slot_t)));
 	if (!sctx->region_table) {
 		ti->error = "Cannot allocate region table";
 		return -ENOMEM;
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 5772756c63c1..a91332557bc8 100644
--- a/drivers/md/dm-thin.c
+++ b/
