| author | Kent Russell <kent.russell@amd.com> | 2017-08-15 23:00:05 -0400 |
|---|---|---|
| committer | Oded Gabbay <oded.gabbay@gmail.com> | 2017-08-15 23:00:05 -0400 |
| commit | 79775b627dc49df06880a32b4340674554c669b9 (patch) | |
| tree | 535ec97742fc0fabddacfa59438cff42a1d834fe /drivers/gpu | |
| parent | 8eabaf54cfb34d185b7c9684bc891397d757d15e (diff) | |
| download | linux-79775b627dc49df06880a32b4340674554c669b9.tar.gz linux-79775b627dc49df06880a32b4340674554c669b9.tar.bz2 linux-79775b627dc49df06880a32b4340674554c669b9.zip | |
drm/amdkfd: Consolidate and clean up log commands
Consolidate log commands so that dev_info(NULL, "Error...") uses the more
accurate pr_err, and remove the module name from the log (it can be shown
via dynamic debugging with +m) and the function name (shown via dynamic
debugging with +f). We also don't need debug messages saying what function
we're in; those can be added by developers when needed.
Don't print the vendor and device ID in error messages. They are typically
the same for all GPUs in a multi-GPU system, so they add no value to the
message.
Lastly, remove parentheses around %d, %i and 0x%llX.
According to kernel.org:
"Printing numbers in parentheses (%d) adds no value and should be
avoided."
Signed-off-by: Kent Russell <kent.russell@amd.com>
Signed-off-by: Yong Zhao <Yong.Zhao@amd.com>
Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Oded Gabbay <oded.gabbay@gmail.com>
Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
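
For readers skimming the diff, here is a minimal sketch of the logging
convention this cleanup moves toward. The pr_fmt define and the helper
function name below are illustrative assumptions, not code added by this
patch:

#define pr_fmt(fmt) "kfd: " fmt	/* hypothetical: prefixes every pr_*() call in this file */

#include <linux/printk.h>
#include <linux/log2.h>

static int kfd_check_ring_size(unsigned int ring_size)
{
	if (!is_power_of_2(ring_size) && ring_size != 0) {
		/* No hardcoded "kfd:" prefix in the string; pr_fmt supplies it. */
		pr_err("Ring size must be a power of 2 or 0\n");
		return -EINVAL;
	}

	/*
	 * No module or function name in the string either; dynamic debug
	 * can add them at runtime, e.g.:
	 *   echo 'module amdkfd +pmf' > /sys/kernel/debug/dynamic_debug/control
	 * where +m prepends the module name and +f the function name.
	 * Numbers are printed bare (%d, 0x%X), not wrapped in parentheses.
	 */
	pr_debug("Ring size 0x%X accepted\n", ring_size);
	return 0;
}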
Diffstat (limited to 'drivers/gpu')
| -rw-r--r-- | drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 64 |
| -rw-r--r-- | drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c | 38 |
| -rw-r--r-- | drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c | 4 |
| -rw-r--r-- | drivers/gpu/drm/amd/amdkfd/kfd_device.c | 51 |
| -rw-r--r-- | drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 81 |
| -rw-r--r-- | drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c | 2 |
| -rw-r--r-- | drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c | 2 |
| -rw-r--r-- | drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c | 21 |
| -rw-r--r-- | drivers/gpu/drm/amd/amdkfd/kfd_events.c | 22 |
| -rw-r--r-- | drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c | 16 |
| -rw-r--r-- | drivers/gpu/drm/amd/amdkfd/kfd_module.c | 4 |
| -rw-r--r-- | drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c | 10 |
| -rw-r--r-- | drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c | 8 |
| -rw-r--r-- | drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c | 34 |
| -rw-r--r-- | drivers/gpu/drm/amd/amdkfd/kfd_process.c | 4 |
| -rw-r--r-- | drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | 27 |
| -rw-r--r-- | drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 6 |
17 files changed, 158 insertions, 236 deletions
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 2603b7ce50a2..6763972b03df 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -142,12 +142,12 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties,
 				struct kfd_ioctl_create_queue_args *args)
 {
 	if (args->queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) {
-		pr_err("kfd: queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n");
+		pr_err("Queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n");
 		return -EINVAL;
 	}
 
 	if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) {
-		pr_err("kfd: queue priority must be between 0 to KFD_MAX_QUEUE_PRIORITY\n");
+		pr_err("Queue priority must be between 0 to KFD_MAX_QUEUE_PRIORITY\n");
 		return -EINVAL;
 	}
 
@@ -155,26 +155,26 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties,
 	    (!access_ok(VERIFY_WRITE,
 			(const void __user *) args->ring_base_address,
 			sizeof(uint64_t)))) {
-		pr_err("kfd: can't access ring base address\n");
+		pr_err("Can't access ring base address\n");
 		return -EFAULT;
 	}
 
 	if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) {
-		pr_err("kfd: ring size must be a power of 2 or 0\n");
+		pr_err("Ring size must be a power of 2 or 0\n");
 		return -EINVAL;
 	}
 
 	if (!access_ok(VERIFY_WRITE,
 			(const void __user *) args->read_pointer_address,
 			sizeof(uint32_t))) {
-		pr_err("kfd: can't access read pointer\n");
+		pr_err("Can't access read pointer\n");
 		return -EFAULT;
 	}
 
 	if (!access_ok(VERIFY_WRITE,
 			(const void __user *) args->write_pointer_address,
 			sizeof(uint32_t))) {
-		pr_err("kfd: can't access write pointer\n");
+		pr_err("Can't access write pointer\n");
 		return -EFAULT;
 	}
 
@@ -182,7 +182,7 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties,
 	    !access_ok(VERIFY_WRITE,
 			(const void __user *) args->eop_buffer_address,
 			sizeof(uint32_t))) {
-		pr_debug("kfd: can't access eop buffer");
+		pr_debug("Can't access eop buffer");
 		return -EFAULT;
 	}
 
@@ -190,7 +190,7 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties,
 	    !access_ok(VERIFY_WRITE,
 			(const void __user *) args->ctx_save_restore_address,
 			sizeof(uint32_t))) {
-		pr_debug("kfd: can't access ctx save restore buffer");
+		pr_debug("Can't access ctx save restore buffer");
 		return -EFAULT;
 	}
 
@@ -219,27 +219,27 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties,
 	else
 		q_properties->format = KFD_QUEUE_FORMAT_PM4;
 
-	pr_debug("Queue Percentage (%d, %d)\n",
+	pr_debug("Queue Percentage: %d, %d\n",
 			q_properties->queue_percent, args->queue_percentage);
 
-	pr_debug("Queue Priority (%d, %d)\n",
+	pr_debug("Queue Priority: %d, %d\n",
 			q_properties->priority, args->queue_priority);
 
-	pr_debug("Queue Address (0x%llX, 0x%llX)\n",
+	pr_debug("Queue Address: 0x%llX, 0x%llX\n",
 			q_properties->queue_address, args->ring_base_address);
 
-	pr_debug("Queue Size (0x%llX, %u)\n",
+	pr_debug("Queue Size: 0x%llX, %u\n",
 			q_properties->queue_size, args->ring_size);
 
-	pr_debug("Queue r/w Pointers (0x%llX, 0x%llX)\n",
-			(uint64_t) q_properties->read_ptr,
-			(uint64_t) q_properties->write_ptr);
+	pr_debug("Queue r/w Pointers: %p, %p\n",
+			q_properties->read_ptr,
+			q_properties->write_ptr);
 
-	pr_debug("Queue Format (%d)\n", q_properties->format);
+	pr_debug("Queue Format: %d\n", q_properties->format);
 
-	pr_debug("Queue EOP (0x%llX)\n", q_properties->eop_ring_buffer_address);
+	pr_debug("Queue EOP: 0x%llX\n", q_properties->eop_ring_buffer_address);
 
-	pr_debug("Queue CTX save arex (0x%llX)\n",
+	pr_debug("Queue CTX save area: 0x%llX\n",
 			q_properties->ctx_save_restore_area_address);
 
 	return 0;
@@ -257,16 +257,16 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
 
 	memset(&q_properties, 0, sizeof(struct queue_properties));
 
-	pr_debug("kfd: creating queue ioctl\n");
+	pr_debug("Creating queue ioctl\n");
 
 	err = set_queue_properties_from_user(&q_properties, args);
 	if (err)
 		return err;
 
-	pr_debug("kfd: looking for gpu id 0x%x\n", args->gpu_id);
+	pr_debug("Looking for gpu id 0x%x\n", args->gpu_id);
 	dev = kfd_device_by_id(args->gpu_id);
 	if (dev == NULL) {
-		pr_debug("kfd: gpu id 0x%x was not found\n", args->gpu_id);
+		pr_debug("Could not find gpu id 0x%x\n", args->gpu_id);
 		return -EINVAL;
 	}
 
@@ -278,7 +278,7 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
 		goto err_bind_process;
 	}
 
-	pr_debug("kfd: creating queue for PASID %d on GPU 0x%x\n",
+	pr_debug("Creating queue for PASID %d on gpu 0x%x\n",
 			p->pasid,
 			dev->id);
 
@@ -296,15 +296,15 @@
 	mutex_unlock(&p->mutex);
 
-	pr_debug("kfd: queue id %d was created successfully\n", args->queue_id);
+	pr_debug("Queue id %d was created successfully\n", args->queue_id);
 
-	pr_debug("ring buffer address == 0x%016llX\n",
+	pr_debug("Ring buffer address == 0x%016llX\n",
 			args->ring_base_address);
 
-	pr_debug("read ptr address == 0x%016llX\n",
+	pr_debug("Read ptr address == 0x%016llX\n",
 			args->read_pointer_address);
 
-	pr_debug("write ptr address == 0x%016llX\n",
+	pr_debug("Write ptr address == 0x%016llX\n",
 			args->write_pointer_address);
 
 	return 0;
@@ -321,7 +321,7 @@ static int kfd_ioctl_destroy_queue(struct file *filp, struct kfd_process *p,
 	int retval;
 	struct kfd_ioctl_destroy_queue_args *args = data;
 
-	pr_debug("kfd: destroying queue id %d for PASID %d\n",
+	pr_debug("Destroying queue id %d for pasid %d\n",
 			args->queue_id,
 			p->pasid);
 
@@ -341,12 +341,12 @@ static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
 	struct queue_properties properties;
 
 	if (args->queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) {
-		pr_err("kfd: queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n");
+		pr_err("Queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n");
 		return -EINVAL;
 	}
 
 	if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) {
-		pr_err("kfd: queue priority must be between 0 to KFD_MAX_QUEUE_PRIORITY\n");
+		pr_err("Queue priority must be between 0 to KFD_MAX_QUEUE_PRIORITY\n");
 		return -EINVAL;
 	}
 
@@ -354,12 +354,12 @@ static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
 	    (!access_ok(VERIFY_WRITE,
 			(const void __user *) args->ring_base_address,
 			sizeof(uint64_t)))) {
-		pr_err("kfd: can't access ring base address\n");
+		pr_err("Can't access ring base address\n");
 		return -EFAULT;
 	}
 
 	if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) {
-		pr_err("kfd: ring size must be a power of 2 or 0\n");
+		pr_err("Ring size must be a power of 2 or 0\n");
 		return -EINVAL;
 	}
 
@@ -368,7 +368,7 @@ static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
 	properties.queue_percent = args->queue_percentage;
 	properties.priority = args->queue_priority;
 
-	pr_debug("kfd: updating queue id %d for PASID %d\n",
+	pr_debug("Updating queue id %d for pasid %d\n",
 			args->queue_id, p->pasid);
 
 	mutex_lock(&p->mutex);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
index a7548a55c046..bf8ee196dd13 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
@@ -78,7 +78,7 @@ static int dbgdev_diq_submit_ib(struct kfd_dbgdev *dbgdev,
 			pq_packets_size_in_bytes / sizeof(uint32_t),
 			&ib_packet_buff);
 	if (status != 0) {
-		pr_err("amdkfd: acquire_packet_buffer failed\n");
+		pr_err("acquire_packet_buffer failed\n");
 		return status;
 	}
@@ -116,7 +116,7 @@ static int dbgdev_diq_submit_ib(struct kfd_dbgdev *dbgdev,
 			&mem_obj);
 
 	if (status != 0) {
-		pr_err("amdkfd: Failed to allocate GART memory\n");
+		pr_err("Failed to allocate GART memory\n");
 		kq->ops.rollback_packet(kq);
 		return status;
 	}
@@ -194,7 +194,7 @@ static int dbgdev_register_diq(struct kfd_dbgdev *dbgdev)
 			&qid);
 
 	if (status) {
-		pr_err("amdkfd: Failed to create DIQ\n");
+		pr_err("Failed to create DIQ\n");
 		return status;
 	}
@@ -203,7 +203,7 @@ static int dbgdev_register_diq(struct kfd_dbgdev *dbgdev)
 	kq = pqm_get_kernel_queue(dbgdev->pqm, qid);
 
 	if (kq == NULL) {
-		pr_err("amdkfd: Error getting DIQ\n");
+		pr_err("Error getting DIQ\n");
 		pqm_destroy_queue(dbgdev->pqm, qid);
 		return -EFAULT;
 	}
@@ -279,7 +279,7 @@ static void dbgdev_address_watch_set_registers(
 }
 
 static int dbgdev_address_watch_nodiq(struct kfd_dbgdev *dbgdev,
-				struct dbg_address_watch_info *adw_info)
+					struct dbg_address_watch_info *adw_info)
 {
 	union TCP_WATCH_ADDR_H_BITS addrHi;
 	union TCP_WATCH_ADDR_L_BITS addrLo;
@@ -293,7 +293,7 @@ static int dbgdev_address_watch_nodiq(struct kfd_dbgdev *dbgdev,
 	pdd = kfd_get_process_device_data(dbgdev->dev, adw_info->process);
 	if (!pdd) {
-		pr_err("amdkfd: Failed to get pdd for wave control no DIQ\n");
+		pr_err("Failed to get pdd for wave control no DIQ\n");
 		return -EFAULT;
 	}
@@ -303,13 +303,13 @@ static int dbgdev_address_watch_nodiq(struct kfd_dbgdev *dbgdev,
 	if ((adw_info->num_watch_points > MAX_WATCH_ADDRESSES) ||
 			(adw_info->num_watch_points == 0)) {
-		pr_err("amdkfd: num_watch_points is invalid\n");
+		pr_err("num_watch_points is invalid\n");
 		return -EINVAL;
 	}
 
 	if ((adw_info->watch_mode == NULL) ||
 			(adw_info->watch_address == NULL)) {
-		pr_err("amdkfd: adw_info fields are not valid\n");
+		pr_err("adw_info fields are not valid\n");
 		return -EINVAL;
 	}
@@ -348,7 +348,7 @@ static int dbgdev_address_watch_nodiq(struct kfd_dbgdev *dbgdev,
 }
 
 static int dbgdev_address_watch_diq(struct kfd_dbgdev *dbgdev,
-				struct dbg_address_watch_info *adw_info)
+					struct dbg_address_watch_info *adw_info)
 {
 	struct pm4__set_config_reg *packets_vec;
 	union TCP_WATCH_ADDR_H_BITS addrHi;
@@ -371,20 +371,20 @@ static int dbgdev_address_watch_diq(struct kfd_dbgdev *dbgdev,
 	if ((adw_info->num_watch_points > MAX_WATCH_ADDRESSES) ||
 			(adw_info->num_watch_points == 0)) {
-		pr_err("amdkfd: num_watch_points is invalid\n");
+		pr_err("num_watch_points is invalid\n");
 		return -EINVAL;
 	}
 
 	if ((NULL == adw_info->watch_mode) ||
 			(NULL == adw_info->watch_address)) {
-		pr_err("amdkfd: adw_info fields are not valid\n");
+		pr_err("adw_info fields are not valid\n");
 		return -EINVAL;
 	}
 
 	status = kfd_gtt_sa_allocate(dbgdev->dev, ib_size, &mem_obj);
 
 	if (status != 0) {
-		pr_err("amdkfd: Failed to allocate GART memory\n");
+		pr_err("Failed to allocate GART memory\n");
 		return status;
 	}
@@ -491,7 +491,7 @@ static int dbgdev_address_watch_diq(struct kfd_dbgdev *dbgdev,
 				ib_size);
 
 		if (status != 0) {
-			pr_err("amdkfd: Failed to submit IB to DIQ\n");
+			pr_err("Failed to submit IB to DIQ\n");
 			break;
 		}
 	}
@@ -619,7 +619,7 @@ static int dbgdev_wave_control_diq(struct kfd_dbgdev *dbgdev,
 	status = dbgdev_wave_control_set_registers(wac_info, &reg_sq_cmd,
 							&reg_gfx_index);
 	if (status) {
-		pr_err("amdkfd: Failed to set wave control registers\n");
+		pr_err("Failed to set wave control registers\n");
 		return status;
 	}
@@ -659,7 +659,7 @@ static int dbgdev_wave_control_diq(struct kfd_dbgdev *dbgdev,
 	status = kfd_gtt_sa_allocate(dbgdev->dev, ib_size, &mem_obj);
 
 	if (status != 0) {
-		pr_err("amdkfd: Failed to allocate GART memory\n");
+		pr_err("Failed to allocate GART memory\n");
 		return status;
 	}
@@ -712,7 +712,7 @@ static int dbgdev_wave_control_diq(struct kfd_dbgdev *dbgdev,
 			ib_size);
 
 	if (status != 0)
-		pr_err("amdkfd: Failed to submit IB to DIQ\n");
+		pr_err("Failed to submit IB to DIQ\n");
 
 	kfd_gtt_sa_free(dbgdev->dev, mem_obj);
@@ -735,13 +735,13 @@ static int dbgdev_wave_control_nodiq(struct kfd_dbgdev *dbgdev,
 	pdd = kfd_get_process_device_data(dbgdev->dev, wac_info->process);
 
 	if (!pdd) {
-		pr_err("amdkfd: Failed to get pdd for wave control no DIQ\n");
+		pr_err("Failed to get pdd for wave control no DIQ\n");
 		return -EFAULT;
 	}
 	status = dbgdev_wave_control_set_registers(wac_info, &reg_sq_cmd,
 							&reg_gfx_index);
 	if (status) {
-		pr_err("amdkfd: Failed to set wave control registers\n");
+		pr_err("Failed to set wave control registers\n");
 		return status;
 	}
@@ -826,7 +826,7 @@ int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p)
 	}
 
 	if (vmid > last_vmid_to_scan) {
-		pr_err("amdkfd: didn't found vmid for pasid (%d)\n", p->pasid);
+		pr_err("Didn't find vmid for pasid %d\n", p->pasid);
 		return -EFAULT;
 	}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c
index 56d676396342..7225789f9c21 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c
@@ -71,7 +71,7 @@ bool kfd_dbgmgr_create(struct kfd_dbgmgr **ppmgr, struct kfd_dev *pdev)
 	new_buff = kfd_alloc_struct(new_buff);
 	if (!new_buff) {
-		pr_err("amdkfd: Failed to allocate dbgmgr instance\n");
+		pr_err("Failed to allocate dbgmgr instance\n");
 		return false;
 	}
@@ -79,7 +79,7 @@ bool kfd_dbgmgr_create(struct kfd_dbgmgr **ppmgr, struct kfd_dev *pdev)
 	new_buff->dev = pdev;
 	new_buff->dbgdev = kfd_alloc_struct(new_buff->dbgdev);
 	if (!new_buff->dbgdev) {
-		pr_err("amdkfd: Failed to allocate dbgdev instance\n");
+		pr_err("Failed to allocate dbgdev instance\n");
 		kfree(new_buff);
 		return false;
 	}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 1f5032534944..87df8bfb9fec 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -152,7 +152,7 @@ static bool device_iommu_pasid_init(struct kfd_dev *kfd)
 	}
 
 	if ((iommu_info.flags & required_iommu_flags) != required_iommu_flags) {
-		dev_err(kfd_device, "error required iommu flags ats(%i), pri(%i), pasid(%i)\n",
+		dev_err(kfd_device, "error required iommu flags ats %i, pri %i, pasid %i\n",
 			(iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_ATS_SUP) != 0,
 			(iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_PRI_SUP) != 0,
 			(iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP)
@@ -248,42 +248,33 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
 	if (kfd->kfd2kgd->init_gtt_mem_allocation(
 			kfd->kgd, size, &kfd->gtt_mem,
 			&kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr)){
-		dev_err(kfd_device,
-			"Could not allocate %d bytes for device (%x:%x)\n",
-			size, kfd->pdev->vendor, kfd->pdev->device);
+		dev_err(kfd_device, "Could not allocate %d bytes\n", size);
 		goto out;
 	}
 
-	dev_info(kfd_device,
-		"Allocated %d bytes on gart for device(%x:%x)\n",
-		size, kfd->pdev->vendor, kfd->pdev->device);
+	dev_info(kfd_device, "Allocated %d bytes on gart\n", size);
 
 	/* Initialize GTT sa with 512 byte chunk size */
 	if (kfd_gtt_sa_init(kfd, size, 512) != 0) {
-		dev_err(kfd_device,
-			"Error initializing gtt sub-allocator\n");
+		dev_err(kfd_device, "Error initializing gtt sub-allocator\n");
 		goto kfd_gtt_sa_init_error;
 	}
 
 	kfd_doorbell_init(kfd);
 
 	if (kfd_topology_add_device(kfd) != 0) {
-		dev_err(kfd_device,
-			"Error adding device (%x:%x) to topology\n",
-			kfd->pdev->vendor, kfd->pdev->device);
+		dev_err(kfd_device, "Error adding device to topology\n");
 		goto kfd_topology_add_device_error;
 	}
 
 	if (kfd_interrupt_init(kfd)) {
-		dev_err(kfd_device,
-			"Error initializing interrupts for device (%x:%x)\n",
-			kfd->pdev->vendor, kfd->pdev->device);
+		dev_err(kfd_device, "Error initializing interrupts\n");
 		goto kfd_interrupt_error;
 	}
 
 	if (!device_iommu_pasid_init(kfd)) {
 		dev_err(kfd_device,
-			"Error initializing iommuv2 for device (%x:%x)\n",
+			"Error initializing iommuv2 for device %x:%x\n",
 			kfd->pdev->vendor, kfd->pdev->device);
 		goto device_iommu_pasid_error;
 	}
@@ -293,15 +284,13 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
 	kfd->dqm = device_queue_manager_init(kfd);
 	if (!kfd->dqm) {
-		dev_err(kfd_device,
-			"Error initializing queue manager for device (%x:%x)\n",
-			kfd->pdev->vendor, kfd->pdev->device);
+		dev_err(kfd_device, "Error initializing queue manager\n");
 		goto device_queue_manager_error;
 	}
 
 	if (kfd->dqm->ops.start(kfd->dqm) != 0) {
 		dev_err(kfd_device,
-			"Error starting queuen manager for device (%x:%x)\n",
+			"Error starting queue manager for device %x:%x\n",
 			kfd->pdev->vendor, kfd->pdev->device);
 		goto dqm_start_error;
 	}
@@ -309,10 +298,10 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
 	kfd->dbgmgr = NULL;
 
 	kfd->init_complete = true;
-	dev_info(kfd_device, "added device (%x:%x)\n", kfd->pdev->vendor,
+	dev_info(kfd_device, "added device %x:%x\n", kfd->pdev->vendor,
 		 kfd->pdev->device);
 
-	pr_debug("kfd: Starting kfd with the following scheduling policy %d\n",
+	pr_debug("Starting kfd with the following scheduling policy %d\n",
 		sched_policy);
 
 	goto out;
@@ -330,7 +319,7 @@ kfd_topology_add_device_error:
 kfd_gtt_sa_init_error:
 	kfd->kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem);
 	dev_err(kfd_device,
-		"device (%x:%x) NOT added due to errors\n",
+		"device %x:%x NOT added due to errors\n",
 		kfd->pdev->vendor, kfd->pdev->device);
 out:
 	return kfd->init_complete;
@@ -422,7 +411,7 @@ static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
 	if (!kfd->gtt_sa_bitmap)
 		return -ENOMEM;
 
-	pr_debug("kfd: gtt_sa_num_of_chunks = %d, gtt_sa_bitmap = %p\n",
+	pr_debug("gtt_sa_num_of_chunks = %d, gtt_sa_bitmap = %p\n",
 			kfd->gtt_sa_num_of_chunks, kfd->gtt_sa_bitmap);
 
 	mutex_init(&kfd->gtt_sa_lock);
@@ -468,7 +457,7 @@ int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
 	if ((*mem_obj) == NULL)
 		return -ENOMEM;
 
-	pr_debug("kfd: allocated mem_obj = %p for size = %d\n", *mem_obj, size);
+	pr_debug("Allocated mem_obj = %p for size = %d\n", *mem_obj, size);
 
 	start_search = 0;
@@ -480,7 +469,7 @@ kfd_gtt_restart_search:
 					kfd->gtt_sa_num_of_chunks,
 					start_search);
 
-	pr_debug("kfd: found = %d\n", found);
+	pr_debug("Found = %d\n", found);
 
 	/* If there wasn't any free chunk, bail out */
 	if (found == kfd->gtt_sa_num_of_chunks)
@@ -498,12 +487,12 @@ kfd_gtt_restart_search:
 					found,
 					kfd->gtt_sa_chunk_size);
 
-	pr_debug("kfd: gpu_addr = %p, cpu_addr = %p\n",
+	pr_debug("gpu_addr = %p, cpu_addr = %p\n",
 			(uint64_t *) (*mem_obj)->gpu_addr, (*mem_obj)->cpu_ptr);
 
 	/* If we need only one chunk, mark it as allocated and get out */
 	if (size <= kfd->gtt_sa_chunk_size) {
-		pr_debug("kfd: single bit\n");
+		pr_debug("Single bit\n");
 		set_bit(found, kfd->gtt_sa_bitmap);
 		goto kfd_gtt_out;
 	}
@@ -538,7 +527,7 @@ kfd_gtt_restart_search:
 	} while (cur_size > 0);
 
-	pr_debug("kfd: range_start = %d, range_end = %d\n",
+	pr_debug("range_start = %d, range_end = %d\n",
 		(*mem_obj)->range_start, (*mem_obj)->range_end);
 
 	/* Mark the chunks as allocated */
@@ -552,7 +541,7 @@ kfd_gtt_out:
 	return 0;
 
 kfd_gtt_no_free_chunk:
-	pr_debug("kfd: allocation failed with mem_obj = %p\n", mem_obj);
+	pr_debug("Allocation failed with mem_obj = %p\n", mem_obj);
 	mutex_unlock(&kfd->gtt_sa_lock);
 	kfree(mem_obj);
 	return -ENOMEM;
@@ -568,7 +557,7 @@ int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj)
 	if (!mem_obj)
 		return 0;
 
-	pr_debug("kfd: free mem_obj = %p, range_start = %d, range_end = %d\n",
+	pr_debug("Free mem_obj = %p, range_start = %d, range_end = %d\n",
 			mem_obj, mem_obj->range_start, mem_obj->range_end);
 
 	mutex_lock(&kfd->gtt_sa_lock);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 3b850dab0e7d..8b147e424bf3 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -121,7 +121,7 @@ static int allocate_vmid(struct device_queue_manager *dqm,
 	/* Kaveri kfd vmid's starts from vmid 8 */
 	allocated_vmid = bit + KFD_VMID_START_OFFSET;
 
-	pr_debug("kfd: vmid allocation %d\n", allocated_vmid);
+	pr_debug("vmid allocation %d\n", allocated_vmid);
 	qpd->vmid = allocated_vmid;
 	q->properties.vmid = allocated_vmid;
@@ -154,13 +154,12 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
 	BUG_ON(!dqm || !q || !qpd || !allocated_vmid);
 
-	pr_debug("kfd: In func %s\n", __func__);
 	print_queue(q);
 
 	mutex_lock(&dqm->lock);
 
 	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
-		pr_warn("amdkfd: Can't create new usermode queue because %d queues were already created\n",
+		pr_warn("Can't create new usermode queue because %d queues were already created\n",
 				dqm->total_queue_count);
 		mutex_unlock(&dqm->lock);
 		return -EPERM;
@@ -240,8 +239,7 @@ static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
 	if (!set)
 		return -EBUSY;
 
-	pr_debug("kfd: DQM %s hqd slot - pipe (%d) queue(%d)\n",
-				__func__, q->pipe, q->queue);
+	pr_debug("hqd slot - pipe %d, queue %d\n", q->pipe, q->queue);
 	/* horizontal hqd allocation */
 	dqm->next_pipe_to_allocate = (pipe + 1) % get_pipes_per_mec(dqm);
@@ -278,9 +276,8 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
 		return retval;
 	}
 
-	pr_debug("kfd: loading mqd to hqd on pipe (%d) queue (%d)\n",
-			q->pipe,
-			q->queue);
+	pr_debug("Loading mqd to hqd on pipe %d, queue %d\n",
+			q->pipe, q->queue);
 
 	retval = mqd->load_mqd(mqd, q->mqd, q->pipe, q->queue,
 			(uint32_t __user *) q->properties.write_ptr);
@@ -304,8 +301,6 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
 	retval = 0;
 
-	pr_debug("kfd: In Func %s\n", __func__);
-
 	mutex_lock(&dqm->lock);
 
 	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
@@ -324,7 +319,7 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
 		dqm->sdma_queue_count--;
 		deallocate_sdma_queue(dqm, q->sdma_id);
 	} else {
-		pr_debug("q->properties.type is invalid (%d)\n",
+		pr_debug("q->properties.type %d is invalid\n",
 				q->properties.type);
 		retval = -EINVAL;
 		goto out;
@@ -403,13 +398,13 @@ static struct mqd_manager *get_mqd_manager_nocpsch(
 	BUG_ON(!dqm || type >= KFD_MQD_TYPE_MAX);
 
-	pr_debug("kfd: In func %s mqd type %d\n", __func__, type);
+	pr_debug("mqd type %d\n", type);
 
 	mqd = dqm->mqds[type];
 	if (!mqd) {
 		mqd = mqd_manager_init(type, dqm->dev);
 		if (mqd == NULL)
-			pr_err("kfd: mqd manager is NULL");
+			pr_err("mqd manager is NULL");
 		dqm->mqds[type] = mqd;
 	}
@@ -424,8 +419,6 @@ static int register_process_nocpsch(struct device_queue_manager *dqm,
 	BUG_ON(!dqm || !qpd);
 
-	pr_debug("kfd: In func %s\n", __func__);
-
 	n = kzalloc(sizeof(struct device_process_node), GFP_KERNEL);
 	if (!n)
 		return -ENOMEM;
@@ -452,8 +445,6 @@ static int unregister_process_nocpsch(struct device_queue_manager *dqm,
 	BUG_ON(!dqm || !qpd);
 
-	pr_debug("In func %s\n", __func__);
-
 	pr_debug("qpd->queues_list is %s\n",
 			list_empty(&qpd->queues_list) ? "empty" : "not empty");
@@ -501,25 +492,13 @@ static void init_interrupts(struct device_queue_manager *dqm)
 			dqm->dev->kfd2kgd->init_interrupts(dqm->dev->kgd, i);
 }
 
-static int init_scheduler(struct device_queue_manager *dqm)
-{
-	int retval = 0;
-
-	BUG_ON(!dqm);
-
-	pr_debug("kfd: In %s\n", __func__);
-
-	return retval;
-}
-
 static int initialize_nocpsch(struct device_queue_manager *dqm)
 {
 	int pipe, queue;
 
 	BUG_ON(!dqm);
 
-	pr_debug("kfd: In func %s num of pipes: %d\n",
-			__func__, get_pipes_per_mec(dqm));
+	pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));
 
 	mutex_init(&dqm->lock);
 	INIT_LIST_HEAD(&dqm->queues);
@@ -544,7 +523,6 @@ static int initialize_nocpsch(struct device_queue_manager *dqm)
 	dqm->vmid_bitmap = (1 << VMID_PER_DEVICE) - 1;
 	dqm->sdma_bitmap = (1 << CIK_SDMA_QUEUES) - 1;
 
-	init_scheduler(dqm);
 	return 0;
 }
@@ -617,9 +595,9 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
 	q->properties.sdma_queue_id = q->sdma_id % CIK_SDMA_QUEUES_PER_ENGINE;
 	q->properties.sdma_engine_id = q->sdma_id / CIK_SDMA_ENGINE_NUM;
 
-	pr_debug("kfd: sdma id is: %d\n", q->sdma_id);
-	pr_debug(" sdma queue id: %d\n", q->properties.sdma_queue_id);
-	pr_debug(" sdma engine id: %d\n", q->properties.sdma_engine_id);
+	pr_debug("SDMA id is: %d\n", q->sdma_id);
+	pr_debug("SDMA queue id: %d\n", q->properties.sdma_queue_id);
+	pr_debug("SDMA engine id: %d\n", q->properties.sdma_engine_id);
 
 	dqm->ops_asic_specific.init_sdma_vm(dqm, q, qpd);
 	retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
@@ -651,8 +629,6 @@ static int set_sched_resources(struct device_queue_manager *dqm)
 	BUG_ON(!dqm);
 
-	pr_debug("kfd: In func %s\n", __func__);
-
 	res.vmid_mask = (1 << VMID_PER_DEVICE) - 1;
 	res.vmid_mask <<= KFD_VMID_START_OFFSET;
@@ -682,9 +658,9 @@ static int set_sched_resources(struct device_queue_manager *dqm)
 	res.gws_mask = res.oac_mask = res.gds_heap_base =
 			res.gds_heap_size = 0;
 
-	pr_debug("kfd: scheduling resources:\n"
-			" vmid mask: 0x%8X\n"
-			" queue mask: 0x%8llX\n",
+	pr_debug("Scheduling resources:\n"
+			"vmid mask: 0x%8X\n"
+			"queue mask: 0x%8llX\n",
 			res.vmid_mask, res.queue_mask);
 
 	return pm_send_set_resources(&dqm->packets, &res);
@@ -696,8 +672,7 @@ static int initialize_cpsch(struct device_queue_manager *dqm)
 	BUG_ON(!dqm);
 
-	pr_debug("kfd: In func %s num of pipes: %d\n",
-			__func__, get_pipes_per_mec(dqm));
+	pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));
 
 	mutex_init(&dqm->lock);
 	INIT_LIST_HEAD(&dqm->queues);
@@ -732,7 +707,7 @@ static int start_cpsch(struct device_queue_manager *dqm)
 	if (retval != 0)
 		goto fail_set_sched_resources;
 
-	pr_debug("kfd: allocating fence memory\n");
+	pr_debug("Allocating fence memory\n");
 
 	/* allocate fence memory on the gart */
 	retval = kfd_gtt_sa_allocate(dqm->dev, sizeof(*dqm->fence_addr),
@@ -786,11 +761,9 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
 {
 	BUG_ON(!dqm || !kq || !qpd);
 
-	pr_debug("kfd: In func %s\n", __func__);
-
 	mutex_lock(&dqm->lock);
 	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
-		pr_warn("amdkfd: Can't create new kernel queue because %d queues were already created\n",
+		pr_warn("Can't create new kernel queue because %d queues were already created\n",
 				dqm->total_queue_count);
 		mutex_unlock(&dqm->lock);
 		return -EPERM;
@@ -819,8 +792,6 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
 {
 	BUG_ON(!dqm || !kq);
 
-	pr_debug("kfd: In %s\n", __func__);
-
 	mutex_lock(&dqm->lock);
 	/* here we actually preempt the DIQ */
 	destroy_queues_cpsch(dqm, true, false);
@@ -862,7 +833,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
 	mutex_lock(&dqm->lock);
 
 	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
-		pr_warn("amdkfd: Can't create new usermode queue because %d queues were already created\n",
+		pr_warn("Can't create new usermode queue because %d queues were already created\n",
 				dqm->total_queue_count);
 		retval = -EPERM;
 		goto out;
@@ -916,7 +887,7 @@ int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
 	while (*fence_addr != fence_value) {
 		if (time_after(jiffies, timeout)) {
-			pr_err("kfd: qcm fence wait loop timeout expired\n");
+			pr_err("qcm fence wait loop timeout expired\n");
 			return -ETIME;
 		}
 		schedule();
@@ -949,7 +920,7 @@ static int destroy_queues_cpsch(struct device_queue_manager *dqm,
 	if (!dqm->active_runlist)
 		goto out;
 
-	pr_debug("kfd: Before destroying queues, sdma queue count is : %u\n",
+	pr_debug("Before destroying queues, sdma queue count is : %u\n",
 		dqm->sdma_queue_count);
 
 	if (dqm->sdma_queue_count > 0) {
@@ -998,7 +969,7 @@ static int execute_queues_cpsch(struct device_queue_manager *dqm, bool lock)
 	retval = destroy_queues_cpsch(dqm, false, false);
 	if (retval != 0) {
-		pr_err("kfd: the cp might be in an unrecoverable state due to an unsuccessful queues preemption");
+		pr_err("The cp might be in an unrecoverable state due to an unsuccessful queues preemption");
 		goto out;
 	}
@@ -1014,7 +985,7 @@ static int execute_queues_cpsch(struct device_queue_manager *dqm, bool lock)
 	retval = pm_send_runlist(&dqm->packets, &dqm->queues);
 	if (retval != 0) {
-		pr_err("kfd: failed to execute runlist");
+		pr_err("failed to execute runlist");
 		goto out;
 	}
 	dqm->active_runlist = true;
@@ -1106,8 +1077,6 @@ static bool set_cache_memory_policy(struct device_queue_manager *dqm,
 {
 	bool retval;
 
-	pr_debug("kfd: In func %s\n", __func__);
-
 	mutex_lock(&dqm->lock);
 
 	if (alternate_aperture_size == 0) {
@@ -1152,7 +1121,7 @@ static bool set_cache_memory_policy(struct device_queue_manager *dqm,
 	if ((sched_policy == KFD_SCHED_POLICY_NO_HWS) |
