Diffstat (limited to 'drivers/misc')
28 files changed, 820 insertions, 349 deletions
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 3726eacdf65d..f417b06e11c5 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -513,6 +513,14 @@ config MISC_RTSX
 	tristate
 	default MISC_RTSX_PCI || MISC_RTSX_USB
 
+config PVPANIC
+	tristate "pvpanic device support"
+	depends on HAS_IOMEM && (ACPI || OF)
+	help
+	  This driver provides support for the pvpanic device. pvpanic is
+	  a paravirtualized device provided by QEMU; it lets a virtual machine
+	  (guest) communicate panic events to the host.
+
 source "drivers/misc/c2port/Kconfig"
 source "drivers/misc/eeprom/Kconfig"
 source "drivers/misc/cb710/Kconfig"
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index fe3134cf3008..e39ccbbc1b3a 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -57,4 +57,5 @@ obj-$(CONFIG_ASPEED_LPC_CTRL) += aspeed-lpc-ctrl.o
 obj-$(CONFIG_ASPEED_LPC_SNOOP) += aspeed-lpc-snoop.o
 obj-$(CONFIG_PCI_ENDPOINT_TEST) += pci_endpoint_test.o
 obj-$(CONFIG_OCXL) += ocxl/
-obj-y += cardreader/
+obj-y += cardreader/
+obj-$(CONFIG_PVPANIC) += pvpanic.o
diff --git a/drivers/misc/altera-stapl/altera.c b/drivers/misc/altera-stapl/altera.c
index ef83a9078646..d2ed3b9728b7 100644
--- a/drivers/misc/altera-stapl/altera.c
+++ b/drivers/misc/altera-stapl/altera.c
@@ -2176,8 +2176,7 @@ static int altera_get_note(u8 *p, s32 program_size,
 			key_ptr = &p[note_strings +
 					get_unaligned_be32(
 					&p[note_table + (8 * i)])];
-			if ((strncasecmp(key, key_ptr, strlen(key_ptr)) == 0) &&
-				(key != NULL)) {
+			if (key && !strncasecmp(key, key_ptr, strlen(key_ptr))) {
 				status = 0;
 
 				value_ptr = &p[note_strings +
diff --git a/drivers/misc/genwqe/card_debugfs.c b/drivers/misc/genwqe/card_debugfs.c
index c6b82f09b3ba..7c713e01d198 100644
--- a/drivers/misc/genwqe/card_debugfs.c
+++ b/drivers/misc/genwqe/card_debugfs.c
@@ -33,19 +33,6 @@
 #include "card_base.h"
 #include "card_ddcb.h"
 
-#define GENWQE_DEBUGFS_RO(_name, _showfn)				\
-	static int genwqe_debugfs_##_name##_open(struct inode *inode,	\
-						 struct file *file)	\
-	{								\
-		return single_open(file, _showfn, inode->i_private);	\
-	}								\
-	static const struct file_operations genwqe_##_name##_fops = {	\
-		.open = genwqe_debugfs_##_name##_open,			\
-		.read = seq_read,					\
-		.llseek = seq_lseek,					\
-		.release = single_release,				\
-	}
-
 static void dbg_uidn_show(struct seq_file *s, struct genwqe_reg *regs,
 			  int entries)
 {
@@ -87,26 +74,26 @@ static int curr_dbg_uidn_show(struct seq_file *s, void *unused, int uid)
 	return 0;
 }
 
-static int genwqe_curr_dbg_uid0_show(struct seq_file *s, void *unused)
+static int curr_dbg_uid0_show(struct seq_file *s, void *unused)
 {
 	return curr_dbg_uidn_show(s, unused, 0);
 }
 
-GENWQE_DEBUGFS_RO(curr_dbg_uid0, genwqe_curr_dbg_uid0_show);
+DEFINE_SHOW_ATTRIBUTE(curr_dbg_uid0);
 
-static int genwqe_curr_dbg_uid1_show(struct seq_file *s, void *unused)
+static int curr_dbg_uid1_show(struct seq_file *s, void *unused)
 {
 	return curr_dbg_uidn_show(s, unused, 1);
 }
 
-GENWQE_DEBUGFS_RO(curr_dbg_uid1, genwqe_curr_dbg_uid1_show);
+DEFINE_SHOW_ATTRIBUTE(curr_dbg_uid1);
 
-static int genwqe_curr_dbg_uid2_show(struct seq_file *s, void *unused)
+static int curr_dbg_uid2_show(struct seq_file *s, void *unused)
 {
 	return curr_dbg_uidn_show(s, unused, 2);
 }
 
-GENWQE_DEBUGFS_RO(curr_dbg_uid2, genwqe_curr_dbg_uid2_show);
+DEFINE_SHOW_ATTRIBUTE(curr_dbg_uid2);
 
 static int prev_dbg_uidn_show(struct seq_file *s, void *unused, int uid)
 {
@@ -116,28 +103,28 @@ static int prev_dbg_uidn_show(struct seq_file *s, void *unused, int uid)
 	return 0;
 }
 
-static int genwqe_prev_dbg_uid0_show(struct seq_file *s, void *unused)
+static int prev_dbg_uid0_show(struct seq_file *s, void *unused)
 {
 	return prev_dbg_uidn_show(s, unused, 0);
 }
 
-GENWQE_DEBUGFS_RO(prev_dbg_uid0, genwqe_prev_dbg_uid0_show);
+DEFINE_SHOW_ATTRIBUTE(prev_dbg_uid0);
 
-static int genwqe_prev_dbg_uid1_show(struct seq_file *s, void *unused)
+static int prev_dbg_uid1_show(struct seq_file *s, void *unused)
 {
 	return prev_dbg_uidn_show(s, unused, 1);
 }
 
-GENWQE_DEBUGFS_RO(prev_dbg_uid1, genwqe_prev_dbg_uid1_show);
+DEFINE_SHOW_ATTRIBUTE(prev_dbg_uid1);
 
-static int genwqe_prev_dbg_uid2_show(struct seq_file *s, void *unused)
+static int prev_dbg_uid2_show(struct seq_file *s, void *unused)
 {
 	return prev_dbg_uidn_show(s, unused, 2);
 }
 
-GENWQE_DEBUGFS_RO(prev_dbg_uid2, genwqe_prev_dbg_uid2_show);
+DEFINE_SHOW_ATTRIBUTE(prev_dbg_uid2);
 
-static int genwqe_curr_regs_show(struct seq_file *s, void *unused)
+static int curr_regs_show(struct seq_file *s, void *unused)
 {
 	struct genwqe_dev *cd = s->private;
 	unsigned int i;
@@ -164,9 +151,9 @@ static int genwqe_curr_regs_show(struct seq_file *s, void *unused)
 	return 0;
 }
 
-GENWQE_DEBUGFS_RO(curr_regs, genwqe_curr_regs_show);
+DEFINE_SHOW_ATTRIBUTE(curr_regs);
 
-static int genwqe_prev_regs_show(struct seq_file *s, void *unused)
+static int prev_regs_show(struct seq_file *s, void *unused)
 {
 	struct genwqe_dev *cd = s->private;
 	unsigned int i;
@@ -188,9 +175,9 @@ static int genwqe_prev_regs_show(struct seq_file *s, void *unused)
 	return 0;
 }
 
-GENWQE_DEBUGFS_RO(prev_regs, genwqe_prev_regs_show);
+DEFINE_SHOW_ATTRIBUTE(prev_regs);
 
-static int genwqe_jtimer_show(struct seq_file *s, void *unused)
+static int jtimer_show(struct seq_file *s, void *unused)
 {
 	struct genwqe_dev *cd = s->private;
 	unsigned int vf_num;
@@ -209,9 +196,9 @@ static int genwqe_jtimer_show(struct seq_file *s, void *unused)
 	return 0;
 }
 
-GENWQE_DEBUGFS_RO(jtimer, genwqe_jtimer_show);
+DEFINE_SHOW_ATTRIBUTE(jtimer);
 
-static int genwqe_queue_working_time_show(struct seq_file *s, void *unused)
+static int queue_working_time_show(struct seq_file *s, void *unused)
 {
 	struct genwqe_dev *cd = s->private;
 	unsigned int vf_num;
@@ -227,9 +214,9 @@ static int genwqe_queue_working_time_show(struct seq_file *s, void *unused)
 	return 0;
 }
 
-GENWQE_DEBUGFS_RO(queue_working_time, genwqe_queue_working_time_show);
+DEFINE_SHOW_ATTRIBUTE(queue_working_time);
 
-static int genwqe_ddcb_info_show(struct seq_file *s, void *unused)
+static int ddcb_info_show(struct seq_file *s, void *unused)
 {
 	struct genwqe_dev *cd = s->private;
 	unsigned int i;
@@ -300,9 +287,9 @@ static int genwqe_ddcb_info_show(struct seq_file *s, void *unused)
 	return 0;
 }
 
-GENWQE_DEBUGFS_RO(ddcb_info, genwqe_ddcb_info_show);
+DEFINE_SHOW_ATTRIBUTE(ddcb_info);
 
-static int genwqe_info_show(struct seq_file *s, void *unused)
+static int info_show(struct seq_file *s, void *unused)
 {
 	struct genwqe_dev *cd = s->private;
 	u64 app_id, slu_id, bitstream = -1;
@@ -335,7 +322,7 @@ static int genwqe_info_show(struct seq_file *s, void *unused)
 	return 0;
 }
 
-GENWQE_DEBUGFS_RO(info, genwqe_info_show);
+DEFINE_SHOW_ATTRIBUTE(info);
 
 int genwqe_init_debugfs(struct genwqe_dev *cd)
 {
@@ -356,14 +343,14 @@ int genwqe_init_debugfs(struct genwqe_dev *cd)
 
 	/* non privileged interfaces are done here */
 	file = debugfs_create_file("ddcb_info", S_IRUGO, root, cd,
-				   &genwqe_ddcb_info_fops);
+				   &ddcb_info_fops);
 	if (!file) {
 		ret = -ENOMEM;
 		goto err1;
 	}
 
 	file = debugfs_create_file("info", S_IRUGO, root, cd,
-				   &genwqe_info_fops);
+				   &info_fops);
 	if (!file) {
 		ret = -ENOMEM;
 		goto err1;
@@ -396,56 +383,56 @@ int genwqe_init_debugfs(struct genwqe_dev *cd)
 	}
 
 	file = debugfs_create_file("curr_regs", S_IRUGO, root, cd,
-				   &genwqe_curr_regs_fops);
+				   &curr_regs_fops);
 	if (!file) {
 		ret = -ENOMEM;
 		goto err1;
 	}
 
 	file = debugfs_create_file("curr_dbg_uid0", S_IRUGO, root, cd,
-				   &genwqe_curr_dbg_uid0_fops);
+				   &curr_dbg_uid0_fops);
 	if (!file) {
 		ret = -ENOMEM;
 		goto err1;
 	}
 
 	file = debugfs_create_file("curr_dbg_uid1", S_IRUGO, root, cd,
-				   &genwqe_curr_dbg_uid1_fops);
+				   &curr_dbg_uid1_fops);
 	if (!file) {
 		ret = -ENOMEM;
 		goto err1;
 	}
 
 	file = debugfs_create_file("curr_dbg_uid2", S_IRUGO, root, cd,
-				   &genwqe_curr_dbg_uid2_fops);
+				   &curr_dbg_uid2_fops);
 	if (!file) {
 		ret = -ENOMEM;
 		goto err1;
 	}
 
 	file = debugfs_create_file("prev_regs", S_IRUGO, root, cd,
-				   &genwqe_prev_regs_fops);
+				   &prev_regs_fops);
 	if (!file) {
 		ret = -ENOMEM;
 		goto err1;
 	}
 
 	file = debugfs_create_file("prev_dbg_uid0", S_IRUGO, root, cd,
-				   &genwqe_prev_dbg_uid0_fops);
+				   &prev_dbg_uid0_fops);
 	if (!file) {
 		ret = -ENOMEM;
 		goto err1;
 	}
 
 	file = debugfs_create_file("prev_dbg_uid1", S_IRUGO, root, cd,
-				   &genwqe_prev_dbg_uid1_fops);
+				   &prev_dbg_uid1_fops);
 	if (!file) {
 		ret = -ENOMEM;
 		goto err1;
 	}
 
 	file = debugfs_create_file("prev_dbg_uid2", S_IRUGO, root, cd,
-				   &genwqe_prev_dbg_uid2_fops);
+				   &prev_dbg_uid2_fops);
 	if (!file) {
 		ret = -ENOMEM;
 		goto err1;
@@ -463,14 +450,14 @@ int genwqe_init_debugfs(struct genwqe_dev *cd)
 	}
 
 	file = debugfs_create_file("jobtimer", S_IRUGO, root, cd,
-				   &genwqe_jtimer_fops);
+				   &jtimer_fops);
 	if (!file) {
 		ret = -ENOMEM;
 		goto err1;
 	}
 
 	file = debugfs_create_file("queue_working_time", S_IRUGO, root, cd,
-				   &genwqe_queue_working_time_fops);
+				   &queue_working_time_fops);
 	if (!file) {
 		ret = -ENOMEM;
 		goto err1;
diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c
index 3fcb9a2fe1c9..efe2fb72d54b 100644
--- a/drivers/misc/genwqe/card_utils.c
+++ b/drivers/misc/genwqe/card_utils.c
@@ -215,7 +215,7 @@ u32 genwqe_crc32(u8 *buff, size_t len, u32 init)
 void *__genwqe_alloc_consistent(struct genwqe_dev *cd, size_t size,
 			       dma_addr_t *dma_handle)
 {
-	if (get_order(size) > MAX_ORDER)
+	if (get_order(size) >= MAX_ORDER)
 		return NULL;
 
 	return dma_zalloc_coherent(&cd->pci_dev->dev, size, dma_handle,
diff --git a/drivers/misc/mei/Makefile b/drivers/misc/mei/Makefile
index cd6825afa8e1..d9215fc4e499 100644
--- a/drivers/misc/mei/Makefile
+++ b/drivers/misc/mei/Makefile
@@ -9,6 +9,7 @@ mei-objs += hbm.o
 mei-objs += interrupt.o
 mei-objs += client.o
 mei-objs += main.o
+mei-objs += dma-ring.o
 mei-objs += bus.o
 mei-objs += bus-fixup.o
 mei-$(CONFIG_DEBUG_FS) += debugfs.o
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index ebdcf0b450e2..1fc8ea0f519b 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -318,23 +318,6 @@ void mei_me_cl_rm_all(struct mei_device *dev)
 }
 
 /**
- * mei_cl_cmp_id - tells if the clients are the same
- *
- * @cl1: host client 1
- * @cl2: host client 2
- *
- * Return: true  - if the clients has same host and me ids
- *         false - otherwise
- */
-static inline bool mei_cl_cmp_id(const struct mei_cl *cl1,
-				 const struct mei_cl *cl2)
-{
-	return cl1 && cl2 &&
-		(cl1->host_client_id == cl2->host_client_id) &&
-		(mei_cl_me_id(cl1) == mei_cl_me_id(cl2));
-}
-
-/**
  * mei_io_cb_free - free mei_cb_private related memory
  *
  * @cb: mei callback struct
@@ -418,7 +401,7 @@ static void mei_io_list_flush_cl(struct list_head *head,
 	struct mei_cl_cb *cb, *next;
 
 	list_for_each_entry_safe(cb, next, head, list) {
-		if (mei_cl_cmp_id(cl, cb->cl))
+		if (cl == cb->cl)
 			list_del_init(&cb->list);
 	}
 }
@@ -435,7 +418,7 @@ static void mei_io_tx_list_free_cl(struct list_head *head,
 	struct mei_cl_cb *cb, *next;
 
 	list_for_each_entry_safe(cb, next, head, list) {
-		if (mei_cl_cmp_id(cl, cb->cl))
+		if (cl == cb->cl)
 			mei_tx_cb_dequeue(cb);
 	}
 }
@@ -478,7 +461,7 @@ struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length,
 	if (length == 0)
 		return cb;
 
-	cb->buf.data = kmalloc(length, GFP_KERNEL);
+	cb->buf.data = kmalloc(roundup(length, MEI_SLOT_SIZE), GFP_KERNEL);
 	if (!cb->buf.data) {
 		mei_io_cb_free(cb);
 		return NULL;
@@ -1374,7 +1357,9 @@ int mei_cl_notify_request(struct mei_cl *cl,
 
 	mutex_unlock(&dev->device_lock);
 	wait_event_timeout(cl->wait,
-			   cl->notify_en == request || !mei_cl_is_connected(cl),
+			   cl->notify_en == request ||
+			   cl->status ||
+			   !mei_cl_is_connected(cl),
 			   mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
 	mutex_lock(&dev->device_lock);
 
@@ -1573,10 +1558,13 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
 	struct mei_msg_hdr mei_hdr;
 	size_t hdr_len = sizeof(mei_hdr);
 	size_t len;
-	size_t hbuf_len;
+	size_t hbuf_len, dr_len;
 	int hbuf_slots;
+	u32 dr_slots;
+	u32 dma_len;
 	int rets;
 	bool first_chunk;
+	const void *data;
 
 	if (WARN_ON(!cl || !cl->dev))
 		return -ENODEV;
@@ -1597,6 +1585,7 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
 	}
 
 	len = buf->size - cb->buf_idx;
+	data = buf->data + cb->buf_idx;
 	hbuf_slots = mei_hbuf_empty_slots(dev);
 	if (hbuf_slots < 0) {
 		rets = -EOVERFLOW;
@@ -1604,6 +1593,8 @@
 	}
 
 	hbuf_len = mei_slots2data(hbuf_slots);
+	dr_slots = mei_dma_ring_empty_slots(dev);
+	dr_len = mei_slots2data(dr_slots);
 
 	mei_msg_hdr_init(&mei_hdr, cb);
 
@@ -1614,23 +1605,33 @@
 	if (len + hdr_len <= hbuf_len) {
 		mei_hdr.length = len;
 		mei_hdr.msg_complete = 1;
+	} else if (dr_slots && hbuf_len >= hdr_len + sizeof(dma_len)) {
+		mei_hdr.dma_ring = 1;
+		if (len > dr_len)
+			len = dr_len;
+		else
+			mei_hdr.msg_complete = 1;
+
+		mei_hdr.length = sizeof(dma_len);
+		dma_len = len;
+		data = &dma_len;
 	} else if ((u32)hbuf_slots == mei_hbuf_depth(dev)) {
-		mei_hdr.length = hbuf_len - hdr_len;
+		len = hbuf_len - hdr_len;
+		mei_hdr.length = len;
 	} else {
 		return 0;
 	}
 
-	cl_dbg(dev, cl, "buf: size = %zu idx = %zu\n",
-	       cb->buf.size, cb->buf_idx);
+	if (mei_hdr.dma_ring)
+		mei_dma_ring_write(dev, buf->data + cb->buf_idx, len);
 
-	rets = mei_write_message(dev, &mei_hdr, hdr_len,
-				 buf->data + cb->buf_idx, mei_hdr.length);
+	rets = mei_write_message(dev, &mei_hdr, hdr_len, data, mei_hdr.length);
 	if (rets)
 		goto err;
 
 	cl->status = 0;
 	cl->writing_state = MEI_WRITING;
-	cb->buf_idx += mei_hdr.length;
+	cb->buf_idx += len;
 
 	if (first_chunk) {
 		if (mei_cl_tx_flow_ctrl_creds_reduce(cl)) {
@@ -1665,11 +1666,13 @@ ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb)
 	struct mei_msg_data *buf;
 	struct mei_msg_hdr mei_hdr;
 	size_t hdr_len = sizeof(mei_hdr);
-	size_t len;
-	size_t hbuf_len;
+	size_t len, hbuf_len, dr_len;
 	int hbuf_slots;
+	u32 dr_slots;
+	u32 dma_len;
 	ssize_t rets;
 	bool blocking;
+	const void *data;
 
 	if (WARN_ON(!cl || !cl->dev))
 		return -ENODEV;
@@ -1681,10 +1684,12 @@ ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb)
 
 	buf = &cb->buf;
 	len = buf->size;
-	blocking = cb->blocking;
 
 	cl_dbg(dev, cl, "len=%zd\n", len);
 
+	blocking = cb->blocking;
+	data = buf->data;
+
 	rets = pm_runtime_get(dev->dev);
 	if (rets < 0 && rets != -EINPROGRESS) {
 		pm_runtime_put_noidle(dev->dev);
@@ -1721,16 +1726,32 @@
 	}
 
 	hbuf_len = mei_slots2data(hbuf_slots);
+	dr_slots = mei_dma_ring_empty_slots(dev);
+	dr_len = mei_slots2data(dr_slots);
 
 	if (len + hdr_len <= hbuf_len) {
 		mei_hdr.length = len;
 		mei_hdr.msg_complete = 1;
+	} else if (dr_slots && hbuf_len >= hdr_len + sizeof(dma_len)) {
+		mei_hdr.dma_ring = 1;
+		if (len > dr_len)
+			len = dr_len;
+		else
+			mei_hdr.msg_complete = 1;
+
+		mei_hdr.length = sizeof(dma_len);
+		dma_len = len;
+		data = &dma_len;
 	} else {
-		mei_hdr.length = hbuf_len - hdr_len;
+		len = hbuf_len - hdr_len;
+		mei_hdr.length = len;
 	}
 
+	if (mei_hdr.dma_ring)
+		mei_dma_ring_write(dev, buf->data, len);
+
 	rets = mei_write_message(dev, &mei_hdr, hdr_len,
-				 buf->data, mei_hdr.length);
+				 data, mei_hdr.length);
 	if (rets)
 		goto err;
 
@@ -1739,7 +1760,9 @@ ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb)
 		goto err;
 
 	cl->writing_state = MEI_WRITING;
-	cb->buf_idx = mei_hdr.length;
+	cb->buf_idx = len;
+	/* restore return value */
+	len = buf->size;
 
 out:
 	if (mei_hdr.msg_complete)
diff --git a/drivers/misc/mei/dma-ring.c b/drivers/misc/mei/dma-ring.c
new file mode 100644
index 000000000000..795641b82181
--- /dev/null
+++ b/drivers/misc/mei/dma-ring.c
@@ -0,0 +1,269 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright(c) 2016 - 2018 Intel Corporation. All rights reserved.
+ */
+#include <linux/dma-mapping.h>
+#include <linux/mei.h>
+
+#include "mei_dev.h"
+
+/**
+ * mei_dmam_dscr_alloc() - allocate a managed coherent buffer
+ *     for the dma descriptor
+ * @dev: mei_device
+ * @dscr: dma descriptor
+ *
+ * Return:
+ * * 0       - on success or zero allocation request
+ * * -EINVAL - if size is not power of 2
+ * * -ENOMEM - of allocation has failed
+ */
+static int mei_dmam_dscr_alloc(struct mei_device *dev,
+			       struct mei_dma_dscr *dscr)
+{
+	if (!dscr->size)
+		return 0;
+
+	if (WARN_ON(!is_power_of_2(dscr->size)))
+		return -EINVAL;
+
+	if (dscr->vaddr)
+		return 0;
+
+	dscr->vaddr = dmam_alloc_coherent(dev->dev, dscr->size, &dscr->daddr,
+					  GFP_KERNEL);
+	if (!dscr->vaddr)
+		return -ENOMEM;
+
+	return 0;
+}
+
+/**
+ * mei_dmam_dscr_free() - free a managed coherent buffer
+ *     from the dma descriptor
+ * @dev: mei_device
+ * @dscr: dma descriptor
+ */
+static void mei_dmam_dscr_free(struct mei_device *dev,
+			       struct mei_dma_dscr *dscr)
+{
+	if (!dscr->vaddr)
+		return;
+
+	dmam_free_coherent(dev->dev, dscr->size, dscr->vaddr, dscr->daddr);
+	dscr->vaddr = NULL;
+}
+
+/**
+ * mei_dmam_ring_free() - free dma ring buffers
+ * @dev: mei device
+ */
+void mei_dmam_ring_free(struct mei_device *dev)
+{
+	int i;
+
+	for (i = 0; i < DMA_DSCR_NUM; i++)
+		mei_dmam_dscr_free(dev, &dev->dr_dscr[i]);
+}
+
+/**
+ * mei_dmam_ring_alloc() - allocate dma ring buffers
+ * @dev: mei device
+ *
+ * Return: -ENOMEM on allocation failure 0 otherwise
+ */
+int mei_dmam_ring_alloc(struct mei_device *dev)
+{
+	int i;
+
+	for (i = 0; i < DMA_DSCR_NUM; i++)
+		if (mei_dmam_dscr_alloc(dev, &dev->dr_dscr[i]))
+			goto err;
+
+	return 0;
+
+err:
+	mei_dmam_ring_free(dev);
+	return -ENOMEM;
+}
+
+/**
+ * mei_dma_ring_is_allocated() - check if dma ring is allocated
+ * @dev: mei device
+ *
+ * Return: true if dma ring is allocated
+ */
+bool mei_dma_ring_is_allocated(struct mei_device *dev)
+{
+	return !!dev->dr_dscr[DMA_DSCR_HOST].vaddr;
+}
+
+static inline
+struct hbm_dma_ring_ctrl *mei_dma_ring_ctrl(struct mei_device *dev)
+{
+	return (struct hbm_dma_ring_ctrl *)dev->dr_dscr[DMA_DSCR_CTRL].vaddr;
+}
+
+/**
+ * mei_dma_ring_reset() - reset the dma control block
+ * @dev: mei device
+ */
+void mei_dma_ring_reset(struct mei_device *dev)
+{
+	struct hbm_dma_ring_ctrl *ctrl = mei_dma_ring_ctrl(dev);
+
+	if (!ctrl)
+		return;
+
+	memset(ctrl, 0, sizeof(*ctrl));
+}
+
+/**
+ * mei_dma_copy_from() - copy from dma ring into buffer
+ * @dev: mei device
+ * @buf: data buffer
+ * @offset: offset in slots.
+ * @n: number of slots to copy.
+ */
+static size_t mei_dma_copy_from(struct mei_device *dev, unsigned char *buf,
+				u32 offset, u32 n)
+{
+	unsigned char *dbuf = dev->dr_dscr[DMA_DSCR_DEVICE].vaddr;
+
+	size_t b_offset = offset << 2;
+	size_t b_n = n << 2;
+
+	memcpy(buf, dbuf + b_offset, b_n);
+
+	return b_n;
+}
+
+/**
+ * mei_dma_copy_to() - copy to a buffer to the dma ring
+ * @dev: mei device
+ * @buf: data buffer
+ * @offset: offset in slots.
+ * @n: number of slots to copy.
+ */
+static size_t mei_dma_copy_to(struct mei_device *dev, unsigned char *buf,
+			      u32 offset, u32 n)
+{
+	unsigned char *hbuf = dev->dr_dscr[DMA_DSCR_HOST].vaddr;
+
+	size_t b_offset = offset << 2;
+	size_t b_n = n << 2;
+
+	memcpy(hbuf + b_offset, buf, b_n);
+
+	return b_n;
+}
+
+/**
+ * mei_dma_ring_read() - read data from the ring
+ * @dev: mei device
+ * @buf: buffer to read into: may be NULL in case of droping the data.
+ * @len: length to read.
+ */
+void mei_dma_ring_read(struct mei_device *dev, unsigned char *buf, u32 len)
+{
+	struct hbm_dma_ring_ctrl *ctrl = mei_dma_ring_ctrl(dev);
+	u32 dbuf_depth;
+	u32 rd_idx, rem, slots;
+
+	if (WARN_ON(!ctrl))
+		return;
+
+	dev_dbg(dev->dev, "reading from dma %u bytes\n", len);
+
+	if (!len)
+		return;
+
+	dbuf_depth = dev->dr_dscr[DMA_DSCR_DEVICE].size >> 2;
+	rd_idx = READ_ONCE(ctrl->dbuf_rd_idx) & (dbuf_depth - 1);
+	slots = mei_data2slots(len);
+
+	/* if buf is NULL we drop the packet by advancing the pointer.*/
+	if (!buf)
+		goto out;
+
+	if (rd_idx + slots > dbuf_depth) {
+		buf += mei_dma_copy_from(dev, buf, rd_idx, dbuf_depth - rd_idx);
+		rem = slots - (dbuf_depth - rd_idx);
+		rd_idx = 0;
+	} else {
+		rem = slots;
+	}
+
+	mei_dma_copy_from(dev, buf, rd_idx, rem);
+out:
+	WRITE_ONCE(ctrl->dbuf_rd_idx, ctrl->dbuf_rd_idx + slots);
+}
+
+static inline u32 mei_dma_ring_hbuf_depth(struct mei_device *dev)
+{
+	return dev->dr_dscr[DMA_DSCR_HOST].size >> 2;
+}
+
+/**
+ * mei_dma_ring_empty_slots() - calaculate number of empty slots in dma ring
+ * @dev: mei_device
+ *
+ * Return: number of empty slots
+ */
+u32 mei_dma_ring_empty_slots(struct mei_device *dev)
+{
+	struct hbm_dma_ring_ctrl *ctrl = mei_dma_ring_ctrl(dev);
+	u32 wr_idx, rd_idx, hbuf_depth, empty;
+
+	if (!mei_dma_ring_is_allocated(dev))
+		return 0;
+
+	if (WARN_ON(!ctrl))
+		return 0;
+
+	/* easier to work in slots */
+	hbuf_depth = mei_dma_ring_hbuf_depth(dev);
+	rd_idx = READ_ONCE(ctrl->hbuf_rd_idx);
+	wr_idx = READ_ONCE(ctrl->hbuf_wr_idx);
+
+	if (rd_idx > wr_idx)
+		empty = rd_idx - wr_idx;
+	else
+		empty = hbuf_depth - (wr_idx - rd_idx);
+
+	return empty;
+}
+
+/**
+ * mei_dma_ring_write - write data to dma ring host buffer
+ *
+ * @dev: mei_device
+ * @buf: data will be written
+ * @len: data length
+ */
+void mei_dma_ring_write(struct mei_device *dev, unsigned char *buf, u32 len)
+{
+	struct hbm_dma_ring_ctrl *ctrl = mei_dma_ring_ctrl(dev);
+	u32 hbuf_depth;
+	u32 wr_idx, rem, slots;
+
+	if (WARN_ON(!ctrl))
+		return;
+
+	dev_dbg(dev->dev, "writing to dma %u bytes\n", len);
+	hbuf_depth = mei_dma_ring_hbuf_depth(dev);
+	wr_idx = READ_ONCE(ctrl->hbuf_wr_idx) & (hbuf_depth - 1);
+	slots = mei_data2slots(len);
+
+	if (wr_idx + slots > hbuf_depth) {
+		buf += mei_dma_copy_to(dev, buf, wr_idx, hbuf_depth - wr_idx);
+		rem = slots - (hbuf_depth - wr_idx);
+		wr_idx = 0;
+	} else {
+		rem = slots;
+	}
+
+	mei_dma_copy_to(dev, buf, wr_idx, rem);
+
+	WRITE_ONCE(ctrl->hbuf_wr_idx, ctrl->hbuf_wr_idx + slots);
+}
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index e56f3e72d57a..78c26cebf5d4 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -65,6 +65,7 @@ const char *mei_hbm_state_str(enum mei_hbm_state state)
 	MEI_HBM_STATE(IDLE);
 	MEI_HBM_STATE(STARTING);
 	MEI_HBM_STATE(STARTED);
+	MEI_HBM_STATE(DR_SETUP);
 	MEI_HBM_STATE(ENUM_CLIENTS);
 	MEI_HBM_STATE(CLIENT_PROPERTIES);
 	MEI_HBM_STATE(STOPPED);
@@ -296,6 +297,48 @@ int mei_hbm_start_req(struct mei_device *dev)
 }
 
 /**
+ * mei_hbm_dma_setup_req() - setup DMA request
+ * @dev: the device structure
+ *
+ * Return: 0 on success and < 0 on failure
+ */
+static int mei_hbm_dma_setup_req(struct mei_device *dev)
+{
+	struct mei_msg_hdr mei_hdr;
+	struct hbm_dma_setup_request req;
+	const size_t len = sizeof(struct hbm_dma_setup_request);
+	unsigned int i;
+	int ret;
+
+	mei_hbm_hdr(&mei_hdr, len);
+
+	memset(&req, 0, len);
+	req.hbm_cmd = MEI_HBM_DMA_SETUP_REQ_CMD;
+	for (i = 0; i < DMA_DSCR_NUM; i++) {
+		phys_addr_t paddr;
+
+		paddr = dev->dr_dscr[i].daddr;
+		req.dma_dscr[i].addr_hi = upper_32_bits(paddr);
+		req.dma_dscr[i].addr_lo = lower_32_bits(paddr);
+		req.dma_dscr[i].size = dev->dr_dscr[i].size;
+	}
+
+	mei_dma_ring_reset(dev);
+
+	ret = mei_hbm_write_message(dev, &mei_hdr, &req);
+	if (ret) {
+		dev_err(dev->dev, "dma setup request write failed: ret = %d.\n",
+			ret);
+		return ret;
+	}
+
+	dev->hbm_state = MEI_HBM_DR_SETUP;
+	dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
+	mei_schedule_stall_timer(dev);
+	return 0;
+}
+
+/**
  * mei_hbm_enum_clients_req - sends enumeration client request message.
  *
  * @dev: the device structure
@@ -1044,6 +1087,7 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
 	struct hbm_host_version_response *version_res;
 	struct hbm_props_response *props_res;
 	struct hbm_host_enum_response *enum_res;
+	struct hbm_dma_setup_response *dma_setup_res;
 	struct hbm_add_client_request *add_cl_req;
 	int ret;
 
@@ -1108,14 +1152,52 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
 			return -EPROTO;
 		}
 
-		if (mei_hbm_enum_clients_req(dev)) {
-			dev_err(dev->dev, "hbm: start: failed to send enumeration request\n");
-			return -EIO;
+		if (dev->hbm_f_dr_supported) {
+			if (mei_dmam_ring_alloc(dev))
+				dev_info(dev->dev, "running w/o dma ring\n");
+			if (mei_dma_ring_is_allocated(dev)) {
+				if (mei_hbm_dma_setup_req(dev))
+					return -EIO;
+
+				wake_up(&dev->wait_hbm_start);
+				break;
+			}
 		}
 
+		dev->hbm_f_dr_supported = 0;
+		mei_dmam_ring_free(dev);
+
+		if (mei_hbm_enum_clients_req(dev))
+			return -EIO;
+
 		wake_up(&dev->wait_hbm_start);
 		break;
 
+	case MEI_HBM_DMA_SETUP_RES_CMD:
+		dev_dbg(dev->dev, "hbm: dma setup response: message received.\n");
+
+		dev->init_clients_timer = 0;
+
+		if (dev->hbm_state != MEI_HBM_DR_SETUP) {
+			dev_err(dev->dev, "hbm: dma setup response: state mismatch, [%d, %d]\n",
+				dev->dev_state, dev->hbm_state);
+			return -EPROTO;
+		}
+
+		dma_setup_res = (struct hbm_dma_setup_response *)mei_msg;
+
+		if (dma_setup_res->status) {
+			dev_info(dev->dev, "hbm: dma setup |
