Diffstat (limited to 'drivers/usb/cdns3/cdnsp-ring.c')
-rw-r--r-- | drivers/usb/cdns3/cdnsp-ring.c | 2376
1 file changed, 2376 insertions, 0 deletions
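Editor's note: the header comment of the new file below spells out the ring, cycle-bit, producer and consumer rules in prose. As a quick orientation aid, here is a minimal, self-contained C sketch of those rules. Everything named demo_* is invented for illustration and is not part of the driver; the real code operates on multi-segment cdnsp_ring/cdnsp_segment structures with link TRBs.

/*
 * Illustrative sketch only (not part of the patch): a single-segment model of
 * the cycle-bit ownership rule from the header comment below.
 */
#include <stdbool.h>
#include <stdio.h>

#define DEMO_TRBS_PER_SEGMENT	8
#define DEMO_TRB_CYCLE		0x1

struct demo_trb {
	unsigned int control;		/* bit 0 models the TRB cycle bit */
};

struct demo_ring {
	struct demo_trb trbs[DEMO_TRBS_PER_SEGMENT];
	unsigned int dequeue;		/* index of the next TRB to consume */
	unsigned int cycle_state;	/* consumer cycle state (CCS) */
};

/* Consumer rule 1: a TRB is ours when its cycle bit matches our CCS. */
static bool demo_trb_owned_by_consumer(const struct demo_ring *ring)
{
	return (ring->trbs[ring->dequeue].control & DEMO_TRB_CYCLE) ==
	       ring->cycle_state;
}

/* Consumer rule 2: advance dequeue and toggle CCS when the ring wraps. */
static void demo_inc_deq(struct demo_ring *ring)
{
	if (++ring->dequeue == DEMO_TRBS_PER_SEGMENT) {
		ring->dequeue = 0;
		ring->cycle_state ^= 1;
	}
}

int main(void)
{
	struct demo_ring ring = { .cycle_state = 1 };
	int produced, consumed = 0;

	/* Producer rule 2: write our cycle state into the TRBs we enqueue. */
	for (produced = 0; produced < 5; produced++)
		ring.trbs[produced].control = DEMO_TRB_CYCLE;

	/* Consume until we hit a TRB we do not own. */
	while (demo_trb_owned_by_consumer(&ring)) {
		consumed++;
		demo_inc_deq(&ring);
	}

	printf("consumed %d TRBs\n", consumed);	/* prints: consumed 5 */
	return 0;
}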
diff --git a/drivers/usb/cdns3/cdnsp-ring.c b/drivers/usb/cdns3/cdnsp-ring.c
new file mode 100644
index 000000000000..a28faca41a8f
--- /dev/null
+++ b/drivers/usb/cdns3/cdnsp-ring.c
@@ -0,0 +1,2376 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cadence CDNSP DRD Driver.
+ *
+ * Copyright (C) 2020 Cadence.
+ *
+ * Author: Pawel Laszczak <pawell@cadence.com>
+ *
+ * Code based on Linux XHCI driver.
+ * Origin: Copyright (C) 2008 Intel Corp
+ */
+
+/*
+ * Ring initialization rules:
+ * 1. Each segment is initialized to zero, except for link TRBs.
+ * 2. Ring cycle state = 0. This represents Producer Cycle State (PCS) or
+ *    Consumer Cycle State (CCS), depending on ring function.
+ * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment.
+ *
+ * Ring behavior rules:
+ * 1. A ring is empty if enqueue == dequeue. This means there will always be at
+ *    least one free TRB in the ring. This is useful if you want to turn that
+ *    into a link TRB and expand the ring.
+ * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a
+ *    link TRB, then load the pointer with the address in the link TRB. If the
+ *    link TRB had its toggle bit set, you may need to update the ring cycle
+ *    state (see cycle bit rules). You may have to do this multiple times
+ *    until you reach a non-link TRB.
+ * 3. A ring is full if enqueue++ (for the definition of increment above)
+ *    equals the dequeue pointer.
+ *
+ * Cycle bit rules:
+ * 1. When a consumer increments a dequeue pointer and encounters a toggle bit
+ *    in a link TRB, it must toggle the ring cycle state.
+ * 2. When a producer increments an enqueue pointer and encounters a toggle bit
+ *    in a link TRB, it must toggle the ring cycle state.
+ *
+ * Producer rules:
+ * 1. Check if ring is full before you enqueue.
+ * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
+ *    Update enqueue pointer between each write (which may update the ring
+ *    cycle state).
+ * 3. Notify consumer. If SW is producer, it rings the doorbell for command
+ *    and endpoint rings. If the controller is the producer for the event
+ *    ring, it generates an interrupt according to interrupt modulation rules.
+ *
+ * Consumer rules:
+ * 1. Check if TRB belongs to you. If the cycle bit == your ring cycle state,
+ *    the TRB is owned by the consumer.
+ * 2. Update dequeue pointer (which may update the ring cycle state) and
+ *    continue processing TRBs until you reach a TRB which is not owned by you.
+ * 3. Notify the producer. SW is the consumer for the event ring, and it
+ *    updates event ring dequeue pointer. Controller is the consumer for the
+ *    command and endpoint rings; it generates events on the event ring
+ *    for these.
+ */
+
+#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/irq.h>
+
+#include "cdnsp-gadget.h"
+
+/*
+ * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
+ * address of the TRB.
+ */
+dma_addr_t cdnsp_trb_virt_to_dma(struct cdnsp_segment *seg,
+				 union cdnsp_trb *trb)
+{
+	unsigned long segment_offset = trb - seg->trbs;
+
+	if (trb < seg->trbs || segment_offset >= TRBS_PER_SEGMENT)
+		return 0;
+
+	return seg->dma + (segment_offset * sizeof(*trb));
+}
+
+static bool cdnsp_trb_is_noop(union cdnsp_trb *trb)
+{
+	return TRB_TYPE_NOOP_LE32(trb->generic.field[3]);
+}
+
+static bool cdnsp_trb_is_link(union cdnsp_trb *trb)
+{
+	return TRB_TYPE_LINK_LE32(trb->link.control);
+}
+
+bool cdnsp_last_trb_on_seg(struct cdnsp_segment *seg, union cdnsp_trb *trb)
+{
+	return trb == &seg->trbs[TRBS_PER_SEGMENT - 1];
+}
+
+bool cdnsp_last_trb_on_ring(struct cdnsp_ring *ring,
+			    struct cdnsp_segment *seg,
+			    union cdnsp_trb *trb)
+{
+	return cdnsp_last_trb_on_seg(seg, trb) && (seg->next == ring->first_seg);
+}
+
+static bool cdnsp_link_trb_toggles_cycle(union cdnsp_trb *trb)
+{
+	return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
+}
+
+static void cdnsp_trb_to_noop(union cdnsp_trb *trb, u32 noop_type)
+{
+	if (cdnsp_trb_is_link(trb)) {
+		/* Unchain chained link TRBs. */
+		trb->link.control &= cpu_to_le32(~TRB_CHAIN);
+	} else {
+		trb->generic.field[0] = 0;
+		trb->generic.field[1] = 0;
+		trb->generic.field[2] = 0;
+		/* Preserve only the cycle bit of this TRB. */
+		trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
+		trb->generic.field[3] |= cpu_to_le32(TRB_TYPE(noop_type));
+	}
+}
+
+/*
+ * Updates trb to point to the next TRB in the ring, and updates seg if the
+ * next TRB is in a new segment. This does not skip over link TRBs, and it
+ * does not affect the ring dequeue or enqueue pointers.
+ */
+static void cdnsp_next_trb(struct cdnsp_device *pdev,
+			   struct cdnsp_ring *ring,
+			   struct cdnsp_segment **seg,
+			   union cdnsp_trb **trb)
+{
+	if (cdnsp_trb_is_link(*trb)) {
+		*seg = (*seg)->next;
+		*trb = ((*seg)->trbs);
+	} else {
+		(*trb)++;
+	}
+}
+
+/*
+ * See Cycle bit rules. SW is the consumer for the event ring only.
+ * Don't make a ring full of link TRBs. That would be dumb and this would loop.
+ */
+void cdnsp_inc_deq(struct cdnsp_device *pdev, struct cdnsp_ring *ring)
+{
+	/* event ring doesn't have link trbs, check for last trb. */
+	if (ring->type == TYPE_EVENT) {
+		if (!cdnsp_last_trb_on_seg(ring->deq_seg, ring->dequeue)) {
+			ring->dequeue++;
+			return;
+		}
+
+		if (cdnsp_last_trb_on_ring(ring, ring->deq_seg, ring->dequeue))
+			ring->cycle_state ^= 1;
+
+		ring->deq_seg = ring->deq_seg->next;
+		ring->dequeue = ring->deq_seg->trbs;
+		return;
+	}
+
+	/* All other rings have link trbs. */
+	if (!cdnsp_trb_is_link(ring->dequeue)) {
+		ring->dequeue++;
+		ring->num_trbs_free++;
+	}
+	while (cdnsp_trb_is_link(ring->dequeue)) {
+		ring->deq_seg = ring->deq_seg->next;
+		ring->dequeue = ring->deq_seg->trbs;
+	}
+}
+
+/*
+ * See Cycle bit rules. SW is the consumer for the event ring only.
+ * Don't make a ring full of link TRBs. That would be dumb and this would loop.
+ *
+ * If we've just enqueued a TRB that is in the middle of a TD (meaning the
+ * chain bit is set), then set the chain bit in all the following link TRBs.
+ * If we've enqueued the last TRB in a TD, make sure the following link TRBs
+ * have their chain bit cleared (so that each Link TRB is a separate TD).
+ *
+ * @more_trbs_coming: Will you enqueue more TRBs before ringing the doorbell?
+ */
+static void cdnsp_inc_enq(struct cdnsp_device *pdev,
+			  struct cdnsp_ring *ring,
+			  bool more_trbs_coming)
+{
+	union cdnsp_trb *next;
+	u32 chain;
+
+	chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
+
+	/* If this is not event ring, there is one less usable TRB. */
+	if (!cdnsp_trb_is_link(ring->enqueue))
+		ring->num_trbs_free--;
+	next = ++(ring->enqueue);
+
+	/* Update the enqueue pointer further if that was a link TRB. */
+	while (cdnsp_trb_is_link(next)) {
+		/*
+		 * If the caller doesn't plan on enqueuing more TDs before
+		 * ringing the doorbell, then we don't want to give the link TRB
+		 * to the hardware just yet. We'll give the link TRB back in
+		 * cdnsp_prepare_ring() just before we enqueue the TD at the
+		 * top of the ring.
+		 */
+		if (!chain && !more_trbs_coming)
+			break;
+
+		next->link.control &= cpu_to_le32(~TRB_CHAIN);
+		next->link.control |= cpu_to_le32(chain);
+
+		/* Give this link TRB to the hardware */
+		wmb();
+		next->link.control ^= cpu_to_le32(TRB_CYCLE);
+
+		/* Toggle the cycle bit after the last ring segment. */
+		if (cdnsp_link_trb_toggles_cycle(next))
+			ring->cycle_state ^= 1;
+
+		ring->enq_seg = ring->enq_seg->next;
+		ring->enqueue = ring->enq_seg->trbs;
+		next = ring->enqueue;
+	}
+}
+
+/*
+ * Check to see if there's room to enqueue num_trbs on the ring and make sure
+ * the enqueue pointer will not advance into the dequeue segment.
+ */
+static bool cdnsp_room_on_ring(struct cdnsp_device *pdev,
+			       struct cdnsp_ring *ring,
+			       unsigned int num_trbs)
+{
+	int num_trbs_in_deq_seg;
+
+	if (ring->num_trbs_free < num_trbs)
+		return false;
+
+	if (ring->type != TYPE_COMMAND && ring->type != TYPE_EVENT) {
+		num_trbs_in_deq_seg = ring->dequeue - ring->deq_seg->trbs;
+
+		if (ring->num_trbs_free < num_trbs + num_trbs_in_deq_seg)
+			return false;
+	}
+
+	return true;
+}
+
+/*
+ * Workaround for L1: controller has issue with resuming from L1 after
+ * setting doorbell for endpoint during L1 state. This function forces
+ * resume signal in such case.
+ */
+static void cdnsp_force_l0_go(struct cdnsp_device *pdev)
+{
+	if (pdev->active_port == &pdev->usb2_port && pdev->gadget.lpm_capable)
+		cdnsp_set_link_state(pdev, &pdev->active_port->regs->portsc, XDEV_U0);
+}
+
+/* Ring the doorbell after placing a command on the ring. */
+void cdnsp_ring_cmd_db(struct cdnsp_device *pdev)
+{
+	writel(DB_VALUE_CMD, &pdev->dba->cmd_db);
+}
+
+/*
+ * Ring the doorbell after placing a transfer on the ring.
+ * Returns true if doorbell was set, otherwise false.
+ */
+static bool cdnsp_ring_ep_doorbell(struct cdnsp_device *pdev,
+				   struct cdnsp_ep *pep,
+				   unsigned int stream_id)
+{
+	__le32 __iomem *reg_addr = &pdev->dba->ep_db;
+	unsigned int ep_state = pep->ep_state;
+	unsigned int db_value;
+
+	/*
+	 * Don't ring the doorbell for this endpoint if the endpoint is halted
+	 * or disabled.
+	 */
+	if (ep_state & EP_HALTED || !(ep_state & EP_ENABLED))
+		return false;
+
+	/* For stream capable endpoints driver can ring doorbell only twice. */
+	if (pep->ep_state & EP_HAS_STREAMS) {
+		if (pep->stream_info.drbls_count >= 2)
+			return false;
+
+		pep->stream_info.drbls_count++;
+	}
+
+	pep->ep_state &= ~EP_STOPPED;
+
+	if (pep->idx == 0 && pdev->ep0_stage == CDNSP_DATA_STAGE &&
+	    !pdev->ep0_expect_in)
+		db_value = DB_VALUE_EP0_OUT(pep->idx, stream_id);
+	else
+		db_value = DB_VALUE(pep->idx, stream_id);
+
+	writel(db_value, reg_addr);
+
+	cdnsp_force_l0_go(pdev);
+
+	/* Doorbell was set. */
+	return true;
+}
+
+/*
+ * Get the right ring for the given pep and stream_id.
+ * If the endpoint supports streams, boundary check the USB request's stream
+ * ID. If the endpoint doesn't support streams, return the singular endpoint
+ * ring.
+ */
+static struct cdnsp_ring *cdnsp_get_transfer_ring(struct cdnsp_device *pdev,
+						  struct cdnsp_ep *pep,
+						  unsigned int stream_id)
+{
+	if (!(pep->ep_state & EP_HAS_STREAMS))
+		return pep->ring;
+
+	if (stream_id == 0 || stream_id >= pep->stream_info.num_streams) {
+		dev_err(pdev->dev, "ERR: %s ring doesn't exist for SID: %d.\n",
+			pep->name, stream_id);
+		return NULL;
+	}
+
+	return pep->stream_info.stream_rings[stream_id];
+}
+
+static struct cdnsp_ring *
+	cdnsp_request_to_transfer_ring(struct cdnsp_device *pdev,
+				       struct cdnsp_request *preq)
+{
+	return cdnsp_get_transfer_ring(pdev, preq->pep,
+				       preq->request.stream_id);
+}
+
+/* Ring the doorbell for any rings with pending requests. */
+void cdnsp_ring_doorbell_for_active_rings(struct cdnsp_device *pdev,
+					  struct cdnsp_ep *pep)
+{
+	struct cdnsp_stream_info *stream_info;
+	unsigned int stream_id;
+	int ret;
+
+	if (pep->ep_state & EP_DIS_IN_RROGRESS)
+		return;
+
+	/* A ring has a pending request if its TD list is not empty. */
+	if (!(pep->ep_state & EP_HAS_STREAMS) && pep->number) {
+		if (pep->ring && !list_empty(&pep->ring->td_list))
+			cdnsp_ring_ep_doorbell(pdev, pep, 0);
+		return;
+	}
+
+	stream_info = &pep->stream_info;
+
+	for (stream_id = 1; stream_id < stream_info->num_streams; stream_id++) {
+		struct cdnsp_td *td, *td_temp;
+		struct cdnsp_ring *ep_ring;
+
+		if (stream_info->drbls_count >= 2)
+			return;
+
+		ep_ring = cdnsp_get_transfer_ring(pdev, pep, stream_id);
+		if (!ep_ring)
+			continue;
+
+		if (!ep_ring->stream_active || ep_ring->stream_rejected)
+			continue;
+
+		list_for_each_entry_safe(td, td_temp, &ep_ring->td_list,
+					 td_list) {
+			if (td->drbl)
+				continue;
+
+			ret = cdnsp_ring_ep_doorbell(pdev, pep, stream_id);
+			if (ret)
+				td->drbl = 1;
+		}
+	}
+}
+
+/*
+ * Get the hw dequeue pointer the controller stopped on, either directly from
+ * the endpoint context, or if streams are in use from the stream context.
+ * The returned hw_dequeue contains the lowest four bits with cycle state
+ * and possible stream context type.
+ */
+static u64 cdnsp_get_hw_deq(struct cdnsp_device *pdev,
+			    unsigned int ep_index,
+			    unsigned int stream_id)
+{
+	struct cdnsp_stream_ctx *st_ctx;
+	struct cdnsp_ep *pep;
+
+	pep = &pdev->eps[stream_id];
+
+	if (pep->ep_state & EP_HAS_STREAMS) {
+		st_ctx = &pep->stream_info.stream_ctx_array[stream_id];
+		return le64_to_cpu(st_ctx->stream_ring);
+	}
+
+	return le64_to_cpu(pep->out_ctx->deq);
+}
+
+/*
+ * Move the controller endpoint ring dequeue pointer past cur_td.
+ * Record the new state of the controller endpoint ring dequeue segment,
+ * dequeue pointer, and new consumer cycle state in state.
+ * Update internal representation of the ring's dequeue pointer.
+ *
+ * We do this in three jumps:
+ * - First we update our new ring state to be the same as when the
+ *   controller stopped.
+ * - Then we traverse the ring to find the segment that contains
+ *   the last TRB in the TD. We toggle the controller new cycle state
+ *   when we pass any link TRBs with the toggle cycle bit set.
+ * - Finally we move the dequeue state one TRB further, toggling the cycle bit
+ *   if we've moved it past a link TRB with the toggle cycle bit set.
+ */
+static void cdnsp_find_new_dequeue_state(struct cdnsp_device *pdev,
+					 struct cdnsp_ep *pep,
+					 unsigned int stream_id,
+					 struct cdnsp_td *cur_td,
+					 struct cdnsp_dequeue_state *state)
+{
+	bool td_last_trb_found = false;
+	struct cdnsp_segment *new_seg;
+	struct cdnsp_ring *ep_ring;
+	union cdnsp_trb *new_deq;
+	bool cycle_found = false;
+	u64 hw_dequeue;
+
+	ep_ring = cdnsp_get_transfer_ring(pdev, pep, stream_id);
+	if (!ep_ring)
+		return;
+
+	/*
+	 * Dig out the cycle state saved by the controller during the
+	 * stop endpoint command.
+	 */
+	hw_dequeue = cdnsp_get_hw_deq(pdev, pep->idx, stream_id);
+	new_seg = ep_ring->deq_seg;
+	new_deq = ep_ring->dequeue;
+	state->new_cycle_state = hw_dequeue & 0x1;
+	state->stream_id = stream_id;
+
+	/*
+	 * We want to find the pointer, segment and cycle state of the new trb
+	 * (the one after current TD's last_trb). We know the cycle state at
+	 * hw_dequeue, so walk the ring until both hw_dequeue and last_trb are
+	 * found.
+	 */
+	do {
+		if (!cycle_found && cdnsp_trb_virt_to_dma(new_seg, new_deq)
+		    == (dma_addr_t)(hw_dequeue & ~0xf)) {
+			cycle_found = true;
+
+			if (td_last_trb_found)
+				break;
+		}
+
+		if (new_deq == cur_td->last_trb)
+			td_last_trb_found = true;
+
+		if (cycle_found && cdnsp_trb_is_link(new_deq) &&
+		    cdnsp_link_trb_toggles_cycle(new_deq))
+			state->new_cycle_state ^= 0x1;
+
+		cdnsp_next_trb(pdev, ep_ring, &new_seg, &new_deq);
+
+		/* Search wrapped around, bail out. */
+		if (new_deq == pep->ring->dequeue) {
+			dev_err(pdev->dev,
+				"Error: Failed finding new dequeue state\n");
+			state->new_deq_seg = NULL;
+			state->new_deq_ptr = NULL;
+			return;
+		}
+
+	} while (!cycle_found || !td_last_trb_found);
+
+	state->new_deq_seg = new_seg;
+	state->new_deq_ptr = new_deq;
+}
+
+/*
+ * flip_cycle means flip the cycle bit of all but the first and last TRB.
+ * (The last TRB actually points to the ring enqueue pointer, which is not part
+ * of this TD.) This is used to remove partially enqueued isoc TDs from a ring.
+ */
+static void cdnsp_td_to_noop(struct cdnsp_device *pdev,
+			     struct cdnsp_ring *ep_ring,
+			     struct cdnsp_td *td,
+			     bool flip_cycle)
+{
+	struct cdnsp_segment *seg = td->start_seg;
+	union cdnsp_trb *trb = td->first_trb;
+
+	while (1) {
+		cdnsp_trb_to_noop(trb, TRB_TR_NOOP);
+
+		/* flip cycle if asked to */
+		if (flip_cycle && trb != td->first_trb && trb != td->last_trb)
+			trb->generic.field[3] ^= cpu_to_le32(TRB_CYCLE);
+
+		if (trb == td->last_trb)
+			break;
+
+		cdnsp_next_trb(pdev, ep_ring, &seg, &trb);
+	}
+}
+
+/*
+ * This TD is defined by the TRBs starting at start_trb in start_seg and ending
+ * at end_trb, which may be in another segment. If the suspect DMA address is a
+ * TRB in this TD, this function returns that TRB's segment. Otherwise it
+ * returns 0.
+ */
+static struct cdnsp_segment *cdnsp_trb_in_td(struct cdnsp_device *pdev,
+					     struct cdnsp_segment *start_seg,
+					     union cdnsp_trb *start_trb,
+					     union cdnsp_trb *end_trb,
+					     dma_addr_t suspect_dma)
+{
+	struct cdnsp_segment *cur_seg;
+	union cdnsp_trb *temp_trb;
+	dma_addr_t end_seg_dma;
+	dma_addr_t end_trb_dma;
+	dma_addr_t start_dma;
+
+	start_dma = cdnsp_trb_virt_to_dma(start_seg, start_trb);
+	cur_seg = start_seg;
+
+	do {
+		if (start_dma == 0)
+			return NULL;
+
+		temp_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1];
+		/* We may get an event for a Link TRB in the middle of a TD */
+		end_seg_dma = cdnsp_trb_virt_to_dma(cur_seg, temp_trb);
+		/* If the end TRB isn't in this segment, this is set to 0 */
+		end_trb_dma = cdnsp_trb_virt_to_dma(cur_seg, end_trb);
+
+		if (end_trb_dma > 0) {
+			/*
+			 * The end TRB is in this segment, so suspect should
+			 * be here
+			 */
+			if (start_dma <= end_trb_dma) {
+				if (suspect_dma >= start_dma &&
+				    suspect_dma <= end_trb_dma) {
+					return cur_seg;
+				}
+			} else {
+				/*
+				 * Case for one segment with a
+				 * TD wrapped around to the top
+				 */
+				if ((suspect_dma >= start_dma &&
+				     suspect_dma <= end_seg_dma) ||
+				    (suspect_dma >= cur_seg->dma &&
+				     suspect_dma <= end_trb_dma)) {
+					return cur_seg;
+				}
+			}
+
+			return NULL;
+		}
+
+		/* Might still be somewhere in this segment */
+		if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
+			return cur_seg;
+
+		cur_seg = cur_seg->next;
+		start_dma = cdnsp_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
+	} while (cur_seg != start_seg);
+
+	return NULL;
+}
+
+static void cdnsp_unmap_td_bounce_buffer(struct cdnsp_device *pdev,
+					 struct cdnsp_ring *ring,
+					 struct cdnsp_td *td)
+{
+	struct cdnsp_segment *seg = td->bounce_seg;
+	struct cdnsp_request *preq;
+	size_t len;
+
+	if (!seg)
+		return;
+
+	preq = td->preq;
+
+	if (!preq->direction) {
+		dma_unmap_single(pdev->dev, seg->bounce_dma,
+				 ring->bounce_buf_len, DMA_TO_DEVICE);
+		return;
+	}
+
+	dma_unmap_single(pdev->dev, seg->bounce_dma, ring->bounce_buf_len,
+			 DMA_FROM_DEVICE);
+
+	/* For in transfers we need to copy the data from bounce to sg */
+	len = sg_pcopy_from_buffer(preq->request.sg, preq->request.num_sgs,
+				   seg->bounce_buf, seg->bounce_len,
+				   seg->bounce_offs);
+	if (len != seg->bounce_len)
+		dev_warn(pdev->dev, "WARN Wrong bounce buffer read length: %zu != %d\n",
+			 len, seg->bounce_len);
+
+	seg->bounce_len = 0;
+	seg->bounce_offs = 0;
+}
+
+static int cdnsp_cmd_set_deq(struct cdnsp_device *pdev,
+			     struct cdnsp_ep *pep,
+			     struct cdnsp_dequeue_state *deq_state)
+{
+	struct cdnsp_ring *ep_ring;
+	int ret;
+
+	if (!deq_state->new_deq_ptr || !deq_state->new_deq_seg) {
+		cdnsp_ring_doorbell_for_active_rings(pdev, pep);
+		return 0;
+	}
+
+	cdnsp_queue_new_dequeue_state(pdev, pep, deq_state);
+	cdnsp_ring_cmd_db(pdev);
+	ret = cdnsp_wait_for_cmd_compl(pdev);
+
+	/*
+	 * Update the ring's dequeue segment and dequeue pointer
+	 * to reflect the new position.
+	 */
+	ep_ring = cdnsp_get_transfer_ring(pdev, pep, deq_state->stream_id);
+
+	if (cdnsp_trb_is_link(ep_ring->dequeue)) {
+		ep_ring->deq_seg = ep_ring->deq_seg->next;
+		ep_ring->dequeue = ep_ring->deq_seg->trbs;
+	}
+
+	while (ep_ring->dequeue != deq_state->new_deq_ptr) {
+		ep_ring->num_trbs_free++;
+		ep_ring->dequeue++;
+
+		if (cdnsp_trb_is_link(ep_ring->dequeue)) {
+			if (ep_ring->dequeue == deq_state->new_deq_ptr)
+				break;
+
+			ep_ring->deq_seg = ep_ring->deq_seg->next;
+			ep_ring->dequeue = ep_ring->deq_seg->trbs;
+		}
+	}
+
+	/*
+	 * Probably there was a TIMEOUT while handling the Set Dequeue Pointer
+	 * command. It's a critical error and the controller will be stopped.
+	 */
+	if (ret)
+		return -ESHUTDOWN;
+
+	/* Restart any rings with pending requests */
+	cdnsp_ring_doorbell_for_active_rings(pdev, pep);
+
+	return 0;
+}
+
+int cdnsp_remove_request(struct cdnsp_device *pdev,
+			 struct cdnsp_request *preq,
+			 struct cdnsp_ep *pep)
+{
+	struct cdnsp_dequeue_state deq_state;
+	struct cdnsp_td *cur_td = NULL;
+	struct cdnsp_ring *ep_ring;
+	struct cdnsp_segment *seg;
+	int status = -ECONNRESET;
+	int ret = 0;
+	u64 hw_deq;
+
+	memset(&deq_state, 0, sizeof(deq_state));
+
+	cur_td = &preq->td;
+	ep_ring = cdnsp_request_to_transfer_ring(pdev, preq);
+
+	/*
+	 * If we stopped on the TD we need to cancel, then we have to
+	 * move the controller endpoint ring dequeue pointer past
+	 * this TD.
+	 */
+	hw_deq = cdnsp_get_hw_deq(pdev, pep->idx, preq->request.stream_id);
+	hw_deq &= ~0xf;
+
+	seg = cdnsp_trb_in_td(pdev, cur_td->start_seg, cur_td->first_trb,
+			      cur_td->last_trb, hw_deq);
+
+	if (seg && (pep->ep_state & EP_ENABLED))
+		cdnsp_find_new_dequeue_state(pdev, pep, preq->request.stream_id,
+					     cur_td, &deq_state);
+	else
+		cdnsp_td_to_noop(pdev, ep_ring, cur_td, false);
+
+	/*
+	 * The event handler won't see a completion for this TD anymore,
+	 * so remove it from the endpoint ring's TD list.
+	 */
+	list_del_init(&cur_td->td_list);
+	ep_ring->num_tds--;
+	pep->stream_info.td_count--;
+
+	/*
+	 * During disconnection all endpoints will be disabled, so we don't
+	 * have to worry about updating the dequeue pointer.
+	 */
+	if (pdev->cdnsp_state & CDNSP_STATE_DISCONNECT_PENDING) {
+		status = -ESHUTDOWN;
+		ret = cdnsp_cmd_set_deq(pdev, pep, &deq_state);
+	}
+
+	cdnsp_unmap_td_bounce_buffer(pdev, ep_ring, cur_td);
+	cdnsp_gadget_giveback(pep, cur_td->preq, status);
+
+	return ret;
+}
+
+static int cdnsp_update_port_id(struct cdnsp_device *pdev, u32 port_id)
+{
+	struct cdnsp_port *port = pdev->active_port;
+	u8 old_port = 0;
+
+	if (port && port->port_num == port_id)
+		return 0;
+
+	if (port)
+		old_port = port->port_num;
+
+	if (port_id == pdev->usb2_port.port_num) {
+		port = &pdev->usb2_port;
+	} else if (port_id == pdev->usb3_port.port_num) {
+		port = &pdev->usb3_port;
+	} else {
+		dev_err(pdev->dev, "Port event with invalid port ID %d\n",
+			port_id);
+		return -EINVAL;
+	}
+
+	if (port_id != old_port) {
+		cdnsp_disable_slot(pdev);
+		pdev->active_port = port;
+		cdnsp_enable_slot(pdev);
+	}
+
+	if (port_id == pdev->usb2_port.port_num)
+		cdnsp_set_usb2_hardware_lpm(pdev, NULL, 1);
+	else
+		writel(PORT_U1_TIMEOUT(1) | PORT_U2_TIMEOUT(1),
+		       &pdev->usb3_port.regs->portpmsc);
+
+	return 0;
+}
+
+static void cdnsp_handle_port_status(struct cdnsp_device *pdev,
+				     union cdnsp_trb *event)
+{
+	struct cdnsp_port_regs __iomem *port_regs;
+	u32 portsc, cmd_regs;
+	bool port2 = false;
+	u32 link_state;
+	u32 port_id;
+
+	/* Port status change events always have a successful completion code */
+	if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS)
+		dev_err(pdev->dev, "ERR: incorrect PSC event\n");
+
+	port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0]));
+
+	if (cdnsp_update_port_id(pdev, port_id))
+		goto cleanup;
+
+	port_regs = pdev->active_port->regs;
+
+	if (port_id == pdev->usb2_port.port_num)
+		port2 = true;
+
+new_event:
+	portsc = readl(&port_regs->portsc);
+	writel(cdnsp_port_state_to_neutral(portsc) |
+	       (portsc & PORT_CHANGE_BITS), &port_regs->portsc);
+
+	pdev->gadget.speed = cdnsp_port_speed(portsc);
+	link_state = portsc & PORT_PLS_MASK;
+
+	/* Port Link State change detected. */
+	if ((portsc & PORT_PLC)) {
+		if (!(pdev->cdnsp_state & CDNSP_WAKEUP_PENDING) &&
+		    link_state == XDEV_RESUME) {
+			cmd_regs = readl(&pdev->op_regs->command);
+			if (!(cmd_regs & CMD_R_S))
+				goto cleanup;
+
+			if (DEV_SUPERSPEED_ANY(portsc)) {
+				cdnsp_set_link_state(pdev, &port_regs->portsc,
+						     XDEV_U0);
+
+				cdnsp_resume_gadget(pdev);
+			}
+		}
+
+		if ((pdev->cdnsp_state & CDNSP_WAKEUP_PENDING) &&
+		    link_state == XDEV_U0) {
+			pdev->cdnsp_state &= ~CDNSP_WAKEUP_PENDING;
+
+			cdnsp_force_header_wakeup(pdev, 1);
+			cdnsp_ring_cmd_db(pdev);
+			cdnsp_wait_for_cmd_compl(pdev);
+		}
+
+		if (link_state == XDEV_U0 && pdev->link_state == XDEV_U3 &&
+		    !DEV_SUPERSPEED_ANY(portsc))
+			cdnsp_resume_gadget(pdev);
+
+		if (link_state == XDEV_U3 && pdev->link_state != XDEV_U3)
+			cdnsp_suspend_gadget(pdev);
+
+		pdev->link_state = link_state;
+	}
+
+	if (portsc & PORT_CSC) {
+		/* Detach device. */
+		if (pdev->gadget.connected && !(portsc & PORT_CONNECT))
+			cdnsp_disconnect_gadget(pdev);
+
+		/* Attach device. */
+		if (portsc & PORT_CONNECT) {
+			if (!port2)
+				cdnsp_irq_reset(pdev);
+
+			usb_gadget_set_state(&pdev->gadget, USB_STATE_ATTACHED);
+		}
+	}
+
+	/* Port reset. */
+	if ((portsc & (PORT_RC | PORT_WRC)) && (portsc & PORT_CONNECT)) {
+		cdnsp_irq_reset(pdev);
+		pdev->u1_allowed = 0;
+		pdev->u2_allowed = 0;
+		pdev->may_wakeup = 0;
+	}
+
+	if (portsc & PORT_OCC)
+		dev_err(pdev->dev, "Port Over Current detected\n");
+
+	if (portsc & PORT_CEC)
+		dev_err(pdev->dev, "Port Configure Error detected\n");
+
+	if (readl(&port_regs->portsc) & PORT_CHANGE_BITS)
+		goto new_event;
+
+cleanup:
+	cdnsp_inc_deq(pdev, pdev->event_ring);
+}
+
+static void cdnsp_td_cleanup(struct cdnsp_device *pdev,
+			     struct cdnsp_td *td,
+			     struct cdnsp_ring *ep_ring,
+			     int *status)
+{
+	struct cdnsp_request *preq = td->preq;
+
+	/* if a bounce buffer was used to align this td then unmap it */
+	cdnsp_unmap_td_bounce_buffer(pdev, ep_ring, td);
+
+	/*
+	 * If the controller said we transferred more data than the buffer
+	 * length, play it safe and say we didn't transfer anything.
+	 */
+	if (preq->request.actual > preq->request.length) {
+		preq->request.actual = 0;
+		*status = 0;
+	}
+
+	list_del_init(&td->td_list);
+	ep_ring->num_tds--;
+	preq->pep->stream_info.td_count--;
+
+	cdnsp_gadget_giveback(preq->pep, preq, *status);
+}
+
+static void cdnsp_finish_td(struct cdnsp_device *pdev,
+			    struct cdnsp_td *td,
+			    struct cdnsp_transfer_event *event,
+			    struct cdnsp_ep *ep,
+			    int *status)
+{
+	struct cdnsp_ring *ep_ring;
+	u32 trb_comp_code;
+
+	ep_ring = cdnsp_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
+	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
+
+	if (trb_comp_code == COMP_STOPPED_LENGTH_INVALID ||
+	    trb_comp_code == COMP_STOPPED ||
+	    trb_comp_code == COMP_STOPPED_SHORT_PACKET) {
+		/*
+		 * The Endpoint Stop Command completion will take care of any
+		 * stopped TDs. A stopped TD may be restarted, so don't update
+		 * the ring dequeue pointer or take this TD off any lists yet.
+		 */
+		return;
+	}
+
+	/* Update ring dequeue pointer */
+	while (ep_ring->dequeue != td->last_trb)
+		cdnsp_inc_deq(pdev, ep_ring);
+
+	cdnsp_inc_deq(pdev, ep_ring);
+
+	cdnsp_td_cleanup(pdev, td, ep_ring, status);
+}
+
+/* sum trb lengths from ring dequeue up to stop_trb, _excluding_ stop_trb */
+static int cdnsp_sum_trb_lengths(struct cdnsp_device *pdev,
+				 struct cdnsp_ring *ring,
+				 union cdnsp_trb *stop_trb)
+{
+	struct cdnsp_segment *seg = ring->deq_seg;
+	union cdnsp_trb *trb = ring->dequeue;
+	u32 sum;
+
+	for (sum = 0; trb != stop_trb; cdnsp_next_trb(pdev, ring, &seg, &trb)) {
+		if (!cdnsp_trb_is_noop(trb) && !cdnsp_trb_is_link(trb))
+			sum += TRB_LEN(le32_to_cpu(trb->generic.field[2]));
+	}
+	return sum;
+}
+
+static int cdnsp_giveback_first_trb(struct cdnsp_device *pdev,
+				    struct cdnsp_ep *pep,
+				    unsigned int stream_id,
+				    int start_cycle,
+				    struct cdnsp_generic_trb *start_trb)
+{
+	/*
+	 * Pass all the TRBs to the hardware at once and make sure this write
+	 * isn't reordered.
+	 */
+	wmb();
+
+	if (start_cycle)
+		start_trb->field[3] |= cpu_to_le32(start_cycle);
+	else
+		start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);
+
+	if ((pep->ep_state & EP_HAS_STREAMS) &&
+	    !pep->stream_info.first_prime_det)
+		return 0;
+
+	return cdnsp_ring_ep_doorbell(pdev, pep, stream_id);
+}
+
+/*
+ * Process control tds, update USB request status and actual_length.
+ */
+static void cdnsp_process_ctrl_td(struct cdnsp_device *pdev,
+				  struct cdnsp_td *td,
+				  union cdnsp_trb *event_trb,
+				  struct cdnsp_transfer_event *event,
+				  struct cdnsp_ep *pep,
+				  int *status)
+{
+	struct cdnsp_ring *ep_ring;
+	u32 remaining;
+	u32 trb_type;
+
+	trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event_trb->generic.field[3]));
+	ep_ring = cdnsp_dma_to_transfer_ring(pep, le64_to_cpu(event->buffer));
+	remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
+
+	/*
+	 * if on data stage then update the actual_length of the USB
+	 * request and flag it as set, so it won't be overwritten in the event
+	 * for the last TRB.
+	 */
+	if (trb_type == TRB_DATA) {
+		td->request_length_set = true;
+		td->preq->request.actual = td->preq->request.length - remaining;
+	}
+
+	/* at status stage */
+	if (!td->request_length_set)
+		td->preq->request.actual = td->preq->request.length;
+
+	if (pdev->ep0_stage == CDNSP_DATA_STAGE && pep->number == 0 &&
+	    pdev->three_stage_setup) {
+		td = list_entry(ep_ring->td_list.next, struct cdnsp_td,
+				td_list);
+		pdev->ep0_stage = CDNSP_STATUS_STAGE;
+
+		cdnsp_giveback_first_trb(pdev, pep, 0, ep_ring->cycle_state,
+					 &td->last_trb->generic);
+		return;
+	}
+
+	cdnsp_finish_td(pdev, td, event, pep, status);
+}
+
+/*
+ * Process isochronous tds, update usb request status and actual_length.
+ */
+static void cdnsp_process_isoc_td(struct cdnsp_device *pdev,
+				  struct cdnsp_td *td,
+				  union cdnsp_trb *ep_trb,
+				  struct cdnsp_transfer_event *event,
+				  struct cdnsp_ep *pep,
+				  int status)
+{
+	struct cdnsp_request *preq = td->preq;
+	u32 remaining, requested, ep_trb_len;
+	bool sum_trbs_for_length = false;
+	struct cdnsp_ring *ep_ring;
+	u32 trb_comp_code;
+	u32 td_length;
+
+	ep_ring = cdnsp_dma_to_transfer_ring(pep, le64_to_cpu(event->buffer));
+	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
+	remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
+	ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2]));
+
+	requested = preq->request.length;
+
+	/* handle completion code */
+	switch (trb_comp_code) {
+	case COMP_SUCCESS:
+		preq->request.status = 0;
+		break;
+	case COMP_SHORT_PACKET:
+		preq->request.status = 0;
+		sum_trbs_for_length = true;
+		break;
+	case COMP_ISOCH_BUFFER_OVERRUN:
+	case COMP_BABBLE_DETECTED_ERROR:
+		preq->request.status = -EOVERFLOW;
+		break;
+	case COMP_STOPPED:
+		sum_trbs_for_length = true;
+		break;
+	case COMP_STOPPED_SHORT_PACKET:
+		/* field normally containing residue now contains transferred */
+		preq->request.status = 0;
+		requested = remaining;
+		break;
+	case COMP_STOPPED_LENGTH_INVALID:
+		requested = 0;
+		remaining = 0;
+		break;
+	default:
+		sum_trbs_for_length = true;
+		preq->request.status = -1;
+		break;
+	}
+
+	if (sum_trbs_for_length) {
+		td_length = cdnsp_sum_trb_lengths(pdev, ep_ring, ep_trb);
+		td_length += ep_trb_len - remaining;
+	} else {
+		td_length = requested;
+	}
+
+	td->preq->request.actual += td_length;
+
+	cdnsp_finish_td(pdev, td, event, pep, &status);
+}
+
+static void cdnsp_skip_isoc_td(struct cdnsp_device *pdev,
+			       struct cdnsp_td *td,
+			       struct cdnsp_transfer_event *event,
+			       struct cdnsp_ep *pep,
+			       int status)
+{
+	struct cdnsp_ring *ep_ring;
+
+	ep_ring = cdnsp_dma_to_transfer_ring(pep, le64_to_cpu(event->buffer));
+	td->preq->request.status = -EXDEV;
+	td->preq->request.actual = 0;
+
+	/* Update ring dequeue pointer */
+	while (ep_ring->dequeue != td->last_trb)
+		cdnsp_inc_deq(pdev, ep_ring);
+
+	cdnsp_inc_deq(pdev, ep_ring);
+
+	cdnsp_td_cleanup(pdev, td, ep_ring, &status);
+}
+
+/*
+ * Process bulk and interrupt tds, update usb request status and actual_length.
+ */
+static void cdnsp_process_bulk_intr_td(struct cdnsp_device *pdev,
+				       struct cdnsp_td *td,
+				       union cdnsp_trb *ep_trb,
+				       struct cdnsp_transfer_event *event,
+				       struct cdnsp_ep *ep,
+				       int *status)
+{
+	u32 remaining, requested, ep_trb_len;
+	struct cdnsp_ring *ep_ring;
+	u32 trb_comp_code;
+
+	ep_ring = cdnsp_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
+	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
+	remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
+	ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2]));
+	requested = td->preq->request.length;
+
+	switch (trb_comp_code) {
+	case COMP_SUCCESS:
+	case COMP_SHORT_PACKET:
+		*status = 0;
+		break;
+	case COMP_STOPPED_SHORT_PACKET:
+		td->preq->request.actual = remaining;
+		goto finish_td;
+	case COMP_STOPPED_LENGTH_INVALID:
+		/* Stopped on ep trb with invalid length, exclu