| author | Linus Torvalds <torvalds@linux-foundation.org> | 2020-01-31 14:40:36 -0800 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2020-01-31 14:40:36 -0800 |
| commit | 8fdd4019bcb2d824c5ab45c6fc340293cfed843f (patch) | |
| tree | 9a7627096f8c0d351c8ed0335da3cb1afb9049d9 /drivers/infiniband/hw/hfi1/driver.c | |
| parent | 68b62e5d965a8cbcfa287ee7754cf06e9b3775ab (diff) | |
| parent | 8889f6fa35884d09f24734e10fea0c9ddcbc6429 (diff) | |
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
Pull rdma updates from Jason Gunthorpe:
"A very quiet cycle with few notable changes. Mostly the usual list of
one or two patches to drivers changing something that isn't quite rc
worthy. The subsystem seems to be seeing a larger number of rework and
cleanup style patches right now, I feel that several vendors are
prepping their drivers for new silicon.
Summary:
- Driver updates and cleanup for qedr, bnxt_re, hns, siw, mlx5, mlx4,
rxe, i40iw
- Larger series doing cleanup and rework for hns and hfi1.
- Some general reworking of the CM code to make it a little more
understandable
- Unify the different code paths connected to the uverbs FD scheme
- New UAPI ioctls conversions for get context and get async fd
- Trace points for CQ and CM portions of the RDMA stack
- mlx5 driver support for virtio-net formatted rings as RDMA raw
ethernet QPs
- verbs support for setting the PCI-E relaxed ordering bit on DMA
traffic connected to a MR
- A couple of bug fixes that came too late to make rc7"
* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (108 commits)
RDMA/core: Make the entire API tree static
RDMA/efa: Mask access flags with the correct optional range
RDMA/cma: Fix unbalanced cm_id reference count during address resolve
RDMA/umem: Fix ib_umem_find_best_pgsz()
IB/mlx4: Fix leak in id_map_find_del
IB/opa_vnic: Spelling correction of 'erorr' to 'error'
IB/hfi1: Fix logical condition in msix_request_irq
RDMA/cm: Remove CM message structs
RDMA/cm: Use IBA functions for complex structure members
RDMA/cm: Use IBA functions for simple structure members
RDMA/cm: Use IBA functions for swapping get/set acessors
RDMA/cm: Use IBA functions for simple get/set acessors
RDMA/cm: Add SET/GET implementations to hide IBA wire format
RDMA/cm: Add accessors for CM_REQ transport_type
IB/mlx5: Return the administrative GUID if exists
RDMA/core: Ensure that rdma_user_mmap_entry_remove() is a fence
IB/mlx4: Fix memory leak in add_gid error flow
IB/mlx5: Expose RoCE accelerator counters
RDMA/mlx5: Set relaxed ordering when requested
RDMA/core: Add the core support field to METHOD_GET_CONTEXT
...
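One item in the summary above, the verbs support for setting the PCI-E relaxed ordering bit on MR DMA traffic, surfaces to applications as an optional MR access flag. A minimal user-space sketch, assuming a recent rdma-core that exposes `IBV_ACCESS_RELAXED_ORDERING`; the helper name and error handling below are illustrative, not part of this merge:

```c
#include <stdio.h>
#include <stddef.h>
#include <infiniband/verbs.h>

/*
 * Register an MR and request PCIe relaxed ordering for its DMA traffic.
 * Relaxed ordering is an optional access flag, so providers or kernels
 * that do not support it are expected to ignore the hint rather than
 * fail the registration.
 */
static struct ibv_mr *reg_mr_relaxed(struct ibv_pd *pd, void *buf, size_t len)
{
        struct ibv_mr *mr;

        mr = ibv_reg_mr(pd, buf, len,
                        IBV_ACCESS_LOCAL_WRITE |
                        IBV_ACCESS_REMOTE_WRITE |
                        IBV_ACCESS_RELAXED_ORDERING);
        if (!mr)
                perror("ibv_reg_mr");
        return mr;
}
```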
Diffstat (limited to 'drivers/infiniband/hw/hfi1/driver.c')

| mode | file | changes |
|---|---|---|
| -rw-r--r-- | drivers/infiniband/hw/hfi1/driver.c | 237 |

1 file changed, 92 insertions, 145 deletions
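Much of the churn in the diff below is mechanical: direct accesses to `struct hfi1_ctxtdata` fields (`rcd->rcvhdrqentsize`, `rcd->rcvhdrq_cnt`, `rcd->head`, the `DMA_RTAIL` capability test) are routed through small helpers so the receive fast path can be restructured. The helpers themselves live in hfi1.h and are not shown in this diff; a sketch of their likely shape, inferred purely from the call sites here (field types and exact signatures are assumptions):

```c
/*
 * Sketch only: the real accessors are defined in hfi1.h, outside this diff.
 * Bodies and return types are inferred from how the call sites use them.
 */
static inline u32 get_hdrqentsize(struct hfi1_ctxtdata *rcd)
{
        return rcd->rcvhdrqentsize;     /* header queue entry size, in words */
}

static inline u32 get_hdrq_cnt(struct hfi1_ctxtdata *rcd)
{
        return rcd->rcvhdrq_cnt;        /* number of header queue entries */
}

static inline u32 hfi1_rcd_head(struct hfi1_ctxtdata *rcd)
{
        return rcd->head;               /* software head of the receive queue */
}

static inline void hfi1_set_rcd_head(struct hfi1_ctxtdata *rcd, u32 head)
{
        rcd->head = head;
}

static inline bool get_dma_rtail_setting(struct hfi1_ctxtdata *rcd)
{
        return !!HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL);
}
```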
```diff
diff --git a/drivers/infiniband/hw/hfi1/driver.c b/drivers/infiniband/hw/hfi1/driver.c
index 01aa1f132f55..049d15befe58 100644
--- a/drivers/infiniband/hw/hfi1/driver.c
+++ b/drivers/infiniband/hw/hfi1/driver.c
@@ -411,14 +411,14 @@ drop:
 static inline void init_packet(struct hfi1_ctxtdata *rcd,
                                struct hfi1_packet *packet)
 {
-        packet->rsize = rcd->rcvhdrqentsize; /* words */
-        packet->maxcnt = rcd->rcvhdrq_cnt * packet->rsize; /* words */
+        packet->rsize = get_hdrqentsize(rcd); /* words */
+        packet->maxcnt = get_hdrq_cnt(rcd) * packet->rsize; /* words */
         packet->rcd = rcd;
         packet->updegr = 0;
         packet->etail = -1;
         packet->rhf_addr = get_rhf_addr(rcd);
         packet->rhf = rhf_to_cpu(packet->rhf_addr);
-        packet->rhqoff = rcd->head;
+        packet->rhqoff = hfi1_rcd_head(rcd);
         packet->numpkt = 0;
 }
 
@@ -551,22 +551,22 @@ static inline void init_ps_mdata(struct ps_mdata *mdata,
         mdata->maxcnt = packet->maxcnt;
         mdata->ps_head = packet->rhqoff;
 
-        if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL)) {
+        if (get_dma_rtail_setting(rcd)) {
                 mdata->ps_tail = get_rcvhdrtail(rcd);
                 if (rcd->ctxt == HFI1_CTRL_CTXT)
-                        mdata->ps_seq = rcd->seq_cnt;
+                        mdata->ps_seq = hfi1_seq_cnt(rcd);
                 else
                         mdata->ps_seq = 0; /* not used with DMA_RTAIL */
         } else {
                 mdata->ps_tail = 0; /* used only with DMA_RTAIL*/
-                mdata->ps_seq = rcd->seq_cnt;
+                mdata->ps_seq = hfi1_seq_cnt(rcd);
         }
 }
 
 static inline int ps_done(struct ps_mdata *mdata, u64 rhf,
                           struct hfi1_ctxtdata *rcd)
 {
-        if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL))
+        if (get_dma_rtail_setting(rcd))
                 return mdata->ps_head == mdata->ps_tail;
         return mdata->ps_seq != rhf_rcv_seq(rhf);
 }
@@ -592,11 +592,9 @@ static inline void update_ps_mdata(struct ps_mdata *mdata,
                 mdata->ps_head = 0;
 
         /* Control context must do seq counting */
-        if (!HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ||
-            (rcd->ctxt == HFI1_CTRL_CTXT)) {
-                if (++mdata->ps_seq > 13)
-                        mdata->ps_seq = 1;
-        }
+        if (!get_dma_rtail_setting(rcd) ||
+            rcd->ctxt == HFI1_CTRL_CTXT)
+                mdata->ps_seq = hfi1_seq_incr_wrap(mdata->ps_seq);
 }
 
 /*
@@ -734,6 +732,7 @@ static noinline int skip_rcv_packet(struct hfi1_packet *packet, int thread)
 {
         int ret;
 
+        packet->rcd->dd->ctx0_seq_drop++;
         /* Set up for the next packet */
         packet->rhqoff += packet->rsize;
         if (packet->rhqoff >= packet->maxcnt)
@@ -769,7 +768,7 @@ static inline int process_rcv_packet(struct hfi1_packet *packet, int thread)
                  * The +2 is the size of the RHF.
                  */
                 prefetch_range(packet->ebuf,
-                               packet->tlen - ((packet->rcd->rcvhdrqentsize -
+                               packet->tlen - ((get_hdrqentsize(packet->rcd) -
                                                (rhf_hdrq_offset(packet->rhf)
                                                 + 2)) * 4));
         }
@@ -823,7 +822,7 @@ static inline void finish_packet(struct hfi1_packet *packet)
          * The only thing we need to do is a final update and call for an
          * interrupt
          */
-        update_usrhead(packet->rcd, packet->rcd->head, packet->updegr,
+        update_usrhead(packet->rcd, hfi1_rcd_head(packet->rcd), packet->updegr,
                        packet->etail, rcv_intr_dynamic, packet->numpkt);
 }
 
@@ -832,13 +831,11 @@ static inline void finish_packet(struct hfi1_packet *packet)
  */
 int handle_receive_interrupt_nodma_rtail(struct hfi1_ctxtdata *rcd, int thread)
 {
-        u32 seq;
         int last = RCV_PKT_OK;
         struct hfi1_packet packet;
 
         init_packet(rcd, &packet);
-        seq = rhf_rcv_seq(packet.rhf);
-        if (seq != rcd->seq_cnt) {
+        if (last_rcv_seq(rcd, rhf_rcv_seq(packet.rhf))) {
                 last = RCV_PKT_DONE;
                 goto bail;
         }
@@ -847,15 +844,12 @@ int handle_receive_interrupt_nodma_rtail(struct hfi1_ctxtdata *rcd, int thread)
 
         while (last == RCV_PKT_OK) {
                 last = process_rcv_packet(&packet, thread);
-                seq = rhf_rcv_seq(packet.rhf);
-                if (++rcd->seq_cnt > 13)
-                        rcd->seq_cnt = 1;
-                if (seq != rcd->seq_cnt)
+                if (hfi1_seq_incr(rcd, rhf_rcv_seq(packet.rhf)))
                         last = RCV_PKT_DONE;
                 process_rcv_update(last, &packet);
         }
         process_rcv_qp_work(&packet);
-        rcd->head = packet.rhqoff;
+        hfi1_set_rcd_head(rcd, packet.rhqoff);
 bail:
         finish_packet(&packet);
         return last;
@@ -884,15 +878,14 @@ int handle_receive_interrupt_dma_rtail(struct hfi1_ctxtdata *rcd, int thread)
                 process_rcv_update(last, &packet);
         }
         process_rcv_qp_work(&packet);
-        rcd->head = packet.rhqoff;
+        hfi1_set_rcd_head(rcd, packet.rhqoff);
 bail:
         finish_packet(&packet);
         return last;
 }
 
-static inline void set_nodma_rtail(struct hfi1_devdata *dd, u16 ctxt)
+static void set_all_fastpath(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
 {
-        struct hfi1_ctxtdata *rcd;
         u16 i;
 
         /*
@@ -900,50 +893,17 @@ static inline void set_nodma_rtail(struct hfi1_devdata *dd, u16 ctxt)
          * interrupt handler only for that context. Otherwise, switch
          * interrupt handler for all statically allocated kernel contexts.
          */
-        if (ctxt >= dd->first_dyn_alloc_ctxt) {
-                rcd = hfi1_rcd_get_by_index_safe(dd, ctxt);
-                if (rcd) {
-                        rcd->do_interrupt =
-                                &handle_receive_interrupt_nodma_rtail;
-                        hfi1_rcd_put(rcd);
-                }
-                return;
-        }
-
-        for (i = HFI1_CTRL_CTXT + 1; i < dd->first_dyn_alloc_ctxt; i++) {
-                rcd = hfi1_rcd_get_by_index(dd, i);
-                if (rcd)
-                        rcd->do_interrupt =
-                                &handle_receive_interrupt_nodma_rtail;
+        if (rcd->ctxt >= dd->first_dyn_alloc_ctxt && !rcd->is_vnic) {
+                hfi1_rcd_get(rcd);
+                hfi1_set_fast(rcd);
                 hfi1_rcd_put(rcd);
-        }
-}
-
-static inline void set_dma_rtail(struct hfi1_devdata *dd, u16 ctxt)
-{
-        struct hfi1_ctxtdata *rcd;
-        u16 i;
-
-        /*
-         * For dynamically allocated kernel contexts (like vnic) switch
-         * interrupt handler only for that context. Otherwise, switch
-         * interrupt handler for all statically allocated kernel contexts.
-         */
-        if (ctxt >= dd->first_dyn_alloc_ctxt) {
-                rcd = hfi1_rcd_get_by_index_safe(dd, ctxt);
-                if (rcd) {
-                        rcd->do_interrupt =
-                                &handle_receive_interrupt_dma_rtail;
-                        hfi1_rcd_put(rcd);
-                }
                 return;
         }
 
-        for (i = HFI1_CTRL_CTXT + 1; i < dd->first_dyn_alloc_ctxt; i++) {
+        for (i = HFI1_CTRL_CTXT + 1; i < dd->num_rcv_contexts; i++) {
                 rcd = hfi1_rcd_get_by_index(dd, i);
-                if (rcd)
-                        rcd->do_interrupt =
-                                &handle_receive_interrupt_dma_rtail;
+                if (rcd && (i < dd->first_dyn_alloc_ctxt || rcd->is_vnic))
+                        hfi1_set_fast(rcd);
                 hfi1_rcd_put(rcd);
         }
 }
@@ -959,17 +919,14 @@ void set_all_slowpath(struct hfi1_devdata *dd)
                 if (!rcd)
                         continue;
                 if (i < dd->first_dyn_alloc_ctxt || rcd->is_vnic)
-                        rcd->do_interrupt = &handle_receive_interrupt;
+                        rcd->do_interrupt = rcd->slow_handler;
                 hfi1_rcd_put(rcd);
         }
 }
 
-static inline int set_armed_to_active(struct hfi1_ctxtdata *rcd,
-                                      struct hfi1_packet *packet,
-                                      struct hfi1_devdata *dd)
+static bool __set_armed_to_active(struct hfi1_packet *packet)
 {
-        struct work_struct *lsaw = &rcd->ppd->linkstate_active_work;
         u8 etype = rhf_rcv_type(packet->rhf);
         u8 sc = SC15_PACKET;
 
@@ -984,19 +941,34 @@ static inline int set_armed_to_active(struct hfi1_ctxtdata *rcd,
                 sc = hfi1_16B_get_sc(hdr);
         }
         if (sc != SC15_PACKET) {
-                int hwstate = driver_lstate(rcd->ppd);
+                int hwstate = driver_lstate(packet->rcd->ppd);
+                struct work_struct *lsaw =
+                                &packet->rcd->ppd->linkstate_active_work;
 
                 if (hwstate != IB_PORT_ACTIVE) {
-                        dd_dev_info(dd,
+                        dd_dev_info(packet->rcd->dd,
                                     "Unexpected link state %s\n",
                                     opa_lstate_name(hwstate));
-                        return 0;
+                        return false;
                 }
 
-                queue_work(rcd->ppd->link_wq, lsaw);
-                return 1;
+                queue_work(packet->rcd->ppd->link_wq, lsaw);
+                return true;
         }
-        return 0;
+        return false;
+}
+
+/**
+ * armed to active - the fast path for armed to active
+ * @packet: the packet structure
+ *
+ * Return true if packet processing needs to bail.
+ */
+static bool set_armed_to_active(struct hfi1_packet *packet)
+{
+        if (likely(packet->rcd->ppd->host_link_state != HLS_UP_ARMED))
+                return false;
+        return __set_armed_to_active(packet);
 }
 
 /*
@@ -1019,10 +991,8 @@ int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread)
 
         init_packet(rcd, &packet);
 
-        if (!HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL)) {
-                u32 seq = rhf_rcv_seq(packet.rhf);
-
-                if (seq != rcd->seq_cnt) {
+        if (!get_dma_rtail_setting(rcd)) {
+                if (last_rcv_seq(rcd, rhf_rcv_seq(packet.rhf))) {
                         last = RCV_PKT_DONE;
                         goto bail;
                 }
@@ -1039,22 +1009,15 @@ int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread)
                  * Control context can potentially receive an invalid
                  * rhf. Drop such packets.
                  */
-                if (rcd->ctxt == HFI1_CTRL_CTXT) {
-                        u32 seq = rhf_rcv_seq(packet.rhf);
-
-                        if (seq != rcd->seq_cnt)
+                if (rcd->ctxt == HFI1_CTRL_CTXT)
+                        if (last_rcv_seq(rcd, rhf_rcv_seq(packet.rhf)))
                                 skip_pkt = 1;
-                }
         }
 
         prescan_rxq(rcd, &packet);
 
         while (last == RCV_PKT_OK) {
-                if (unlikely(dd->do_drop &&
-                             atomic_xchg(&dd->drop_packet, DROP_PACKET_OFF) ==
-                             DROP_PACKET_ON)) {
-                        dd->do_drop = 0;
-
+                if (hfi1_need_drop(dd)) {
                         /* On to the next packet */
                         packet.rhqoff += packet.rsize;
                         packet.rhf_addr = (__le32 *)rcd->rcvhdrq +
@@ -1066,26 +1029,14 @@ int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread)
                         last = skip_rcv_packet(&packet, thread);
                         skip_pkt = 0;
                 } else {
-                        /* Auto activate link on non-SC15 packet receive */
-                        if (unlikely(rcd->ppd->host_link_state ==
-                                     HLS_UP_ARMED) &&
-                            set_armed_to_active(rcd, &packet, dd))
+                        if (set_armed_to_active(&packet))
                                 goto bail;
                         last = process_rcv_packet(&packet, thread);
                 }
 
-                if (!HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL)) {
-                        u32 seq = rhf_rcv_seq(packet.rhf);
-
-                        if (++rcd->seq_cnt > 13)
-                                rcd->seq_cnt = 1;
-                        if (seq != rcd->seq_cnt)
+                if (!get_dma_rtail_setting(rcd)) {
+                        if (hfi1_seq_incr(rcd, rhf_rcv_seq(packet.rhf)))
                                 last = RCV_PKT_DONE;
-                        if (needset) {
-                                dd_dev_info(dd, "Switching to NO_DMA_RTAIL\n");
-                                set_nodma_rtail(dd, rcd->ctxt);
-                                needset = 0;
-                        }
                 } else {
                         if (packet.rhqoff == hdrqtail)
                                 last = RCV_PKT_DONE;
@@ -1094,27 +1045,24 @@ int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread)
                          * rhf. Drop such packets.
                          */
                         if (rcd->ctxt == HFI1_CTRL_CTXT) {
-                                u32 seq = rhf_rcv_seq(packet.rhf);
+                                bool lseq;
 
-                                if (++rcd->seq_cnt > 13)
-                                        rcd->seq_cnt = 1;
-                                if (!last && (seq != rcd->seq_cnt))
+                                lseq = hfi1_seq_incr(rcd,
+                                                     rhf_rcv_seq(packet.rhf));
+                                if (!last && lseq)
                                         skip_pkt = 1;
                         }
-
-                        if (needset) {
-                                dd_dev_info(dd,
-                                            "Switching to DMA_RTAIL\n");
-                                set_dma_rtail(dd, rcd->ctxt);
-                                needset = 0;
-                        }
                 }
 
+                if (needset) {
+                        needset = false;
+                        set_all_fastpath(dd, rcd);
+                }
                 process_rcv_update(last, &packet);
         }
 
         process_rcv_qp_work(&packet);
-        rcd->head = packet.rhqoff;
+        hfi1_set_rcd_head(rcd, packet.rhqoff);
 bail:
         /*
          * Always write head at end, and setup rcv interrupt, even
@@ -1606,23 +1554,22 @@ void handle_eflags(struct hfi1_packet *packet)
  * The following functions are called by the interrupt handler. They are type
  * specific handlers for each packet type.
  */
-static int process_receive_ib(struct hfi1_packet *packet)
+static void process_receive_ib(struct hfi1_packet *packet)
 {
         if (hfi1_setup_9B_packet(packet))
-                return RHF_RCV_CONTINUE;
+                return;
 
         if (unlikely(hfi1_dbg_should_fault_rx(packet)))
-                return RHF_RCV_CONTINUE;
+                return;
 
         trace_hfi1_rcvhdr(packet);
 
         if (unlikely(rhf_err_flags(packet->rhf))) {
                 handle_eflags(packet);
-                return RHF_RCV_CONTINUE;
+                return;
         }
 
         hfi1_ib_rcv(packet);
-        return RHF_RCV_CONTINUE;
 }
 
 static inline bool hfi1_is_vnic_packet(struct hfi1_packet *packet)
@@ -1638,23 +1585,23 @@ static inline bool hfi1_is_vnic_packet(struct hfi1_packet *packet)
         return false;
 }
 
-static int process_receive_bypass(struct hfi1_packet *packet)
+static void process_receive_bypass(struct hfi1_packet *packet)
 {
         struct hfi1_devdata *dd = packet->rcd->dd;
 
         if (hfi1_is_vnic_packet(packet)) {
                 hfi1_vnic_bypass_rcv(packet);
-                return RHF_RCV_CONTINUE;
+                return;
         }
 
         if (hfi1_setup_bypass_packet(packet))
-                return RHF_RCV_CONTINUE;
+                return;
 
         trace_hfi1_rcvhdr(packet);
 
         if (unlikely(rhf_err_flags(packet->rhf))) {
                 handle_eflags(packet);
-                return RHF_RCV_CONTINUE;
+                return;
         }
 
         if (hfi1_16B_get_l2(packet->hdr) == 0x2) {
@@ -1677,17 +1624,16 @@ static int process_receive_bypass(struct hfi1_packet *packet)
                                 (OPA_EI_STATUS_SMASK | BAD_L2_ERR);
                 }
         }
-        return RHF_RCV_CONTINUE;
 }
 
-static int process_receive_error(struct hfi1_packet *packet)
+static void process_receive_error(struct hfi1_packet *packet)
 {
         /* KHdrHCRCErr -- KDETH packet with a bad HCRC */
         if (unlikely(
                  hfi1_dbg_fault_suppress_err(&packet->rcd->dd->verbs_dev) &&
                  (rhf_rcv_type_err(packet->rhf) == RHF_RCV_TYPE_ERROR ||
                   packet->rhf & RHF_DC_ERR)))
-                return RHF_RCV_CONTINUE;
+                return;
 
         hfi1_setup_ib_header(packet);
         handle_eflags(packet);
@@ -1695,32 +1641,29 @@ static int process_receive_error(struct hfi1_packet *packet)
         if (unlikely(rhf_err_flags(packet->rhf)))
                 dd_dev_err(packet->rcd->dd,
                            "Unhandled error packet received. Dropping.\n");
-
-        return RHF_RCV_CONTINUE;
 }
 
-static int kdeth_process_expected(struct hfi1_packet *packet)
+static void kdeth_process_expected(struct hfi1_packet *packet)
 {
         hfi1_setup_9B_packet(packet);
         if (unlikely(hfi1_dbg_should_fault_rx(packet)))
-                return RHF_RCV_CONTINUE;
+                return;
 
         if (unlikely(rhf_err_flags(packet->rhf))) {
                 struct hfi1_ctxtdata *rcd = packet->rcd;
 
                 if (hfi1_handle_kdeth_eflags(rcd, rcd->ppd, packet))
-                        return RHF_RCV_CONTINUE;
+                        return;
         }
 
         hfi1_kdeth_expected_rcv(packet);
-        return RHF_RCV_CONTINUE;
 }
 
-static int kdeth_process_eager(struct hfi1_packet *packet)
+static void kdeth_process_eager(struct hfi1_packet *packet)
 {
         hfi1_setup_9B_packet(packet);
         if (unlikely(hfi1_dbg_should_fault_rx(packet)))
-                return RHF_RCV_CONTINUE;
+                return;
 
         trace_hfi1_rcvhdr(packet);
         if (unlikely(rhf_err_flags(packet->rhf))) {
@@ -1728,37 +1671,41 @@ static int kdeth_process_eager(struct hfi1_packet *packet)
 
                 show_eflags_errs(packet);
                 if (hfi1_handle_kdeth_eflags(rcd, rcd->ppd, packet))
-                        return RHF_RCV_CONTINUE;
+                        return;
         }
 
         hfi1_kdeth_eager_rcv(packet);
-        return RHF_RCV_CONTINUE;
 }
 
-static int process_receive_invalid(struct hfi1_packet *packet)
+static void process_receive_invalid(struct hfi1_packet *packet)
 {
         dd_dev_err(packet->rcd->dd, "Invalid packet type %d. Dropping\n",
                    rhf_rcv_type(packet->rhf));
-        return RHF_RCV_CONTINUE;
 }
 
+#define HFI1_RCVHDR_DUMP_MAX 5
+
 void seqfile_dump_rcd(struct seq_file *s, struct hfi1_ctxtdata *rcd)
 {
         struct hfi1_packet packet;
         struct ps_mdata mdata;
+        int i;
 
-        seq_printf(s, "Rcd %u: RcvHdr cnt %u entsize %u %s head %llu tail %llu\n",
-                   rcd->ctxt, rcd->rcvhdrq_cnt, rcd->rcvhdrqentsize,
-                   HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ?
+        seq_printf(s, "Rcd %u: RcvHdr cnt %u entsize %u %s ctrl 0x%08llx status 0x%08llx, head %llu tail %llu sw head %u\n",
+                   rcd->ctxt, get_hdrq_cnt(rcd), get_hdrqentsize(rcd),
+                   get_dma_rtail_setting(rcd) ?
                    "dma_rtail" : "nodma_rtail",
+                   read_kctxt_csr(rcd->dd, rcd->ctxt, RCV_CTXT_CTRL),
+                   read_kctxt_csr(rcd->dd, rcd->ctxt, RCV_CTXT_STATUS),
                    read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD) &
                    RCV_HDR_HEAD_HEAD_MASK,
-                   read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL));
+                   read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL),
+                   rcd->head);
 
         init_packet(rcd, &packet);
         init_ps_mdata(&mdata, &packet);
 
-        while (1) {
+        for (i = 0; i < HFI1_RCVHDR_DUMP_MAX; i++) {
                 __le32 *rhf_addr = (__le32 *)rcd->rcvhdrq + mdata.ps_head +
                                          rcd->rhf_offset;
                 struct ib_header *hdr;
```
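The other recurring change in this diff is the RHF sequence-counter handling: the open-coded wrap `if (++seq > 13) seq = 1;` and the mismatch checks are folded into helpers such as `hfi1_seq_incr_wrap()`, `hfi1_seq_incr()` and `last_rcv_seq()`, which are defined outside the file shown here. A minimal sketch of the behaviour those call sites rely on, reconstructed from the lines removed above (signatures and types are assumptions):

```c
/*
 * Sketch of the seq-counter helpers referenced above.  The hardware RHF
 * sequence field runs 1..13 and then wraps back to 1.
 */
static inline u8 hfi1_seq_incr_wrap(u8 seq)
{
        if (++seq > 13)
                seq = 1;
        return seq;
}

/*
 * True when the given RHF sequence does not match the expected one,
 * i.e. there is no new packet at the software head.
 */
static inline bool last_rcv_seq(struct hfi1_ctxtdata *rcd, u32 seq)
{
        return seq != rcd->seq_cnt;
}

/* Advance the expected sequence (with wrap) and report a mismatch. */
static inline bool hfi1_seq_incr(struct hfi1_ctxtdata *rcd, u32 seq)
{
        rcd->seq_cnt = hfi1_seq_incr_wrap(rcd->seq_cnt);
        return last_rcv_seq(rcd, seq);
}
```

The diff also drops the runtime switching between the DMA_RTAIL and no-DMA_RTAIL interrupt handlers; set_all_fastpath() now promotes contexts to a per-context fast handler via hfi1_set_fast(), mirroring the rcd->slow_handler fallback used by set_all_slowpath().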