| author | Jason Gunthorpe <jgg@mellanox.com> | 2019-06-28 21:18:23 -0300 |
|---|---|---|
| committer | Jason Gunthorpe <jgg@mellanox.com> | 2019-06-28 21:18:23 -0300 |
| commit | 371bb62158d53c1fc33e2fb9b6aeb9522caf6cf4 (patch) | |
| tree | 9442ae9b22e3cf24fbe8dcefae8862a3e4e05629 /drivers/infiniband/hw/hfi1/user_sdma.c | |
| parent | 10dcc7448e9ea49488a38bca7551de1a9da06ad9 (diff) | |
| parent | 4b972a01a7da614b4796475f933094751a295a2f (diff) | |
Merge tag 'v5.2-rc6' into rdma.git for-next
For dependencies in the next patches.
Resolve conflicts:
- Use uverbs_get_cleared_udata() with new cq allocation flow
- Continue to delete nes despite SPDX conflict
- Resolve list appends in mlx5_command_str()
- Use u16 for vport_rule stuff
- Resolve list appends in struct ib_client
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Diffstat (limited to 'drivers/infiniband/hw/hfi1/user_sdma.c')
| -rw-r--r-- | drivers/infiniband/hw/hfi1/user_sdma.c | 12 |
1 file changed, 4 insertions, 8 deletions
```diff
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
index 8bfbc6d7ea34..fd754a16475a 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.c
+++ b/drivers/infiniband/hw/hfi1/user_sdma.c
@@ -130,20 +130,16 @@ static int defer_packet_queue(
 {
 	struct hfi1_user_sdma_pkt_q *pq =
 		container_of(wait->iow, struct hfi1_user_sdma_pkt_q, busy);
-	struct user_sdma_txreq *tx =
-		container_of(txreq, struct user_sdma_txreq, txreq);
 
-	if (sdma_progress(sde, seq, txreq)) {
-		if (tx->busycount++ < MAX_DEFER_RETRY_COUNT)
-			goto eagain;
-	}
+	write_seqlock(&sde->waitlock);
+	if (sdma_progress(sde, seq, txreq))
+		goto eagain;
 	/*
 	 * We are assuming that if the list is enqueued somewhere, it
 	 * is to the dmawait list since that is the only place where
 	 * it is supposed to be enqueued.
 	 */
 	xchg(&pq->state, SDMA_PKT_Q_DEFERRED);
-	write_seqlock(&sde->waitlock);
 	if (list_empty(&pq->busy.list)) {
 		iowait_get_priority(&pq->busy);
 		iowait_queue(pkts_sent, &pq->busy, &sde->dmawait);
@@ -151,6 +147,7 @@ static int defer_packet_queue(
 	write_sequnlock(&sde->waitlock);
 	return -EBUSY;
 eagain:
+	write_sequnlock(&sde->waitlock);
 	return -EAGAIN;
 }
 
@@ -804,7 +801,6 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, u16 maxpkts)
 
 	tx->flags = 0;
 	tx->req = req;
-	tx->busycount = 0;
 	INIT_LIST_HEAD(&tx->list);
 
 	/*
```
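The substance of the hunk is a locking change: `write_seqlock(&sde->waitlock)` is now taken before the `sdma_progress()` check instead of after it, the `busycount` retry counter is dropped, and the `eagain:` exit gains a matching `write_sequnlock()`. The stand-alone sketch below (not kernel code; `defer_packet_queue_sketch`, `engine_made_progress`, and `queue_busy` are hypothetical stand-ins, and a pthread mutex stands in for the seqlock) illustrates that lock-before-check, unlock-on-every-exit pattern under those assumptions.

```c
/*
 * Simplified, self-contained sketch of the locking pattern the hunk
 * applies: the lock is taken *before* the progress check, so state
 * cannot change between the check and the enqueue decision, and both
 * exit paths release the lock.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t waitlock = PTHREAD_MUTEX_INITIALIZER; /* stand-in for sde->waitlock */
static bool queue_busy;                                      /* stand-in for the dmawait list state */

/* Hypothetical stand-in for sdma_progress(): true if the engine advanced. */
static bool engine_made_progress(void)
{
	return false;
}

static int defer_packet_queue_sketch(void)
{
	pthread_mutex_lock(&waitlock);       /* was: write_seqlock(&sde->waitlock) */
	if (engine_made_progress())
		goto eagain;                 /* progress was made: retry instead of queueing */

	if (!queue_busy)
		queue_busy = true;           /* was: iowait_queue(...) done under the lock */

	pthread_mutex_unlock(&waitlock);
	return -1;                           /* -EBUSY analogue: caller waits for a wakeup */

eagain:
	pthread_mutex_unlock(&waitlock);     /* the added unlock: this path now releases too */
	return -2;                           /* -EAGAIN analogue: caller retries */
}

int main(void)
{
	printf("defer_packet_queue_sketch() = %d\n", defer_packet_queue_sketch());
	return 0;
}
```

Built with `cc -pthread`, the sketch returns -1 (the -EBUSY analogue) because the stand-in progress check always reports no progress and the request is queued.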
