Diffstat (limited to 'drivers/net/wireless/ath/ath6kl/htc.c')
-rw-r--r--	drivers/net/wireless/ath/ath6kl/htc.c	2466
1 file changed, 2466 insertions, 0 deletions
diff --git a/drivers/net/wireless/ath/ath6kl/htc.c b/drivers/net/wireless/ath/ath6kl/htc.c
new file mode 100644
index 000000000000..95c47bbd1d78
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/htc.c
@@ -0,0 +1,2466 @@
+/*
+ * Copyright (c) 2007-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "core.h"
+#include "htc_hif.h"
+#include "debug.h"
+#include "hif-ops.h"
+#include <asm/unaligned.h>
+
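+/*
+ * Round a transfer length up to the mailbox block boundary: __ALIGN_MASK()
+ * expands to ((len + mask) & ~mask), so with e.g. a 128 byte block size
+ * (block_mask 0x7f) a 100 byte message pads out to 128 bytes.
+ */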
+#define CALC_TXRX_PADDED_LEN(dev, len) (__ALIGN_MASK((len), (dev)->block_mask))
+
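+/*
+ * Pre-pend the HTC frame header in the headroom reserved by the caller:
+ * packet->buf is rewound by HTC_HDR_LENGTH so the header and payload go
+ * out in one contiguous write.
+ */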
+static void htc_prep_send_pkt(struct htc_packet *packet, u8 flags, int ctrl0,
+ int ctrl1)
+{
+ struct htc_frame_hdr *hdr;
+
+ packet->buf -= HTC_HDR_LENGTH;
+ hdr = (struct htc_frame_hdr *)packet->buf;
+
+ /* Endianness? */
+ put_unaligned((u16)packet->act_len, &hdr->payld_len);
+ hdr->flags = flags;
+ hdr->eid = packet->endpoint;
+ hdr->ctrl[0] = ctrl0;
+ hdr->ctrl[1] = ctrl1;
+}
+
+static void htc_reclaim_txctrl_buf(struct htc_target *target,
+ struct htc_packet *pkt)
+{
+ spin_lock_bh(&target->htc_lock);
+ list_add_tail(&pkt->list, &target->free_ctrl_txbuf);
+ spin_unlock_bh(&target->htc_lock);
+}
+
+static struct htc_packet *htc_get_control_buf(struct htc_target *target,
+ bool tx)
+{
+ struct htc_packet *packet = NULL;
+ struct list_head *buf_list;
+
+ buf_list = tx ? &target->free_ctrl_txbuf : &target->free_ctrl_rxbuf;
+
+ spin_lock_bh(&target->htc_lock);
+
+ if (list_empty(buf_list)) {
+ spin_unlock_bh(&target->htc_lock);
+ return NULL;
+ }
+
+ packet = list_first_entry(buf_list, struct htc_packet, list);
+ list_del(&packet->list);
+ spin_unlock_bh(&target->htc_lock);
+
+ if (tx)
+ packet->buf = packet->buf_start + HTC_HDR_LENGTH;
+
+ return packet;
+}
+
+static void htc_tx_comp_update(struct htc_target *target,
+ struct htc_endpoint *endpoint,
+ struct htc_packet *packet)
+{
+ packet->completion = NULL;
+ packet->buf += HTC_HDR_LENGTH;
+
+ if (!packet->status)
+ return;
+
+ ath6kl_err("req failed (status:%d, ep:%d, len:%d creds:%d)\n",
+ packet->status, packet->endpoint, packet->act_len,
+ packet->info.tx.cred_used);
+
+ /* on failure to submit, reclaim credits for this packet */
+ spin_lock_bh(&target->tx_lock);
+ endpoint->cred_dist.cred_to_dist +=
+ packet->info.tx.cred_used;
+ endpoint->cred_dist.txq_depth = get_queue_depth(&endpoint->txq);
+
+ ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
+ target->cred_dist_cntxt, &target->cred_dist_list);
+
+ ath6k_credit_distribute(target->cred_dist_cntxt,
+ &target->cred_dist_list,
+ HTC_CREDIT_DIST_SEND_COMPLETE);
+
+ spin_unlock_bh(&target->tx_lock);
+}
+
+static void htc_tx_complete(struct htc_endpoint *endpoint,
+ struct list_head *txq)
+{
+ if (list_empty(txq))
+ return;
+
+ ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
+ "send complete ep %d, (%d pkts)\n",
+ endpoint->eid, get_queue_depth(txq));
+
+ ath6kl_tx_complete(endpoint->target->dev->ar, txq);
+}
+
+static void htc_tx_comp_handler(struct htc_target *target,
+ struct htc_packet *packet)
+{
+ struct htc_endpoint *endpoint = &target->endpoint[packet->endpoint];
+ struct list_head container;
+
+ htc_tx_comp_update(target, endpoint, packet);
+ INIT_LIST_HEAD(&container);
+ list_add_tail(&packet->list, &container);
+ /* do completion */
+ htc_tx_complete(endpoint, &container);
+}
+
+static void htc_async_tx_scat_complete(struct hif_scatter_req *scat_req)
+{
+ struct htc_endpoint *endpoint = scat_req->ep;
+ struct htc_target *target = endpoint->target;
+ struct htc_packet *packet;
+ struct list_head tx_compq;
+ int i;
+
+ INIT_LIST_HEAD(&tx_compq);
+
+ ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
+ "htc_async_tx_scat_complete total len: %d entries: %d\n",
+ scat_req->len, scat_req->scat_entries);
+
+ if (scat_req->status)
+ ath6kl_err("send scatter req failed: %d\n", scat_req->status);
+
+ /* walk through the scatter list and process */
+ for (i = 0; i < scat_req->scat_entries; i++) {
+ packet = scat_req->scat_list[i].packet;
+ if (!packet) {
+ WARN_ON(1);
+ return;
+ }
+
+ packet->status = scat_req->status;
+ htc_tx_comp_update(target, endpoint, packet);
+ list_add_tail(&packet->list, &tx_compq);
+ }
+
+ /* free scatter request */
+ hif_scatter_req_add(target->dev->ar, scat_req);
+
+ /* complete all packets */
+ htc_tx_complete(endpoint, &tx_compq);
+}
+
+static int htc_issue_send(struct htc_target *target, struct htc_packet *packet)
+{
+ int status;
+ bool sync = false;
+ u32 padded_len, send_len;
+
+ if (!packet->completion)
+ sync = true;
+
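+ /*
+ * act_len covers the payload only; htc_prep_send_pkt() has already
+ * rewound packet->buf over the HTC header, hence the extra
+ * HTC_HDR_LENGTH here.
+ */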
+ send_len = packet->act_len + HTC_HDR_LENGTH;
+
+ ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "%s: transmit len : %d (%s)\n",
+ __func__, send_len, sync ? "sync" : "async");
+
+ padded_len = CALC_TXRX_PADDED_LEN(target->dev, send_len);
+
+ ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
+ "DevSendPacket, padded len: %d mbox:0x%X (mode:%s)\n",
+ padded_len,
+ target->dev->ar->mbox_info.htc_addr,
+ sync ? "sync" : "async");
+
+ if (sync) {
+ status = hif_read_write_sync(target->dev->ar,
+ target->dev->ar->mbox_info.htc_addr,
+ packet->buf, padded_len,
+ HIF_WR_SYNC_BLOCK_INC);
+
+ packet->status = status;
+ packet->buf += HTC_HDR_LENGTH;
+ } else
+ status = hif_write_async(target->dev->ar,
+ target->dev->ar->mbox_info.htc_addr,
+ packet->buf, padded_len,
+ HIF_WR_ASYNC_BLOCK_INC, packet);
+
+ return status;
+}
+
+static int htc_check_credits(struct htc_target *target,
+ struct htc_endpoint *ep, u8 *flags,
+ enum htc_endpoint_id eid, unsigned int len,
+ int *req_cred)
+{
+
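+ /*
+ * e.g. a 1600 byte message with a 1536 byte target credit size
+ * needs DIV_ROUND_UP(1600, 1536) = 2 credits.
+ */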
+ *req_cred = (len > target->tgt_cred_sz) ?
+ DIV_ROUND_UP(len, target->tgt_cred_sz) : 1;
+
+ ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "creds required:%d got:%d\n",
+ *req_cred, ep->cred_dist.credits);
+
+ if (ep->cred_dist.credits < *req_cred) {
+ if (eid == ENDPOINT_0)
+ return -EINVAL;
+
+ /* Seek more credits */
+ ep->cred_dist.seek_cred = *req_cred - ep->cred_dist.credits;
+
+ ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
+ target->cred_dist_cntxt, &ep->cred_dist);
+
+ ath6k_seek_credits(target->cred_dist_cntxt, &ep->cred_dist);
+
+ ep->cred_dist.seek_cred = 0;
+
+ if (ep->cred_dist.credits < *req_cred) {
+ ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
+ "not enough credits for ep %d - leaving packet in queue\n",
+ eid);
+ return -EINVAL;
+ }
+ }
+
+ ep->cred_dist.credits -= *req_cred;
+ ep->ep_st.cred_cosumd += *req_cred;
+
+ /* When we are getting low on credits, ask for more */
+ if (ep->cred_dist.credits < ep->cred_dist.cred_per_msg) {
+ ep->cred_dist.seek_cred =
+ ep->cred_dist.cred_per_msg - ep->cred_dist.credits;
+
+ ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
+ target->cred_dist_cntxt, &ep->cred_dist);
+
+ ath6k_seek_credits(target->cred_dist_cntxt, &ep->cred_dist);
+
+ /* see if we were successful in getting more */
+ if (ep->cred_dist.credits < ep->cred_dist.cred_per_msg) {
+ /* tell the target we need credits ASAP! */
+ *flags |= HTC_FLAGS_NEED_CREDIT_UPDATE;
+ ep->ep_st.cred_low_indicate += 1;
+ ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "host needs credits\n");
+ }
+ }
+
+ return 0;
+}
+
+static void htc_tx_pkts_get(struct htc_target *target,
+ struct htc_endpoint *endpoint,
+ struct list_head *queue)
+{
+ int req_cred;
+ u8 flags;
+ struct htc_packet *packet;
+ unsigned int len;
+
+ while (true) {
+
+ flags = 0;
+
+ if (list_empty(&endpoint->txq))
+ break;
+ packet = list_first_entry(&endpoint->txq, struct htc_packet,
+ list);
+
+ ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
+ "got head pkt:0x%p , queue depth: %d\n",
+ packet, get_queue_depth(&endpoint->txq));
+
+ len = CALC_TXRX_PADDED_LEN(target->dev,
+ packet->act_len + HTC_HDR_LENGTH);
+
+ if (htc_check_credits(target, endpoint, &flags,
+ packet->endpoint, len, &req_cred))
+ break;
+
+ /* now we can fully move onto caller's queue */
+ packet = list_first_entry(&endpoint->txq, struct htc_packet,
+ list);
+ list_move_tail(&packet->list, queue);
+
+ /* save the number of credits this packet consumed */
+ packet->info.tx.cred_used = req_cred;
+
+ /* all TX packets are handled asynchronously */
+ packet->completion = htc_tx_comp_handler;
+ packet->context = target;
+ endpoint->ep_st.tx_issued += 1;
+
+ /* save send flags */
+ packet->info.tx.flags = flags;
+ packet->info.tx.seqno = endpoint->seqno;
+ endpoint->seqno++;
+ }
+}
+
+/* See if the padded tx length falls on a credit boundary */
+static int htc_get_credit_padding(unsigned int cred_sz, int *len,
+ struct htc_endpoint *ep)
+{
+ int rem_cred, cred_pad;
+
+ rem_cred = *len % cred_sz;
+
+ /* No padding needed */
+ if (!rem_cred)
+ return 0;
+
+ if (!(ep->conn_flags & HTC_FLGS_TX_BNDL_PAD_EN))
+ return -1;
+
+ /*
+ * The transfer consumes a "partial" credit, this
+ * packet cannot be bundled unless we add
+ * additional "dummy" padding (max 255 bytes) to
+ * consume the entire credit.
+ */
+ cred_pad = *len < cred_sz ? (cred_sz - *len) : rem_cred;
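+ /* e.g. cred_sz 128: a 100 byte packet is padded by 28 bytes so the
+ * bundle entry still consumes exactly one credit
+ */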
+
+ if ((cred_pad > 0) && (cred_pad <= 255))
+ *len += cred_pad;
+ else
+ /* The amount of padding is too large, send as non-bundled */
+ return -1;
+
+ return cred_pad;
+}
+
+static int htc_setup_send_scat_list(struct htc_target *target,
+ struct htc_endpoint *endpoint,
+ struct hif_scatter_req *scat_req,
+ int n_scat,
+ struct list_head *queue)
+{
+ struct htc_packet *packet;
+ int i, len, rem_scat, cred_pad;
+ int status = 0;
+
+ rem_scat = target->dev->max_tx_bndl_sz;
+
+ for (i = 0; i < n_scat; i++) {
+ scat_req->scat_list[i].packet = NULL;
+
+ if (list_empty(queue))
+ break;
+
+ packet = list_first_entry(queue, struct htc_packet, list);
+ len = CALC_TXRX_PADDED_LEN(target->dev,
+ packet->act_len + HTC_HDR_LENGTH);
+
+ cred_pad = htc_get_credit_padding(target->tgt_cred_sz,
+ &len, endpoint);
+ if (cred_pad < 0) {
+ status = -EINVAL;
+ break;
+ }
+
+ if (rem_scat < len) {
+ /* exceeds what we can transfer */
+ status = -ENOSPC;
+ break;
+ }
+
+ rem_scat -= len;
+ /* now remove it from the queue */
+ packet = list_first_entry(queue, struct htc_packet, list);
+ list_del(&packet->list);
+
+ scat_req->scat_list[i].packet = packet;
+ /* prepare packet and flag message as part of a send bundle */
+ htc_prep_send_pkt(packet,
+ packet->info.tx.flags | HTC_FLAGS_SEND_BUNDLE,
+ cred_pad, packet->info.tx.seqno);
+ scat_req->scat_list[i].buf = packet->buf;
+ scat_req->scat_list[i].len = len;
+
+ scat_req->len += len;
+ scat_req->scat_entries++;
+ ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
+ "%d, adding pkt : 0x%p len:%d (remaining space:%d)\n",
+ i, packet, len, rem_scat);
+ }
+
+ /* Roll back scatter setup in case of any failure */
+ if (status || (scat_req->scat_entries < HTC_MIN_HTC_MSGS_TO_BUNDLE)) {
+ for (i = scat_req->scat_entries - 1; i >= 0; i--) {
+ packet = scat_req->scat_list[i].packet;
+ if (packet) {
+ packet->buf += HTC_HDR_LENGTH;
+ list_add(&packet->list, queue);
+ }
+ }
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * htc_issue_send_bundle: drain a queue and send as bundles.
+ * This function may return without fully draining the queue
+ * when:
+ *
+ * 1. scatter resources are exhausted
+ * 2. a message that will consume a partial credit will stop the
+ * bundling process early
+ * 3. we drop below the minimum number of messages for a bundle
+ */
+static void htc_issue_send_bundle(struct htc_endpoint *endpoint,
+ struct list_head *queue,
+ int *sent_bundle, int *n_bundle_pkts)
+{
+ struct htc_target *target = endpoint->target;
+ struct hif_scatter_req *scat_req = NULL;
+ struct hif_dev_scat_sup_info hif_info;
+ int n_scat, n_sent_bundle = 0, tot_pkts_bundle = 0;
+
+ hif_info = target->dev->hif_scat_info;
+
+ while (true) {
+ n_scat = get_queue_depth(queue);
+ n_scat = min(n_scat, target->msg_per_bndl_max);
+
+ if (n_scat < HTC_MIN_HTC_MSGS_TO_BUNDLE)
+ /* not enough to bundle */
+ break;
+
+ scat_req = hif_scatter_req_get(target->dev->ar);
+
+ if (!scat_req) {
+ /* no scatter resources */
+ ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
+ "no more scatter resources\n");
+ break;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "pkts to scatter: %d\n",
+ n_scat);
+
+ scat_req->len = 0;
+ scat_req->scat_entries = 0;
+
+ if (htc_setup_send_scat_list(target, endpoint, scat_req,
+ n_scat, queue)) {
+ hif_scatter_req_add(target->dev->ar, scat_req);
+ break;
+ }
+
+ /* send path is always asynchronous */
+ scat_req->complete = htc_async_tx_scat_complete;
+ scat_req->ep = endpoint;
+ n_sent_bundle++;
+ tot_pkts_bundle += scat_req->scat_entries;
+
+ ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
+ "send scatter total bytes: %d , entries: %d\n",
+ scat_req->len, scat_req->scat_entries);
+ ath6kldev_submit_scat_req(target->dev, scat_req, false);
+ }
+
+ *sent_bundle = n_sent_bundle;
+ *n_bundle_pkts = tot_pkts_bundle;
+ ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "htc_issue_send_bundle (sent:%d)\n",
+ n_sent_bundle);
+
+ return;
+}
+
+static void htc_tx_from_ep_txq(struct htc_target *target,
+ struct htc_endpoint *endpoint)
+{
+ struct list_head txq;
+ struct htc_packet *packet;
+ int bundle_sent;
+ int n_pkts_bundle;
+
+ spin_lock_bh(&target->tx_lock);
+
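+ /* only one context at a time may drain this endpoint's tx queue */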
+ endpoint->tx_proc_cnt++;
+ if (endpoint->tx_proc_cnt > 1) {
+ endpoint->tx_proc_cnt--;
+ spin_unlock_bh(&target->tx_lock);
+ ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "htc_try_send (busy)\n");
+ return;
+ }
+
+ /*
+ * drain the endpoint TX queue for transmission as long
+ * as we have enough credits.
+ */
+ INIT_LIST_HEAD(&txq);
+
+ while (true) {
+
+ if (list_empty(&endpoint->txq))
+ break;
+
+ htc_tx_pkts_get(target, endpoint, &txq);
+
+ if (list_empty(&txq))
+ break;
+
+ spin_unlock_bh(&target->tx_lock);
+
+ bundle_sent = 0;
+ n_pkts_bundle = 0;
+
+ while (true) {
+ /* try to send a bundle on each pass */
+ if ((target->tx_bndl_enable) &&
+ (get_queue_depth(&txq) >=
+ HTC_MIN_HTC_MSGS_TO_BUNDLE)) {
+ int temp1 = 0, temp2 = 0;
+
+ htc_issue_send_bundle(endpoint, &txq,
+ &temp1, &temp2);
+ bundle_sent += temp1;
+ n_pkts_bundle += temp2;
+ }
+
+ if (list_empty(&txq))
+ break;
+
+ packet = list_first_entry(&txq, struct htc_packet,
+ list);
+ list_del(&packet->list);
+
+ htc_prep_send_pkt(packet, packet->info.tx.flags,
+ 0, packet->info.tx.seqno);
+ htc_issue_send(target, packet);
+ }
+
+ spin_lock_bh(&target->tx_lock);
+
+ endpoint->ep_st.tx_bundles += bundle_sent;
+ endpoint->ep_st.tx_pkt_bundled += n_pkts_bundle;
+ }
+
+ endpoint->tx_proc_cnt = 0;
+ spin_unlock_bh(&target->tx_lock);
+}
+
+static bool htc_try_send(struct htc_target *target,
+ struct htc_endpoint *endpoint,
+ struct htc_packet *tx_pkt)
+{
+ struct htc_ep_callbacks ep_cb;
+ int txq_depth;
+ bool overflow = false;
+
+ ep_cb = endpoint->ep_cb;
+
+ spin_lock_bh(&target->tx_lock);
+ txq_depth = get_queue_depth(&endpoint->txq);
+ spin_unlock_bh(&target->tx_lock);
+
+ if (txq_depth >= endpoint->max_txq_depth)
+ overflow = true;
+
+ if (overflow)
+ ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
+ "ep %d, tx queue will overflow :%d , tx depth:%d, max:%d\n",
+ endpoint->eid, overflow, txq_depth,
+ endpoint->max_txq_depth);
+
+ if (overflow && ep_cb.tx_full) {
+ ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
+ "indicating overflowed tx packet: 0x%p\n", tx_pkt);
+
+ if (ep_cb.tx_full(endpoint->target, tx_pkt) ==
+ HTC_SEND_FULL_DROP) {
+ endpoint->ep_st.tx_dropped += 1;
+ return false;
+ }
+ }
+
+ spin_lock_bh(&target->tx_lock);
+ list_add_tail(&tx_pkt->list, &endpoint->txq);
+ spin_unlock_bh(&target->tx_lock);
+
+ htc_tx_from_ep_txq(target, endpoint);
+
+ return true;
+}
+
+static void htc_chk_ep_txq(struct htc_target *target)
+{
+ struct htc_endpoint *endpoint;
+ struct htc_endpoint_credit_dist *cred_dist;
+
+ /*
+ * Run through the credit distribution list to see if there are
+ * packets queued. NOTE: no locks need to be taken since the
+ * distribution list is not dynamic (cannot be re-ordered) and we
+ * are not modifying any state.
+ */
+ list_for_each_entry(cred_dist, &target->cred_dist_list, list) {
+ endpoint = (struct htc_endpoint *)cred_dist->htc_rsvd;
+
+ spin_lock_bh(&target->tx_lock);
+ if (!list_empty(&endpoint->txq)) {
+ ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
+ "ep %d has %d credits and %d packets in tx queue\n",
+ cred_dist->endpoint,
+ endpoint->cred_dist.credits,
+ get_queue_depth(&endpoint->txq));
+ spin_unlock_bh(&target->tx_lock);
+ /*
+ * Try to start the stalled queue, this list is
+ * ordered by priority. If there are credits
+ * available the highest priority queue will get a
+ * chance to reclaim credits from lower priority
+ * ones.
+ */
+ htc_tx_from_ep_txq(target, endpoint);
+ spin_lock_bh(&target->tx_lock);
+ }
+ spin_unlock_bh(&target->tx_lock);
+ }
+}
+
+static int htc_setup_tx_complete(struct htc_target *target)
+{
+ struct htc_packet *send_pkt = NULL;
+ int status;
+
+ send_pkt = htc_get_control_buf(target, true);
+
+ if (!send_pkt)
+ return -ENOMEM;
+
+ if (target->htc_tgt_ver >= HTC_VERSION_2P1) {
+ struct htc_setup_comp_ext_msg *setup_comp_ext;
+ u32 flags = 0;
+
+ setup_comp_ext =
+ (struct htc_setup_comp_ext_msg *)send_pkt->buf;
+ memset(setup_comp_ext, 0, sizeof(*setup_comp_ext));
+ setup_comp_ext->msg_id =
+ cpu_to_le16(HTC_MSG_SETUP_COMPLETE_EX_ID);
+
+ if (target->msg_per_bndl_max > 0) {
+ /* Indicate HTC bundling to the target */
+ flags |= HTC_SETUP_COMP_FLG_RX_BNDL_EN;
+ setup_comp_ext->msg_per_rxbndl =
+ target->msg_per_bndl_max;
+ }
+
+ memcpy(&setup_comp_ext->flags, &flags,
+ sizeof(setup_comp_ext->flags));
+ set_htc_pkt_info(send_pkt, NULL, (u8 *) setup_comp_ext,
+ sizeof(struct htc_setup_comp_ext_msg),
+ ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
+
+ } else {
+ struct htc_setup_comp_msg *setup_comp;
+ setup_comp = (struct htc_setup_comp_msg *)send_pkt->buf;
+ memset(setup_comp, 0, sizeof(struct htc_setup_comp_msg));
+ setup_comp->msg_id = cpu_to_le16(HTC_MSG_SETUP_COMPLETE_ID);
+ set_htc_pkt_info(send_pkt, NULL, (u8 *) setup_comp,
+ sizeof(struct htc_setup_comp_msg),
+ ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
+ }
+
+ /* we want synchronous operation */
+ send_pkt->completion = NULL;
+ htc_prep_send_pkt(send_pkt, 0, 0, 0);
+ status = htc_issue_send(target, send_pkt);
+
+ if (send_pkt != NULL)
+ htc_reclaim_txctrl_buf(target, send_pkt);
+
+ return status;
+}
+
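+/*
+ * Build the credit distribution list in priority order: ENDPOINT_0
+ * (HTC control) always heads the list, and the remaining endpoints
+ * follow in the order given by srvc_pri_order[].
+ */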
+void htc_set_credit_dist(struct htc_target *target,
+ struct htc_credit_state_info *cred_dist_cntxt,
+ u16 srvc_pri_order[], int list_len)
+{
+ struct htc_endpoint *endpoint;
+ int i, ep;
+
+ target->cred_dist_cntxt = cred_dist_cntxt;
+
+ list_add_tail(&target->endpoint[ENDPOINT_0].cred_dist.list,
+ &target->cred_dist_list);
+
+ for (i = 0; i < list_len; i++) {
+ for (ep = ENDPOINT_1; ep < ENDPOINT_MAX; ep++) {
+ endpoint = &target->endpoint[ep];
+ if (endpoint->svc_id == srvc_pri_order[i]) {
+ list_add_tail(&endpoint->cred_dist.list,
+ &target->cred_dist_list);
+ break;
+ }
+ }
+ if (ep >= ENDPOINT_MAX) {
+ WARN_ON(1);
+ return;
+ }
+ }
+}
+
+int htc_tx(struct htc_target *target, struct htc_packet *packet)
+{
+ struct htc_endpoint *endpoint;
+ struct list_head queue;
+
+ ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
+ "htc_tx: ep id: %d, buf: 0x%p, len: %d\n",
+ packet->endpoint, packet->buf, packet->act_len);
+
+ if (packet->endpoint >= ENDPOINT_MAX) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ endpoint = &target->endpoint[packet->endpoint];
+
+ if (!htc_try_send(target, endpoint, packet)) {
+ packet->status = (target->htc_flags & HTC_OP_STATE_STOPPING) ?
+ -ECANCELED : -ENOSPC;
+ INIT_LIST_HEAD(&queue);
+ list_add(&packet->list, &queue);
+ htc_tx_complete(endpoint, &queue);
+ }
+
+ return 0;
+}
+
+/* flush endpoint TX queue */
+void htc_flush_txep(struct htc_target *target,
+ enum htc_endpoint_id eid, u16 tag)
+{
+ struct htc_packet *packet, *tmp_pkt;
+ struct list_head discard_q, container;
+ struct htc_endpoint *endpoint = &target->endpoint[eid];
+
+ if (!endpoint->svc_id) {
+ WARN_ON(1);
+ return;
+ }
+
+ /* initialize the discard queue */
+ INIT_LIST_HEAD(&discard_q);
+
+ spin_lock_bh(&target->tx_lock);
+
+ list_for_each_entry_safe(packet, tmp_pkt, &endpoint->txq, list) {
+ if ((tag == HTC_TX_PACKET_TAG_ALL) ||
+ (tag == packet->info.tx.tag))
+ list_move_tail(&packet->list, &discard_q);
+ }
+
+ spin_unlock_bh(&target->tx_lock);
+
+ list_for_each_entry_safe(packet, tmp_pkt, &discard_q, list) {
+ packet->status = -ECANCELED;
+ list_del(&packet->list);
+ ath6kl_dbg(ATH6KL_DBG_TRC,
+ "flushing tx pkt:0x%p, len:%d, ep:%d tag:0x%X\n",
+ packet, packet->act_len,
+ packet->endpoint, packet->info.tx.tag);
+
+ INIT_LIST_HEAD(&container);
+ list_add_tail(&packet->list, &container);
+ htc_tx_complete(endpoint, &container);
+ }
+
+}
+
+static void htc_flush_txep_all(struct htc_target *target)
+{
+ struct htc_endpoint *endpoint;
+ int i;
+
+ dump_cred_dist_stats(target);
+
+ for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
+ endpoint = &target->endpoint[i];
+ if (endpoint->svc_id == 0)
+ /* not in use.. */
+ continue;
+ htc_flush_txep(target, i, HTC_TX_PACKET_TAG_ALL);
+ }
+}
+
+void htc_indicate_activity_change(struct htc_target *target,
+ enum htc_endpoint_id eid, bool active)
+{
+ struct htc_endpoint *endpoint = &target->endpoint[eid];
+ bool dist = false;
+
+ if (endpoint->svc_id == 0) {
+ WARN_ON(1);
+ return;
+ }
+
+ spin_lock_bh(&target->tx_lock);
+
+ if (active) {
+ if (!(endpoint->cred_dist.dist_flags & HTC_EP_ACTIVE)) {
+ endpoint->cred_dist.dist_flags |= HTC_EP_ACTIVE;
+ dist = true;
+ }
+ } else {
+ if (endpoint->cred_dist.dist_flags & HTC_EP_ACTIVE) {
+ endpoint->cred_dist.dist_flags &= ~HTC_EP_ACTIVE;
+ dist = true;
+ }
+ }
+
+ if (dist) {
+ endpoint->cred_dist.txq_depth =
+ get_queue_depth(&endpoint->txq);
+
+ ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
+ target->cred_dist_cntxt, &target->cred_dist_list);
+
+ ath6k_credit_distribute(target->cred_dist_cntxt,
+ &target->cred_dist_list,
+ HTC_CREDIT_DIST_ACTIVITY_CHANGE);
+ }
+
+ spin_unlock_bh(&target->tx_lock);
+
+ if (dist && !active)
+ htc_chk_ep_txq(target);
+}
+
+/* HTC Rx */
+
+static inline void htc_update_rx_stats(struct htc_endpoint *endpoint,
+ int n_look_ahds)
+{
+ endpoint->ep_st.rx_pkts++;
+ if (n_look_ahds == 1)
+ endpoint->ep_st.rx_lkahds++;
+ else if (n_look_ahds > 1)
+ endpoint->ep_st.rx_bundle_lkahd++;
+}
+
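+/*
+ * Frames for the control endpoint must fit a regular rx buffer; all
+ * other endpoints may use the larger A-MSDU sized buffers.
+ */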
+static inline bool htc_valid_rx_frame_len(struct htc_target *target,
+ enum htc_endpoint_id eid, int len)
+{
+ return (eid == target->dev->ar->ctrl_ep) ?
+ len <= ATH6KL_BUFFER_SIZE : len <= ATH6KL_AMSDU_BUFFER_SIZE;
+}
+
+static int htc_add_rxbuf(struct htc_target *target, struct htc_packet *packet)
+{
+ struct list_head queue;
+
+ INIT_LIST_HEAD(&queue);
+ list_add_tail(&packet->list, &queue);
+ return htc_add_rxbuf_multiple(target, &queue);
+}
+
+static void htc_reclaim_rxbuf(struct htc_target *target,
+ struct htc_packet *packet,
+ struct htc_endpoint *ep)
+{
+ if (packet->info.rx.rx_flags & HTC_RX_PKT_NO_RECYCLE) {
+ htc_rxpkt_reset(packet);
+ packet->status = -ECANCELED;
+ ep->ep_cb.rx(ep->target, packet);
+ } else {
+ htc_rxpkt_reset(packet);
+ htc_add_rxbuf((void *)(target), packet);
+ }
+}
+
+static void reclaim_rx_ctrl_buf(struct htc_target *target,
+ struct htc_packet *packet)
+{
+ spin_lock_bh(&target->htc_lock);
+ list_add_tail(&packet->list, &target->free_ctrl_rxbuf);
+ spin_unlock_bh(&target->htc_lock);
+}
+
+static int dev_rx_pkt(struct htc_target *target, struct htc_packet *packet,
+ u32 rx_len)
+{
+ struct ath6kl_device *dev = target->dev;
+ u32 padded_len;
+ int status;
+
+ padded_len = CALC_TXRX_PADDED_LEN(dev, rx_len);
+
+ if (padded_len > packet->buf_len) {
+ ath6kl_err("not enough receive space for packet - padlen:%d recvlen:%d bufferlen:%d\n",
+ padded_len, rx_len, packet->buf_len);
+ return -ENOMEM;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
+ "dev_rx_pkt (0x%p : hdr:0x%X) padded len: %d mbox:0x%X (mode:%s)\n",
+ packet, packet->info.rx.exp_hdr,
+ padded_len, dev->ar->mbox_info.htc_addr, "sync");
+
+ status = hif_read_write_sync(dev->ar,
+ dev->ar->mbox_info.htc_addr,
+ packet->buf, padded_len,
+ HIF_RD_SYNC_BLOCK_FIX);
+
+ packet->status = status;
+
+ return status;
+}
+
+/*
+ * Optimization for recv packets: we can indicate a
+ * "hint" that there are more single-packets to fetch
+ * on this endpoint.
+ */
+static void set_rxpkt_indication_flag(u32 lk_ahd,
+ struct htc_endpoint *endpoint,
+ struct htc_packet *packet)
+{
+ struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)&lk_ahd;
+
+ if (htc_hdr->eid == packet->endpoint) {
+ if (!list_empty(&endpoint->rx_bufq))
+ packet->info.rx.indicat_flags |=
+ HTC_RX_FLAGS_INDICATE_MORE_PKTS;
+ }
+}
+
+static void chk_rx_water_mark(struct htc_endpoint *endpoint)
+{
+ struct htc_ep_callbacks ep_cb = endpoint->ep_cb;
+
+ if (ep_cb.rx_refill_thresh > 0) {
+ spin_lock_bh(&endpoint->target->rx_lock);
+ if (get_queue_depth(&endpoint->rx_bufq)
+ < ep_cb.rx_refill_thresh) {
+ spin_unlock_bh(&endpoint->target->rx_lock);
+ ep_cb.rx_refill(endpoint->target, endpoint->eid);
+ return;
+ }
+ spin_unlock_bh(&endpoint->target->rx_lock);
+ }
+}
+
+/* This function is called with rx_lock held */
+static int htc_setup_rxpkts(struct htc_target *target, struct htc_endpoint *ep,
+ u32 *lk_ahds, struct list_head *queue, int n_msg)
+{
+ struct htc_packet *packet;
+ /* FIXME: type of lk_ahds can't be right */
+ struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)lk_ahds;
+ struct htc_ep_callbacks ep_cb;
+ int status = 0, j, full_len;
+ bool no_recycle;
+
+ full_len = CALC_TXRX_PADDED_LEN(target->dev,
+ le16_to_cpu(htc_hdr->payld_len) +
+ sizeof(*htc_hdr));
+
+ if (!htc_valid_rx_frame_len(target, ep->eid, full_len)) {
+ ath6kl_warn("Rx buffer requested with invalid length\n");
+ return -EINVAL;
+ }
+
+ ep_cb = ep->ep_cb;
+ for (j = 0; j < n_msg; j++) {
+
+ /*
+ * Reset flag, any packets allocated using the
+ * rx_alloc() API cannot be recycled on
+ * cleanup, they must be explicitly returned.
+ */
+ no_recycle = false;
+
+ if (ep_cb.rx_allocthresh &&
+ (full_len > ep_cb.rx_alloc_thresh)) {
+ ep->ep_st.rx_alloc_thresh_hit += 1;
+ ep->ep_st.rxalloc_thresh_byte +=
+ le16_to_cpu(htc_hdr->payld_len);
+
+ spin_unlock_bh(&target->rx_lock);
+ no_recycle = true;
+
+ packet = ep_cb.rx_allocthresh(ep->target, ep->eid,
+ full_len);
+ spin_lock_bh(&target->rx_lock);
+ } else {
+ /* refill handler is being used */
+ if (list_empty(&ep->rx_bufq)) {
+ if (ep_cb.rx_refill) {
+ spin_unlock_bh(&target->rx_lock);
+ ep_cb.rx_refill(ep->target, ep->eid);
+ spin_lock_bh(&target->rx_lock);
+ }
+ }
+
+ if (list_empty(&ep->rx_bufq))
+ packet = NULL;
+ else {
+ packet = list_first_entry(&ep->rx_bufq,
+ struct htc_packet, list);
+ list_del(&packet->list);
+ }
+ }
+
+ if (!packet) {
+ target->rx_st_flags |= HTC_RECV_WAIT_BUFFERS;
+ target->ep_waiting = ep->eid;
+ return -ENOSPC;
+ }
+
+ /* clear flags */
+ packet->info.rx.rx_flags = 0;
+ packet->info.rx.indicat_flags = 0;
+ packet->status = 0;
+
+ if (no_recycle)
+ /*
+ * flag that these packets cannot be
+ * recycled, they have to be returned to
+ * the user
+ */
+ packet->info.rx.rx_flags |= HTC_RX_PKT_NO_RECYCLE;
+
+ /* Caller needs to free this upon any failure */
+ list_add_tail(&packet->list, queue);
+
+ if (target->htc_flags & HTC_OP_STATE_STOPPING) {
+ status = -ECANCELED;
+ break;
+ }
+
+ if (j) {
+ packet->info.rx.rx_flags |= HTC_RX_PKT_REFRESH_HDR;
+ packet->info.rx.exp_hdr = 0xFFFFFFFF;
+ } else
+ /* set expected look ahead */
+ packet->info.rx.exp_hdr = *lk_ahds;
+
+ packet->act_len = le16_to_cpu(htc_hdr->payld_len) +
+ HTC_HDR_LENGTH;
+ }
+
+ return status;
+}
+
+static int alloc_and_prep_rxpkts(struct htc_target *target,
+ u32 lk_ahds[], int msg,
+ struct htc_endpoint *endpoint,
+ struct list_head *queue)
+{
+ int status = 0;
+ struct htc_packet *packet, *tmp_pkt;
+ struct htc_frame_hdr *htc_hdr;
+ int i, n_msg;
+
+ spin_lock_bh(&target->rx_lock);
+
+ for (i = 0; i < msg; i++) {
+
+ htc_hdr = (struct htc_frame_hdr *)&lk_ahds[i];
+
+ if (htc_hdr->eid >= ENDPOINT_MAX) {
+ ath6kl_err("invalid ep in look-ahead: %d\n",
+ htc_hdr->eid);
+ status = -ENOMEM;
+ break;
+ }
+
+ if (htc_hdr->eid != endpoint->eid) {
+ ath6kl_err("invalid ep in look-ahead: %d should be : %d (index:%d)\n",
+ htc_hdr->eid, endpoint->eid, i);
+ status = -ENOMEM;
+ break;
+ }
+
+ if (le16_to_cpu(htc_hdr->payld_len) > HTC_MAX_PAYLOAD_LENGTH) {
+ ath6kl_err("payload len %d exceeds max htc: %d\n",
+ le16_to_cpu(htc_hdr->payld_len),
+ (u32) HTC_MAX_PAYLOAD_LENGTH);
+ status = -ENOMEM;
+ break;
+ }
+
+ if (endpoint->svc_id == 0) {
+ ath6kl_err("ep %d is not connected !\n", htc_hdr->eid);
+ status = -ENOMEM;
+ break;
+ }
+
+ if (htc_hdr->flags & HTC_FLG_RX_BNDL_CNT) {
+ /*
+ * HTC header indicates that every packet to follow
+ * has the same padded length so that it can be
+ * optimally fetched as a full bundle.
+ */
+ n_msg = (htc_hdr->flags & HTC_FLG_RX_BNDL_CNT) >>
+ HTC_FLG_RX_BNDL_CNT_S;
+
+ /* the count doesn't include the starter frame */
+ n_msg++;
+ if (n_msg > target->msg_per_bndl_max) {
+ status = -ENOMEM;
+ break;
+ }
+
+ endpoint->ep_st.rx_bundle_from_hdr += 1;
+ ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
+ "htc hdr indicates :%d msg can be fetched as a bundle\n",
+ n_msg);
+ } else
+ /* HTC header only indicates 1 message to fetch */
+ n_msg = 1;
+
+ /* Setup packet buffers for each message */
+ status = htc_setup_rxpkts(target, endpoint, &lk_ahds[i], queue,
+ n_msg);
+
+ /*
+ * This is due to the unavailability of buffers to rx the entire data.
+ * Return no error so that free buffers from queue can be used
+ * to receive partial data.
+ */
+ if (status == -ENOSPC) {
+ spin_unlock_bh(&target->rx_lock);
+ return 0;
+ }
+
+ if (status)
+ break;
+ }
+
+ spin_unlock_bh(&target->rx_lock);
+
+ if (status) {
+ list_for_each_entry_safe(packet, tmp_pkt, queue, list) {
+ list_del(&packet->list);
+ htc_reclaim_rxbuf(target, packet,
+ &target->endpoint[packet->endpoint]);
+ }
+ }
+
+ return status;
+}
+