-rw-r--r--  include/linux/socket.h   |    6
-rw-r--r--  include/net/kcm.h        |  125
-rw-r--r--  include/uapi/linux/kcm.h |   40
-rw-r--r--  net/Kconfig              |    1
-rw-r--r--  net/Makefile             |    1
-rw-r--r--  net/kcm/Kconfig          |   10
-rw-r--r--  net/kcm/Makefile         |    3
-rw-r--r--  net/kcm/kcmsock.c        | 2016
8 files changed, 2201 insertions(+), 1 deletion(-)
diff --git a/include/linux/socket.h b/include/linux/socket.h
index d834af22a460..73bf6c6a833b 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -200,7 +200,9 @@ struct ucred {
#define AF_ALG 38 /* Algorithm sockets */
#define AF_NFC 39 /* NFC sockets */
#define AF_VSOCK 40 /* vSockets */
-#define AF_MAX 41 /* For now.. */
+#define AF_KCM 41 /* Kernel Connection Multiplexor */
+
+#define AF_MAX 42 /* For now.. */
/* Protocol families, same as address families. */
#define PF_UNSPEC AF_UNSPEC
@@ -246,6 +248,7 @@ struct ucred {
#define PF_ALG AF_ALG
#define PF_NFC AF_NFC
#define PF_VSOCK AF_VSOCK
+#define PF_KCM AF_KCM
#define PF_MAX AF_MAX
/* Maximum queue length specifiable by listen. */
@@ -323,6 +326,7 @@ struct ucred {
#define SOL_CAIF 278
#define SOL_ALG 279
#define SOL_NFC 280
+#define SOL_KCM 281
/* IPX options */
#define IPX_TYPE 1
diff --git a/include/net/kcm.h b/include/net/kcm.h
new file mode 100644
index 000000000000..1bcae39070ec
--- /dev/null
+++ b/include/net/kcm.h
@@ -0,0 +1,125 @@
+/*
+ * Kernel Connection Multiplexor
+ *
+ * Copyright (c) 2016 Tom Herbert <tom@herbertland.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ */
+
+#ifndef __NET_KCM_H_
+#define __NET_KCM_H_
+
+#include <linux/skbuff.h>
+#include <net/sock.h>
+#include <uapi/linux/kcm.h>
+
+extern unsigned int kcm_net_id;
+
+struct kcm_tx_msg {
+ unsigned int sent;
+ unsigned int fragidx;
+ unsigned int frag_offset;
+ unsigned int msg_flags;
+ struct sk_buff *frag_skb;
+ struct sk_buff *last_skb;
+};
+
+struct kcm_rx_msg {
+ int full_len;
+ int accum_len;
+ int offset;
+};
+
+/* Socket structure for KCM client sockets */
+struct kcm_sock {
+ struct sock sk;
+ struct kcm_mux *mux;
+ struct list_head kcm_sock_list;
+ int index;
+ u32 done : 1;
+ struct work_struct done_work;
+
+ /* Transmit */
+ struct kcm_psock *tx_psock;
+ struct work_struct tx_work;
+ struct list_head wait_psock_list;
+ struct sk_buff *seq_skb;
+
+ /* Don't use bit fields here, these are set under different locks */
+ bool tx_wait;
+ bool tx_wait_more;
+
+ /* Receive */
+ struct kcm_psock *rx_psock;
+ struct list_head wait_rx_list; /* KCMs waiting to receive */
+ bool rx_wait;
+ u32 rx_disabled : 1;
+};
+
+struct bpf_prog;
+
+/* Structure for an attached lower socket */
+struct kcm_psock {
+ struct sock *sk;
+ struct kcm_mux *mux;
+ int index;
+
+ u32 tx_stopped : 1;
+ u32 rx_stopped : 1;
+ u32 done : 1;
+ u32 unattaching : 1;
+
+ void (*save_state_change)(struct sock *sk);
+ void (*save_data_ready)(struct sock *sk);
+ void (*save_write_space)(struct sock *sk);
+
+ struct list_head psock_list;
+
+ /* Receive */
+ struct sk_buff *rx_skb_head;
+ struct sk_buff **rx_skb_nextp;
+ struct sk_buff *ready_rx_msg;
+ struct list_head psock_ready_list;
+ struct work_struct rx_work;
+ struct delayed_work rx_delayed_work;
+ struct bpf_prog *bpf_prog;
+ struct kcm_sock *rx_kcm;
+
+ /* Transmit */
+ struct kcm_sock *tx_kcm;
+ struct list_head psock_avail_list;
+};
+
+/* Per net MUX list */
+struct kcm_net {
+ struct mutex mutex;
+ struct list_head mux_list;
+ int count;
+};
+
+/* Structure for a MUX */
+struct kcm_mux {
+ struct list_head kcm_mux_list;
+ struct rcu_head rcu;
+ struct kcm_net *knet;
+
+ struct list_head kcm_socks; /* All KCM sockets on MUX */
+ int kcm_socks_cnt; /* Total KCM socket count for MUX */
+ struct list_head psocks; /* List of all psocks on MUX */
+ int psocks_cnt; /* Total attached sockets */
+
+ /* Receive */
+ spinlock_t rx_lock ____cacheline_aligned_in_smp;
+ struct list_head kcm_rx_waiters; /* KCMs waiting to receive */
+ struct list_head psocks_ready; /* List of psocks with a msg ready */
+ struct sk_buff_head rx_hold_queue;
+
+ /* Transmit */
+ spinlock_t lock ____cacheline_aligned_in_smp; /* TX and mux locking */
+ struct list_head psocks_avail; /* List of available psocks */
+ struct list_head kcm_tx_waiters; /* KCMs waiting for a TX psock */
+};
+
+#endif /* __NET_KCM_H_ */
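
The kcm_mux above is the rendezvous point between KCM sockets and attached
lower sockets, with separate locks and wait lists for the RX and TX paths. A
minimal sketch of the initialization this structure implies is shown below;
the helper name is hypothetical (assuming a zeroed allocation, e.g. from
kmem_cache_zalloc), and the MUX creation path in kcmsock.c, past the excerpt
shown here, is the authoritative version.

	/* Hypothetical helper, not part of this patch */
	static void kcm_mux_init_sketch(struct kcm_mux *mux, struct kcm_net *knet)
	{
		mux->knet = knet;

		INIT_LIST_HEAD(&mux->kcm_socks);
		INIT_LIST_HEAD(&mux->psocks);

		/* Receive side */
		spin_lock_init(&mux->rx_lock);
		INIT_LIST_HEAD(&mux->kcm_rx_waiters);
		INIT_LIST_HEAD(&mux->psocks_ready);
		skb_queue_head_init(&mux->rx_hold_queue);

		/* Transmit side */
		spin_lock_init(&mux->lock);
		INIT_LIST_HEAD(&mux->psocks_avail);
		INIT_LIST_HEAD(&mux->kcm_tx_waiters);
	}
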
diff --git a/include/uapi/linux/kcm.h b/include/uapi/linux/kcm.h
new file mode 100644
index 000000000000..a5a530940b99
--- /dev/null
+++ b/include/uapi/linux/kcm.h
@@ -0,0 +1,40 @@
+/*
+ * Kernel Connection Multiplexor
+ *
+ * Copyright (c) 2016 Tom Herbert <tom@herbertland.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * User API to clone KCM sockets and attach transport sockets to a KCM
+ * multiplexor.
+ */
+
+#ifndef KCM_KERNEL_H
+#define KCM_KERNEL_H
+
+struct kcm_attach {
+ int fd;
+ int bpf_fd;
+};
+
+struct kcm_unattach {
+ int fd;
+};
+
+struct kcm_clone {
+ int fd;
+};
+
+#define SIOCKCMATTACH (SIOCPROTOPRIVATE + 0)
+#define SIOCKCMUNATTACH (SIOCPROTOPRIVATE + 1)
+#define SIOCKCMCLONE (SIOCPROTOPRIVATE + 2)
+
+#define KCMPROTO_CONNECTED 0
+
+/* Socket options */
+#define KCM_RECV_DISABLE 1
+
+#endif
+
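
These three ioctls are the entire userspace contract: create an AF_KCM
socket, then attach a connected transport socket together with a BPF program
that parses message lengths. A hedged sketch of that flow follows; descriptor
names are placeholders, error handling is abbreviated, and on older libc
headers AF_KCM may need to be defined by hand.

	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <linux/kcm.h>
	#include <linux/sockios.h>

	/* tcp_fd: a connected TCP socket; bpf_prog_fd: a loaded BPF program
	 * that returns the length of the next message in the stream.
	 */
	int kcm_attach_sketch(int tcp_fd, int bpf_prog_fd)
	{
		struct kcm_attach attach_info = {
			.fd = tcp_fd,
			.bpf_fd = bpf_prog_fd,
		};
		int kcm_fd;

		/* Each recvmsg() on the KCM socket returns one parsed message */
		kcm_fd = socket(AF_KCM, SOCK_DGRAM, KCMPROTO_CONNECTED);
		if (kcm_fd < 0)
			return -1;

		/* Hand the TCP socket and framing program to the MUX */
		if (ioctl(kcm_fd, SIOCKCMATTACH, &attach_info) < 0)
			return -1;

		return kcm_fd;
	}
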
diff --git a/net/Kconfig b/net/Kconfig
index 2760825e53fa..10640d5f8bee 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -360,6 +360,7 @@ source "net/can/Kconfig"
source "net/irda/Kconfig"
source "net/bluetooth/Kconfig"
source "net/rxrpc/Kconfig"
+source "net/kcm/Kconfig"
config FIB_RULES
bool
diff --git a/net/Makefile b/net/Makefile
index a5d04098dfce..81d14119eab5 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -34,6 +34,7 @@ obj-$(CONFIG_IRDA) += irda/
obj-$(CONFIG_BT) += bluetooth/
obj-$(CONFIG_SUNRPC) += sunrpc/
obj-$(CONFIG_AF_RXRPC) += rxrpc/
+obj-$(CONFIG_AF_KCM) += kcm/
obj-$(CONFIG_ATM) += atm/
obj-$(CONFIG_L2TP) += l2tp/
obj-$(CONFIG_DECNET) += decnet/
diff --git a/net/kcm/Kconfig b/net/kcm/Kconfig
new file mode 100644
index 000000000000..5db94d940ecc
--- /dev/null
+++ b/net/kcm/Kconfig
@@ -0,0 +1,10 @@
+
+config AF_KCM
+ tristate "KCM sockets"
+ depends on INET
+ select BPF_SYSCALL
+ ---help---
+ KCM (Kernel Connection Multiplexor) sockets provide a method
+ for multiplexing messages of a message-based application
+ protocol over kernel connections (e.g. TCP connections).
+
diff --git a/net/kcm/Makefile b/net/kcm/Makefile
new file mode 100644
index 000000000000..cb525f7c5a13
--- /dev/null
+++ b/net/kcm/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_AF_KCM) += kcm.o
+
+kcm-y := kcmsock.o
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
new file mode 100644
index 000000000000..30ef69ac6b81
--- /dev/null
+++ b/net/kcm/kcmsock.c
@@ -0,0 +1,2016 @@
+#include <linux/bpf.h>
+#include <linux/errno.h>
+#include <linux/errqueue.h>
+#include <linux/file.h>
+#include <linux/in.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/net.h>
+#include <linux/netdevice.h>
+#include <linux/poll.h>
+#include <linux/rculist.h>
+#include <linux/skbuff.h>
+#include <linux/socket.h>
+#include <linux/uaccess.h>
+#include <linux/workqueue.h>
+#include <net/kcm.h>
+#include <net/netns/generic.h>
+#include <net/sock.h>
+#include <net/tcp.h>
+#include <uapi/linux/kcm.h>
+
+unsigned int kcm_net_id;
+
+static struct kmem_cache *kcm_psockp __read_mostly;
+static struct kmem_cache *kcm_muxp __read_mostly;
+static struct workqueue_struct *kcm_wq;
+
+static inline struct kcm_sock *kcm_sk(const struct sock *sk)
+{
+ return (struct kcm_sock *)sk;
+}
+
+static inline struct kcm_tx_msg *kcm_tx_msg(struct sk_buff *skb)
+{
+ return (struct kcm_tx_msg *)skb->cb;
+}
+
+static inline struct kcm_rx_msg *kcm_rx_msg(struct sk_buff *skb)
+{
+ return (struct kcm_rx_msg *)((void *)skb->cb +
+ offsetof(struct qdisc_skb_cb, data));
+}
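
kcm_rx_msg() deliberately places the per-message RX state at the data offset
of struct qdisc_skb_cb so that it can share skb->cb with qdisc users. A
compile-time guard along these lines (not part of this patch; it would sit in
an init function) would catch the state ever outgrowing the control block:

	/* Hypothetical guard: the RX state must fit in skb->cb behind the
	 * qdisc_skb_cb header it aliases.
	 */
	BUILD_BUG_ON(offsetof(struct qdisc_skb_cb, data) +
		     sizeof(struct kcm_rx_msg) >
		     FIELD_SIZEOF(struct sk_buff, cb));
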
+
+static void report_csk_error(struct sock *csk, int err)
+{
+ csk->sk_err = EPIPE;
+ csk->sk_error_report(csk);
+}
+
+/* Callback lock held */
+static void kcm_abort_rx_psock(struct kcm_psock *psock, int err,
+ struct sk_buff *skb)
+{
+ struct sock *csk = psock->sk;
+
+ /* Unrecoverable error in receive */
+
+ if (psock->rx_stopped)
+ return;
+
+ psock->rx_stopped = 1;
+
+ /* Report an error on the lower socket */
+ report_csk_error(csk, err);
+}
+
+static void kcm_abort_tx_psock(struct kcm_psock *psock, int err,
+ bool wakeup_kcm)
+{
+ struct sock *csk = psock->sk;
+ struct kcm_mux *mux = psock->mux;
+
+ /* Unrecoverable error in transmit */
+
+ spin_lock_bh(&mux->lock);
+
+ if (psock->tx_stopped) {
+ spin_unlock_bh(&mux->lock);
+ return;
+ }
+
+ psock->tx_stopped = 1;
+
+ if (!psock->tx_kcm) {
+ /* Take off psocks_avail list */
+ list_del(&psock->psock_avail_list);
+ } else if (wakeup_kcm) {
+ /* In this case the psock is being aborted while outside of
+ * kcm_write_msgs and is still reserved. Schedule tx_work
+ * to handle the failure there. Need to commit tx_stopped
+ * before queuing work.
+ */
+ smp_mb();
+
+ queue_work(kcm_wq, &psock->tx_kcm->tx_work);
+ }
+
+ spin_unlock_bh(&mux->lock);
+
+ /* Report error on lower socket */
+ report_csk_error(csk, err);
+}
+
+static int kcm_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
+
+/* KCM is ready to receive messages on its queue: either the KCM is new or
+ * has become unblocked after being blocked on a full socket buffer. Queue any
+ * pending ready messages on a psock. RX mux lock held.
+ */
+static void kcm_rcv_ready(struct kcm_sock *kcm)
+{
+ struct kcm_mux *mux = kcm->mux;
+ struct kcm_psock *psock;
+ struct sk_buff *skb;
+
+ if (unlikely(kcm->rx_wait || kcm->rx_psock || kcm->rx_disabled))
+ return;
+
+ while (unlikely((skb = __skb_dequeue(&mux->rx_hold_queue)))) {
+ if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
+ /* Assuming buffer limit has been reached */
+ skb_queue_head(&mux->rx_hold_queue, skb);
+ WARN_ON(!sk_rmem_alloc_get(&kcm->sk));
+ return;
+ }
+ }
+
+ while (!list_empty(&mux->psocks_ready)) {
+ psock = list_first_entry(&mux->psocks_ready, struct kcm_psock,
+ psock_ready_list);
+
+ if (kcm_queue_rcv_skb(&kcm->sk, psock->ready_rx_msg)) {
+ /* Assuming buffer limit has been reached */
+ WARN_ON(!sk_rmem_alloc_get(&kcm->sk));
+ return;
+ }
+
+ /* Consumed the ready message on the psock. Schedule rx_work to
+ * get more messages.
+ */
+ list_del(&psock->psock_ready_list);
+ psock->ready_rx_msg = NULL;
+
+ /* Commit clearing of ready_rx_msg for queuing work */
+ smp_mb();
+
+ queue_work(kcm_wq, &psock->rx_work);
+ }
+
+ /* Buffer limit is okay now, add to ready list */
+ list_add_tail(&kcm->wait_rx_list,
+ &kcm->mux->kcm_rx_waiters);
+ kcm->rx_wait = true;
+}
+
+static void kcm_rfree(struct sk_buff *skb)
+{
+ struct sock *sk = skb->sk;
+ struct kcm_sock *kcm = kcm_sk(sk);
+ struct kcm_mux *mux = kcm->mux;
+ unsigned int len = skb->truesize;
+
+ sk_mem_uncharge(sk, len);
+ atomic_sub(len, &sk->sk_rmem_alloc);
+
+ /* For reading rx_wait and rx_psock without holding lock */
+ smp_mb__after_atomic();
+
+ if (!kcm->rx_wait && !kcm->rx_psock &&
+ sk_rmem_alloc_get(sk) < sk->sk_rcvlowat) {
+ spin_lock_bh(&mux->rx_lock);
+ kcm_rcv_ready(kcm);
+ spin_unlock_bh(&mux->rx_lock);
+ }
+}
+
+static int kcm_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+{
+ struct sk_buff_head *list = &sk->sk_receive_queue;
+
+ if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
+ return -ENOMEM;
+
+ if (!sk_rmem_schedule(sk, skb, skb->truesize))
+ return -ENOBUFS;
+
+ skb->dev = NULL;
+
+ skb_orphan(skb);
+ skb->sk = sk;
+ skb->destructor = kcm_rfree;
+ atomic_add(skb->truesize, &sk->sk_rmem_alloc);
+ sk_mem_charge(sk, skb->truesize);
+
+ skb_queue_tail(list, skb);
+
+ if (!sock_flag(sk, SOCK_DEAD))
+ sk->sk_data_ready(sk);
+
+ return 0;
+}
+
+/* Requeue received messages for a kcm socket to other kcm sockets. This is
+ * called when a kcm socket is receive disabled.
+ * RX mux lock held.
+ */
+static void requeue_rx_msgs(struct kcm_mux *mux, struct sk_buff_head *head)
+{
+ struct sk_buff *skb;
+ struct kcm_sock *kcm;
+
+ while ((skb = __skb_dequeue(head))) {
+ /* Reset destructor to avoid calling kcm_rcv_ready */
+ skb->destructor = sock_rfree;
+ skb_orphan(skb);
+try_again:
+ if (list_empty(&mux->kcm_rx_waiters)) {
+ skb_queue_tail(&mux->rx_hold_queue, skb);
+ continue;
+ }
+
+ kcm = list_first_entry(&mux->kcm_rx_waiters,
+ struct kcm_sock, wait_rx_list);
+
+ if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
+ /* Should mean socket buffer full */
+ list_del(&kcm->wait_rx_list);
+ kcm->rx_wait = false;
+
+ /* Commit rx_wait to read in kcm_free */
+ smp_wmb();
+
+ goto try_again;
+ }
+ }
+}
+
+/* Lower sock lock held */
+static struct kcm_sock *reserve_rx_kcm(struct kcm_psock *psock,
+ struct sk_buff *head)
+{
+ struct kcm_mux *mux = psock->mux;
+ struct kcm_sock *kcm;
+
+ WARN_ON(psock->ready_rx_msg);
+
+ if (psock->rx_kcm)
+ return psock->rx_kcm;
+
+ spin_lock_bh(&mux->rx_lock);
+
+ if (psock->rx_kcm) {
+ spin_unlock_bh(&mux->rx_lock);
+ return psock->rx_kcm;
+ }
+
+ if (list_empty(&mux->kcm_rx_waiters)) {
+ psock->ready_rx_msg = head;
+ list_add_tail(&psock->psock_ready_list,
+ &mux->psocks_ready);
+ spin_unlock_bh(&mux->rx_lock);
+ return NULL;
+ }
+
+ kcm = list_first_entry(&mux->kcm_rx_waiters,
+ struct kcm_sock, wait_rx_list);
+ list_del(&kcm->wait_rx_list);
+ kcm->rx_wait = false;
+
+ psock->rx_kcm = kcm;
+ kcm->rx_psock = psock;
+
+ spin_unlock_bh(&mux->rx_lock);
+
+ return kcm;
+}
+
+static void kcm_done(struct kcm_sock *kcm);
+
+static void kcm_done_work(struct work_struct *w)
+{
+ kcm_done(container_of(w, struct kcm_sock, done_work));
+}
+
+/* Lower sock lock held */
+static void unreserve_rx_kcm(struct kcm_psock *psock,
+ bool rcv_ready)
+{
+ struct kcm_sock *kcm = psock->rx_kcm;
+ struct kcm_mux *mux = psock->mux;
+
+ if (!kcm)
+ return;
+
+ spin_lock_bh(&mux->rx_lock);
+
+ psock->rx_kcm = NULL;
+ kcm->rx_psock = NULL;
+
+ /* Commit kcm->rx_psock before sk_rmem_alloc_get to sync with
+ * kcm_rfree
+ */
+ smp_mb();
+
+ if (unlikely(kcm->done)) {
+ spin_unlock_bh(&mux->rx_lock);
+
+ /* Need to run kcm_done in a task since we need to acquire
+ * callback locks which may already be held here.
+ */
+ INIT_WORK(&kcm->done_work, kcm_done_work);
+ schedule_work(&kcm->done_work);
+ return;
+ }
+
+ if (unlikely(kcm->rx_disabled)) {
+ requeue_rx_msgs(mux, &kcm->sk.sk_receive_queue);
+ } else if (rcv_ready || unlikely(!sk_rmem_alloc_get(&kcm->sk))) {
+ /* Check for a degenerate race with rx_wait where all
+ * data was dequeued (accounted for in kcm_rfree).
+ */
+ kcm_rcv_ready(kcm);
+ }
+ spin_unlock_bh(&mux->rx_lock);
+}
+
+/* Macro to invoke filter function. */
+#define KCM_RUN_FILTER(prog, ctx) \
+ (*prog->bpf_func)(ctx, prog->insnsi)
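
KCM_RUN_FILTER hands the accumulated message head to the attached BPF
program, which must return the total length of the message at the current
offset, or zero if more header bytes are needed; both cases are handled in
kcm_tcp_recv() below. For a protocol framed by a two-byte big-endian length
field followed by the payload, the program can be a one-liner. This sketch
follows the style of the kernel's BPF samples, where load_half() wraps the
llvm.bpf.load.half builtin:

	/* Sketch: message = 2-byte BE payload length + payload */
	int kcm_parse_sketch(struct __sk_buff *skb)
	{
		/* Total message length: header plus advertised payload */
		return load_half(skb, 0) + 2;
	}
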
+
+/* Lower socket lock held */
+static int kcm_tcp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb,
+ unsigned int orig_offset, size_t orig_len)
+{
+ struct kcm_psock *psock = (struct kcm_psock *)desc->arg.data;
+ struct kcm_rx_msg *rxm;
+ struct kcm_sock *kcm;
+ struct sk_buff *head, *skb;
+ size_t eaten = 0, cand_len;
+ ssize_t extra;
+ int err;
+ bool cloned_orig = false;
+
+ if (psock->ready_rx_msg)
+ return 0;
+
+ head = psock->rx_skb_head;
+ if (head) {
+ /* Message already in progress */
+
+ if (unlikely(orig_offset)) {
+ /* Getting data with a non-zero offset when a message is
+ * in progress is not expected. If it does happen, we
+ * need to clone and pull since we can't deal with
+ * offsets in the skbs for a message except in the head.
+ */
+ orig_skb = skb_clone(orig_skb, GFP_ATOMIC);
+ if (!orig_skb) {
+ desc->error = -ENOMEM;
+ return 0;
+ }
+ if (!pskb_pull(orig_skb, orig_offset)) {
+ kfree_skb(orig_skb);
+ desc->error = -ENOMEM;
+ return 0;
+ }
+ cloned_orig = true;
+ orig_offset = 0;
+ }
+
+ if (!psock->rx_skb_nextp) {
+ /* We are going to append to the frag_list of head.
+ * Need to unshare the frag_list.
+ */
+ err = skb_unclone(head, GFP_ATOMIC);
+ if (err) {
+ desc->error = err;
+ return 0;
+ }
+
+ if (unlikely(skb_shinfo(head)->frag_list)) {
+ /* We can't append to an sk_buff that already
+ * has a frag_list. We create a new head, point
+ * the frag_list of that to the old head, and
+ * then are able to use the old head->next for
+ * appending to the message.
+ */
+ if (WARN_ON(head->next)) {
+ desc->error = -EINVAL;
+ return 0;
+ }
+
+ skb = alloc_skb(0, GFP_ATOMIC);
+ if (!skb) {
+ desc->error = -ENOMEM;
+ return 0;
+ }
+ skb->len = head->len;
+ skb->data_len = head->len;
+ skb->truesize = head->truesize;
+ *kcm_rx_msg(skb) = *kcm_rx_msg(head);
+ psock->rx_skb_nextp = &head->next;
+ skb_shinfo(skb)->frag_list = head;
+ psock->rx_skb_head = skb;
+ head = skb;
+ } else {
+ psock->rx_skb_nextp =
+ &skb_shinfo(head)->frag_list;
+ }
+ }
+ }
+
+ while (eaten < orig_len) {
+ /* Always clone since we will consume something */
+ skb = skb_clone(orig_skb, GFP_ATOMIC);
+ if (!skb) {
+ desc->error = -ENOMEM;
+ break;
+ }
+
+ cand_len = orig_len - eaten;
+
+ head = psock->rx_skb_head;
+ if (!head) {
+ head = skb;
+ psock->rx_skb_head = head;
+ /* Will set rx_skb_nextp on next packet if needed */
+ psock->rx_skb_nextp = NULL;
+ rxm = kcm_rx_msg(head);
+ memset(rxm, 0, sizeof(*rxm));
+ rxm->offset = orig_offset + eaten;
+ } else {
+ /* Unclone since we may be appending to an skb that we
+ * already share a frag_list with.
+ */
+ err = skb_unclone(skb, GFP_ATOMIC);
+ if (err) {
+ desc->error = err;
+ break;
+ }
+
+ rxm = kcm_rx_msg(head);
+ *psock->rx_skb_nextp = skb;
+ psock->rx_skb_nextp = &skb->next;
+ head->data_len += skb->len;
+ head->len += skb->len;
+ head->truesize += skb->truesize;
+ }
+
+ if (!rxm->full_len) {
+ ssize_t len;
+
+ len = KCM_RUN_FILTER(psock->bpf_prog, head);
+
+ if (!len) {
+ /* Need more header to determine length */
+ rxm->accum_len += cand_len;
+ eaten += cand_len;
+ WARN_ON(eaten != orig_len);
+ break;
+ } else if (len <= (ssize_t)head->len -
+ skb->len - rxm->offset) {
+ /* The parsed length must reach into the new skb (and
+ * must also be greater than zero)
+ */
+ desc->error = -EPROTO;
+ psock->rx_skb_head = NULL;
+ kcm_abort_rx_psock(psock, EPROTO, head);
+ break;
+ }
+
+ rxm->full_len = len;
+ }
+
+ extra = (ssize_t)(rxm->accum_len + cand_len) - rxm->full_len;
+
+ if (extra < 0) {
+ /* Message not complete yet. */
+ rxm->accum_len += cand_len;
+ eaten += cand_len;
+ WARN_ON(eaten != orig_len);
+ break;
+ }
+
+ /* Positive extra indicates more bytes than needed for the
+ * message
+ */
+
+ WARN_ON(extra > cand_len);
+
+ eaten += (cand_len - extra);
+
+ /* Hurray, we have a new message! */
+ psock->rx_skb_head = NULL;
+
+try_queue:
+ kcm = reserve_rx_kcm(psock, head);
+ if (!kcm) {
+ /* Unable to reserve a KCM, message is held in psock. */
+ break;
+ }
+
+ if (kcm_queue_rcv_skb(&kcm->sk, head)) {
+ /* Should mean socket buffer full */
+ unreserve_rx_kcm(psock, false);
+ goto try_queue;
+ }
+ }
+
+ if (cloned_orig)
+ kfree_skb(orig_skb);
+
+ return eaten;
+}
+
+/* Called with lock held on lower socket */
+static int psock_tcp_read_sock(struct kcm_psock *psock)
+{
+ read_descriptor_t desc;
+
+ desc.arg.data = psock;
+ desc.error = 0;
+ desc.count = 1; /* give more than one skb per call */
+
+ /* sk should be locked here, so okay to do tcp_read_sock */
+ tcp_read_sock(psock->sk, &desc, kcm_tcp_recv);
+
+ unreserve_rx_kcm(psock, true);
+
+ return desc.error;
+}
+
+/* Lower sock lock held */
+static void psock_tcp_data_ready(struct sock *sk)
+{
+ struct kcm_psock *psock;
+
+ read_lock_bh(&sk->sk_callback_lock);
+
+ psock = (struct kcm_psock *)sk->sk_user_data;
+ if (unlikely(!psock || psock->rx_stopped))
+ goto out;
+
+ if (psock->ready_rx_msg)
+ goto out;
+
+ if (psock_tcp_read_sock(psock) == -ENOMEM)
+ queue_delayed_work(kcm_wq, &psock->rx_delayed_work, 0);
+
+out:
+ read_unlock_bh(&sk->sk_callback_lock);
+}
+
+static void do_psock_rx_work(struct kcm_psock *psock)
+{
+ struct sock *csk = psock->sk;
+
+ /* We need the read lock to synchronize with psock_tcp_data_ready. We
+ * need the socket lock for calling tcp_read_sock.
+ */
+ lock_sock(csk);
+ read_lock_bh(&csk->sk_callback_lock);
+
+ if (unlikely(csk->sk_user_data != psock))
+ goto out;
+
+ if (unlikely(psock->rx_stopped))
+ goto out;
+
+ if (psock->ready_rx_msg)
+ goto out;
+
+ if (psock_tcp_read_sock(psock) == -ENOMEM)
+ queue_delayed_work(kcm_wq, &psock->rx_delayed_work, 0);
+
+out:
+ read_unlock_bh(&csk->sk_callback_lock);
+ release_sock(csk);
+}
+
+static void psock_rx_work(struct work_struct *w)
+{
+ do_psock_rx_work(container_of(w, struct kcm_psock, rx_work));
+}
+
+static void psock_rx_delayed_work(struct work_struct *w)
+{
+ do_psock_rx_work(container_of(w, struct kcm_psock,
+ rx_delayed_work.work));
+}
+
+static void psock_tcp_state_change(struct sock *sk)
+{
+ /* TCP only does a POLLIN for a half close. Do a POLLHUP here
+ * since the application will normally not poll with POLLIN
+ * on the TCP sockets.
+ */
+
+ report_csk_error(sk, EPIPE);
+}
+
+static void psock_tcp_write_space(struct sock *sk)
+{
+ struct kcm_psock *psock;
+ struct kcm_mux *mux;
+ struct kcm_sock *kcm;
+
+ read_lock_bh(&sk->sk_callback_lock);
+
+ psock = (struct kcm_psock *)sk->sk_user_data;
+ if (unlikely(!psock))
+ goto out;
+
+ mux = psock->mux;
+
+ spin_lock_bh(&mux->lock);
+
+ /* Check if the psock is reserved; if so, a KCM socket is waiting to send. */
+ kcm = psock->tx_kcm;
+ if (kcm)
+ queue_work(kcm_wq, &kcm->tx_work);
+
+ spin_unlock_bh(&mux->lock);
+out:
+ read_unlock_bh(&sk->sk_callback_lock);
+}
+
+static void unreserve_psock(struct kcm_sock *kcm);
+
+/* kcm sock is locked. */
+static struct kcm_psock *reserve_psock(struct kcm_sock *kcm)
+{
+ struct kcm_mux *mux = kcm->mux;
+ struct kcm_psock *psock;
+
+ psock = kcm->tx_psock;
+
+ smp_rmb(); /* Must read tx_psock before tx_wait */
+
+ if (psock) {
+ WARN_ON(kcm->tx_wait);
+ if (unlikely(psock->tx_stopped))
+ unreserve_psock(kcm);
+ else
+ return kcm->tx_psock;
+ }
+
+ spin_lock_bh(&mux->lock);
+
+ /* Check again under lock to see if a psock was reserved for this
+ * kcm in the meantime (via psock_now_avail).
+ */
+ psock = kcm->tx_psock;
+ if (unlikely(psock)) {
+ WARN_ON(kcm->tx_wait);
+ spin_unlock_bh(&mux->lock);
+ return kcm->tx_psock;
+ }
+
+ if (!list_empty(&mux->psocks_avail)) {
+ psock = list_first_entry(&mux->psocks_avail,
+ struct kcm_psock,
+ psock_avail_list);
+ list_del(&psock->psock_avail_list);
+ if (kcm->tx_wait) {
+ list_del(&kcm->wait_psock_list);
+ kcm->tx_wait = false;
+ }
+ kcm->tx_psock = psock;
+ psock->tx_kcm = kcm;
+ } else if (!kcm->tx_wait) {
+ list_add_tail(&kcm->wait_psock_list,
+ &mux->kcm_tx_waiters);
+ kcm->tx_wait = true;
+ }
+
+ spin_unlock_bh(&mux->lock);
+
+ return psock;
+}
+
+/* mux lock held */
+static void psock_now_avail(struct kcm_psock *psock)
+{
+ struct kcm_mux *mux = psock->mux;
+ struct kcm_sock *kcm;
+
+ if (list_empty(&mux->kcm_tx_waiters)) {
+ list_add_tail(&psock->psock_avail_list,
+ &mux->psocks_avail);
+ } else {
+ kcm = list_first_entry(&mux->kcm_tx_waiters,
+ struct kcm_sock,
+ wait_psock_list);
+ list_del(&kcm->wait_psock_list);
+ kcm->tx_wait = false;
+ psock->tx_kcm = kcm;
+
+ /* Commit before changing tx_psock since that is read in
+ * reserve_psock before queuing work.
+ */
+ smp_mb();
+
+ kcm->tx_psock = psock;
+ queue_work(kcm_wq, &kcm->tx_work);
+ }
+}
+
+/* kcm sock is locked. */
+static void unreserve_psock(struct kcm_sock *kcm)
+{
+ struct kcm_psock *psock;
+ struct kcm_mux *mux = kcm->mux;
+
+ spin_lock_bh(&mux->lock);
+
+ psock = kcm->tx_psock;
+
+ if (WARN_ON(!psock)) {
+ spin_unlock_bh(&mux->lock);
+ return;
+ }
+
+ smp_rmb(); /* Read tx_psock before tx_wait */
+
+ WARN_ON(kcm->tx_wait);
+
+ kcm->tx_psock = NULL;
+ psock->tx_kcm = NULL;
+
+ if (unlikely(psock->tx_stopped)) {
+ if (psock->done) {
+ /* Deferred free */
+ list_del(&psock->psock_list);
+ mux->psocks_cnt--;
+ sock_put(psock->sk);
+ fput(psock->sk->sk_socket->file);
+ kmem_cache_free(kcm_psockp, psock);
+ }
+
+ /* Don't put back on available list */
+
+ spin_unlock_bh(&mux->lock);
+
+ return;
+ }
+
+ psock_now_avail(psock);
+
+ spin_unlock_bh(&mux->lock);
+}
+
+/* Write any messages ready on the kcm socket. Called with kcm sock lock
+ * held. Return bytes actually sent or error.
+ */
+static int kcm_write_msgs(struct kcm_sock *kcm)
+{
+ struct sock *sk = &kcm->sk;
+ struct kcm_psock *psock;
+ struct sk_buff *skb, *head;
+ struct kcm_tx_msg *txm;
+ unsigned short fragidx, frag_offset;
+ unsigned int sent, total_sent = 0;
+ int ret = 0;
+
+ kcm->tx_wait_more = false;
+ psock = kcm->tx_psock;
+ if (unlikely(psock && psock->tx_stopped)) {
+ /* A reserved psock was aborted asynchronously. Unreserve
+ * it and we'll retry the message.
+ */
+ unreserve_psock(kcm);
+ if (skb_queue_empty(&sk->sk_write_queue))
+ return 0;
+
+ kcm_tx_msg(skb_peek(&sk->sk_write_queue))->sent = 0;
+
+ } else if (skb_queue_empty(&sk->sk_write_queue)) {
+ return 0;
+ }
+
+ head = skb_peek(&sk->sk_write_queue);
+ txm = kcm_tx_msg(head);
+
+ if (txm->sent) {
+ /* Send of first skbuff in queue already in progress */
+ if (WARN_ON(!psock)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ sent = txm->sent;
+ frag_offset = txm->frag_offset;
+ fragidx = txm->fragidx;
+ skb = txm->frag_skb;
+
+ goto do_frag;
+ }
+
+try_again:
+ psock = reserve_psock(kcm);
+ if (!psock)
+ goto out;
+
+ do {
+ skb = head;
+ txm = kcm_tx_msg(head);
+ sent = 0;
+
+do_frag_list:
+ if (WARN_ON(!skb_shinfo(skb)->nr_frags)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags;
+ fragidx++) {
+ skb_frag_t *frag;
+
+ frag_offset = 0;
+do_frag:
+ frag = &skb_shinfo(skb)->frags[fragidx];
+ if (WARN_ON(!frag->size)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = kernel_sendpage(psock->sk->sk_socket,
+ frag->page.p,
+ frag->page_offset + frag_offset,
+ frag->size - frag_offset,
+ MSG_DONTWAIT);
+ if (ret <= 0) {
+ if (ret == -EAGAIN) {
+ /* Save state to try again when there's
+ * write space on the socket
+ */
+ txm->sent = sent;
+ txm->frag_offset = frag_offset;
+ txm->fragidx = fragidx;
+ txm->frag_skb = skb;
+
+ ret = 0;
+ goto out;
+ }
+
+ /* Hard failure in sending message, abort this
+ * psock since it has lost framing
+ * synchronization and retry sending the
+ * message from the beginning.
+ */
+ kcm_abort_tx_psock(psock, ret ? -ret : EPIPE,
+ true);
+ unreserve_psock(kcm);
+
+ txm->sent = 0;
+ ret = 0;
+
+ goto try_again;
+ }
+
+ sent += ret;
+ frag_offset += ret;
+ if (frag_offset < frag->size) {
+ /* Not finished with this frag */
+ goto do_frag;
+ }
+ }
+
+ if (skb == head) {
+ if (skb_has_frag_list(skb)) {
+ skb = skb_shinfo(skb)->frag_list;
+ goto do_frag_list;
+ }
+ } else if (skb->next) {
+ skb = skb->next;
+ goto do_frag_list;
+ }
+
+ /* Successfully sent the whole packet, account for it. */
+ skb_dequeue(&sk->sk_write_queue);
+ kfree_skb(head);
+ sk->sk_wmem_queued -= sent;
+ total_sent += sent;
+ } while ((head = skb_peek(&sk->sk_write_queue)));
+out:
+ if (!head) {
+ /* Done with all queued messages. */
+ WARN_ON(!skb_queue_empty(&sk->sk_write_queue));
+ unreserve_psock(kcm);
+ }
+
+ /* Check if write space is available */
+ sk->sk_write_space(sk);
+
+ return total_sent ? : ret;
+}
+
+static void kcm_tx_work(struct work_struct *w)
+{
+ struct kcm_sock *kcm = container_of(w, struct kcm_sock, tx_work);
+ struct sock *sk = &kcm->sk;
+ int err;
+
+ lock_sock(sk);
+
+ /* Primarily for SOCK_DGRAM sockets; also handle asynchronous tx
+ * aborts
+ */
+ err = kcm_write_msgs(kcm);
+ if (err < 0) {
+ /* Hard failure in write, report error on KCM socket */
+ pr_warn("KCM: Hard failure on kcm_write_msgs %d\n", err);
+ report_csk_error(&kcm->sk, -err);
+ goto out;
+ }
+
+ /* Primarily for SOCK_SEQPACKET sockets */
+ if (likely(sk->sk_socket) &&
+ test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
+ clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+ sk->sk_write_space(sk);
+ }
+
+out: