Diffstat (limited to 'net')
 net/9p/trans_fd.c            | 26
 net/atm/common.c             |  8
 net/batman-adv/icmp_socket.c |  2
 net/batman-adv/log.c         |  2
 net/bluetooth/af_bluetooth.c | 16
 net/caif/caif_socket.c       | 12
 net/core/datagram.c          | 16
 net/core/sock.c              | 10
 net/core/stream.c            |  4
 net/dccp/proto.c             | 12
 net/decnet/af_decnet.c       |  2
 net/ipv4/af_inet.c           |  2
 net/ipv4/tcp.c               | 34
 net/ipv4/tcp_input.c         |  2
 net/ipv4/udp.c               |  6
 net/iucv/af_iucv.c           | 18
 net/kcm/kcmsock.c            |  6
 net/nfc/llcp_sock.c          | 16
 net/packet/af_packet.c       |  4
 net/phonet/socket.c          | 10
 net/rds/af_rds.c             | 16
 net/rfkill/core.c            |  4
 net/rxrpc/af_rxrpc.c         |  4
 net/sctp/socket.c            | 20
 net/smc/af_smc.c             | 24
 net/smc/smc_rx.c             |  4
 net/smc/smc_tx.c             |  4
 net/sunrpc/cache.c           |  4
 net/sunrpc/rpc_pipe.c        |  6
 net/tipc/socket.c            | 22
 net/unix/af_unix.c           | 40
 net/vmw_vsock/af_vsock.c     | 30
 32 files changed, 193 insertions(+), 193 deletions(-)
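
This is the bulk POLL* -> EPOLL* rename as it lands under net/. In-kernel
->poll() handlers return __poll_t, a sparse-checked bitwise type, and only
the EPOLL* constants carry the matching annotation; the legacy POLL* values
are plain integers (and some differ between architectures), so mixing them
into a __poll_t mask defeats the type checking. For reference, a sketch of
the relevant definitions, abridged from include/uapi/linux/types.h and
include/uapi/linux/eventpoll.h (exact text varies by kernel version):

	/* include/uapi/linux/types.h (sketch) */
	typedef unsigned __bitwise __poll_t;

	/* include/uapi/linux/eventpoll.h (abridged) */
	#define EPOLLIN		(__force __poll_t)0x00000001
	#define EPOLLPRI	(__force __poll_t)0x00000002
	#define EPOLLOUT	(__force __poll_t)0x00000004
	#define EPOLLERR	(__force __poll_t)0x00000008
	#define EPOLLHUP	(__force __poll_t)0x00000010
	#define EPOLLNVAL	(__force __poll_t)0x00000020
	#define EPOLLRDNORM	(__force __poll_t)0x00000040
	#define EPOLLRDBAND	(__force __poll_t)0x00000080
	#define EPOLLWRNORM	(__force __poll_t)0x00000100
	#define EPOLLWRBAND	(__force __poll_t)0x00000200
	#define EPOLLRDHUP	(__force __poll_t)0x00002000
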
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index d6f7f7cb79c4..0cfba919d167 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -240,7 +240,7 @@ p9_fd_poll(struct p9_client *client, struct poll_table_struct *pt, int *err)
if (!ts) {
if (err)
*err = -EREMOTEIO;
- return POLLERR;
+ return EPOLLERR;
}
if (!ts->rd->f_op->poll)
@@ -253,7 +253,7 @@ p9_fd_poll(struct p9_client *client, struct poll_table_struct *pt, int *err)
n = DEFAULT_POLLMASK;
else
n = ts->wr->f_op->poll(ts->wr, pt);
- ret = (ret & ~POLLOUT) | (n & ~POLLIN);
+ ret = (ret & ~EPOLLOUT) | (n & ~EPOLLIN);
}
return ret;
@@ -396,11 +396,11 @@ end_clear:
if (!list_empty(&m->req_list)) {
if (test_and_clear_bit(Rpending, &m->wsched))
- n = POLLIN;
+ n = EPOLLIN;
else
n = p9_fd_poll(m->client, NULL, NULL);
- if ((n & POLLIN) && !test_and_set_bit(Rworksched, &m->wsched)) {
+ if ((n & EPOLLIN) && !test_and_set_bit(Rworksched, &m->wsched)) {
p9_debug(P9_DEBUG_TRANS, "sched read work %p\n", m);
schedule_work(&m->rq);
}
@@ -505,11 +505,11 @@ end_clear:
if (m->wsize || !list_empty(&m->unsent_req_list)) {
if (test_and_clear_bit(Wpending, &m->wsched))
- n = POLLOUT;
+ n = EPOLLOUT;
else
n = p9_fd_poll(m->client, NULL, NULL);
- if ((n & POLLOUT) &&
+ if ((n & EPOLLOUT) &&
!test_and_set_bit(Wworksched, &m->wsched)) {
p9_debug(P9_DEBUG_TRANS, "sched write work %p\n", m);
schedule_work(&m->wq);
@@ -599,12 +599,12 @@ static void p9_conn_create(struct p9_client *client)
init_poll_funcptr(&m->pt, p9_pollwait);
n = p9_fd_poll(client, &m->pt, NULL);
- if (n & POLLIN) {
+ if (n & EPOLLIN) {
p9_debug(P9_DEBUG_TRANS, "mux %p can read\n", m);
set_bit(Rpending, &m->wsched);
}
- if (n & POLLOUT) {
+ if (n & EPOLLOUT) {
p9_debug(P9_DEBUG_TRANS, "mux %p can write\n", m);
set_bit(Wpending, &m->wsched);
}
@@ -625,12 +625,12 @@ static void p9_poll_mux(struct p9_conn *m)
return;
n = p9_fd_poll(m->client, NULL, &err);
- if (n & (POLLERR | POLLHUP | POLLNVAL)) {
+ if (n & (EPOLLERR | EPOLLHUP | EPOLLNVAL)) {
p9_debug(P9_DEBUG_TRANS, "error mux %p err %d\n", m, n);
p9_conn_cancel(m, err);
}
- if (n & POLLIN) {
+ if (n & EPOLLIN) {
set_bit(Rpending, &m->wsched);
p9_debug(P9_DEBUG_TRANS, "mux %p can read\n", m);
if (!test_and_set_bit(Rworksched, &m->wsched)) {
@@ -639,7 +639,7 @@ static void p9_poll_mux(struct p9_conn *m)
}
}
- if (n & POLLOUT) {
+ if (n & EPOLLOUT) {
set_bit(Wpending, &m->wsched);
p9_debug(P9_DEBUG_TRANS, "mux %p can write\n", m);
if ((m->wsize || !list_empty(&m->unsent_req_list)) &&
@@ -678,11 +678,11 @@ static int p9_fd_request(struct p9_client *client, struct p9_req_t *req)
spin_unlock(&client->lock);
if (test_and_clear_bit(Wpending, &m->wsched))
- n = POLLOUT;
+ n = EPOLLOUT;
else
n = p9_fd_poll(m->client, NULL, NULL);
- if (n & POLLOUT && !test_and_set_bit(Wworksched, &m->wsched))
+ if (n & EPOLLOUT && !test_and_set_bit(Wworksched, &m->wsched))
schedule_work(&m->wq);
return 0;
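
A note on the pattern above: p9_conn_create() registers its own callback
with init_poll_funcptr() so the mux gets woken when the underlying file
becomes ready, then calls the file's ->poll() to read the current mask.
A minimal sketch of that in-kernel polling setup, with hypothetical my_*
names (newer kernels would use the vfs_poll() helper rather than calling
f->f_op->poll directly):

	#include <linux/poll.h>

	/* Called once for each wait queue the polled file hands us. */
	static void my_pollwait(struct file *file,
				wait_queue_head_t *wait_address, poll_table *p)
	{
		/* add our own wait entry to wait_address here */
	}

	static void my_poll_setup(struct poll_table_struct *pt, struct file *f)
	{
		__poll_t mask;

		init_poll_funcptr(pt, my_pollwait);
		mask = f->f_op->poll(f, pt);
		if (mask & EPOLLIN) {
			/* readable right now; no wakeup will announce old data */
		}
	}
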
diff --git a/net/atm/common.c b/net/atm/common.c
index 6523f38c4957..fc78a0508ae1 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -661,15 +661,15 @@ __poll_t vcc_poll(struct file *file, struct socket *sock, poll_table *wait)
/* exceptional events */
if (sk->sk_err)
- mask = POLLERR;
+ mask = EPOLLERR;
if (test_bit(ATM_VF_RELEASED, &vcc->flags) ||
test_bit(ATM_VF_CLOSE, &vcc->flags))
- mask |= POLLHUP;
+ mask |= EPOLLHUP;
/* readable? */
if (!skb_queue_empty(&sk->sk_receive_queue))
- mask |= POLLIN | POLLRDNORM;
+ mask |= EPOLLIN | EPOLLRDNORM;
/* writable? */
if (sock->state == SS_CONNECTING &&
@@ -678,7 +678,7 @@ __poll_t vcc_poll(struct file *file, struct socket *sock, poll_table *wait)
if (vcc->qos.txtp.traffic_class != ATM_NONE &&
vcc_writable(sk))
- mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
+ mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
return mask;
}
diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c
index 581375d0eed2..e91f29c7c638 100644
--- a/net/batman-adv/icmp_socket.c
+++ b/net/batman-adv/icmp_socket.c
@@ -304,7 +304,7 @@ static __poll_t batadv_socket_poll(struct file *file, poll_table *wait)
poll_wait(file, &socket_client->queue_wait, wait);
if (socket_client->queue_len > 0)
- return POLLIN | POLLRDNORM;
+ return EPOLLIN | EPOLLRDNORM;
return 0;
}
diff --git a/net/batman-adv/log.c b/net/batman-adv/log.c
index 9be74a44e99d..dc9fa37ddd14 100644
--- a/net/batman-adv/log.c
+++ b/net/batman-adv/log.c
@@ -193,7 +193,7 @@ static __poll_t batadv_log_poll(struct file *file, poll_table *wait)
poll_wait(file, &debug_log->queue_wait, wait);
if (!batadv_log_empty(debug_log))
- return POLLIN | POLLRDNORM;
+ return EPOLLIN | EPOLLRDNORM;
return 0;
}
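
The two batman-adv hunks above are the conversion at its simplest: a
->poll() handler that registers on a wait queue, then reports readiness
bits. For reference, a minimal sketch of that canonical pattern after the
rename (struct foo_dev and its fields are hypothetical):

	#include <linux/poll.h>

	struct foo_dev {
		wait_queue_head_t queue_wait;
		bool data_ready;
	};

	static __poll_t foo_poll(struct file *file, poll_table *wait)
	{
		struct foo_dev *dev = file->private_data;

		/* Register the caller; cheap, and never blocks. */
		poll_wait(file, &dev->queue_wait, wait);

		if (dev->data_ready)
			return EPOLLIN | EPOLLRDNORM;

		return 0;
	}
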
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index f897681780db..84d92a077834 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -431,7 +431,7 @@ static inline __poll_t bt_accept_poll(struct sock *parent)
if (sk->sk_state == BT_CONNECTED ||
(test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags) &&
sk->sk_state == BT_CONNECT2))
- return POLLIN | POLLRDNORM;
+ return EPOLLIN | EPOLLRDNORM;
}
return 0;
@@ -451,20 +451,20 @@ __poll_t bt_sock_poll(struct file *file, struct socket *sock,
return bt_accept_poll(sk);
if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
- mask |= POLLERR |
- (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);
+ mask |= EPOLLERR |
+ (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
if (sk->sk_shutdown & RCV_SHUTDOWN)
- mask |= POLLRDHUP | POLLIN | POLLRDNORM;
+ mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
if (sk->sk_shutdown == SHUTDOWN_MASK)
- mask |= POLLHUP;
+ mask |= EPOLLHUP;
if (!skb_queue_empty(&sk->sk_receive_queue))
- mask |= POLLIN | POLLRDNORM;
+ mask |= EPOLLIN | EPOLLRDNORM;
if (sk->sk_state == BT_CLOSED)
- mask |= POLLHUP;
+ mask |= EPOLLHUP;
if (sk->sk_state == BT_CONNECT ||
sk->sk_state == BT_CONNECT2 ||
@@ -472,7 +472,7 @@ __poll_t bt_sock_poll(struct file *file, struct socket *sock,
return mask;
if (!test_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags) && sock_writeable(sk))
- mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
+ mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
else
sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index b109445a1df9..a6fb1b3bcad9 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -924,7 +924,7 @@ static int caif_release(struct socket *sock)
caif_disconnect_client(sock_net(sk), &cf_sk->layer);
cf_sk->sk.sk_socket->state = SS_DISCONNECTING;
- wake_up_interruptible_poll(sk_sleep(sk), POLLERR|POLLHUP);
+ wake_up_interruptible_poll(sk_sleep(sk), EPOLLERR|EPOLLHUP);
sock_orphan(sk);
sk_stream_kill_queues(&cf_sk->sk);
@@ -946,23 +946,23 @@ static __poll_t caif_poll(struct file *file,
/* exceptional events? */
if (sk->sk_err)
- mask |= POLLERR;
+ mask |= EPOLLERR;
if (sk->sk_shutdown == SHUTDOWN_MASK)
- mask |= POLLHUP;
+ mask |= EPOLLHUP;
if (sk->sk_shutdown & RCV_SHUTDOWN)
- mask |= POLLRDHUP;
+ mask |= EPOLLRDHUP;
/* readable? */
if (!skb_queue_empty(&sk->sk_receive_queue) ||
(sk->sk_shutdown & RCV_SHUTDOWN))
- mask |= POLLIN | POLLRDNORM;
+ mask |= EPOLLIN | EPOLLRDNORM;
/*
* we set writable also when the other side has shut down the
* connection. This prevents stuck sockets.
*/
if (sock_writeable(sk) && tx_flow_is_on(cf_sk))
- mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
+ mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
return mask;
}
diff --git a/net/core/datagram.c b/net/core/datagram.c
index b7d9293940b5..9938952c5c78 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -75,7 +75,7 @@ static int receiver_wake_function(wait_queue_entry_t *wait, unsigned int mode, i
/*
* Avoid a wakeup if event not interesting for us
*/
- if (key && !(key_to_poll(key) & (POLLIN | POLLERR)))
+ if (key && !(key_to_poll(key) & (EPOLLIN | EPOLLERR)))
return 0;
return autoremove_wake_function(wait, mode, sync, key);
}
@@ -842,22 +842,22 @@ __poll_t datagram_poll(struct file *file, struct socket *sock,
/* exceptional events? */
if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
- mask |= POLLERR |
- (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);
+ mask |= EPOLLERR |
+ (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
if (sk->sk_shutdown & RCV_SHUTDOWN)
- mask |= POLLRDHUP | POLLIN | POLLRDNORM;
+ mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
if (sk->sk_shutdown == SHUTDOWN_MASK)
- mask |= POLLHUP;
+ mask |= EPOLLHUP;
/* readable? */
if (!skb_queue_empty(&sk->sk_receive_queue))
- mask |= POLLIN | POLLRDNORM;
+ mask |= EPOLLIN | EPOLLRDNORM;
/* Connection-based need to check for termination and startup */
if (connection_based(sk)) {
if (sk->sk_state == TCP_CLOSE)
- mask |= POLLHUP;
+ mask |= EPOLLHUP;
/* connection hasn't started yet? */
if (sk->sk_state == TCP_SYN_SENT)
return mask;
@@ -865,7 +865,7 @@ __poll_t datagram_poll(struct file *file, struct socket *sock,
/* writable? */
if (sock_writeable(sk))
- mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
+ mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
else
sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
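
datagram_poll() is the generic handler most datagram sockets reuse, so the
mask it builds here is what userspace ultimately receives. A sketch of the
consuming side (a plain Linux epoll loop; the socket is left unbound, so
nothing actually becomes readable in this toy):

	#include <stdio.h>
	#include <sys/epoll.h>
	#include <sys/socket.h>

	int main(void)
	{
		int epfd = epoll_create1(0);
		int fd = socket(AF_INET, SOCK_DGRAM, 0);
		struct epoll_event ev = { .events = EPOLLIN, .data.fd = fd };

		/* EPOLLERR/EPOLLHUP are always reported; no need to request them. */
		epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &ev);

		struct epoll_event out;
		if (epoll_wait(epfd, &out, 1, 0) == 1 && (out.events & EPOLLIN))
			printf("readable\n");	/* sk_receive_queue non-empty */
		return 0;
	}
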
diff --git a/net/core/sock.c b/net/core/sock.c
index b026e1717df4..c501499a04fe 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2619,7 +2619,7 @@ static void sock_def_error_report(struct sock *sk)
rcu_read_lock();
wq = rcu_dereference(sk->sk_wq);
if (skwq_has_sleeper(wq))
- wake_up_interruptible_poll(&wq->wait, POLLERR);
+ wake_up_interruptible_poll(&wq->wait, EPOLLERR);
sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
rcu_read_unlock();
}
@@ -2631,8 +2631,8 @@ static void sock_def_readable(struct sock *sk)
rcu_read_lock();
wq = rcu_dereference(sk->sk_wq);
if (skwq_has_sleeper(wq))
- wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLPRI |
- POLLRDNORM | POLLRDBAND);
+ wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN | EPOLLPRI |
+ EPOLLRDNORM | EPOLLRDBAND);
sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
rcu_read_unlock();
}
@@ -2649,8 +2649,8 @@ static void sock_def_write_space(struct sock *sk)
if ((refcount_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
wq = rcu_dereference(sk->sk_wq);
if (skwq_has_sleeper(wq))
- wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
- POLLWRNORM | POLLWRBAND);
+ wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
+ EPOLLWRNORM | EPOLLWRBAND);
/* Should agree with poll, otherwise some programs break */
if (sock_writeable(sk))
diff --git a/net/core/stream.c b/net/core/stream.c
index 1cff9c6270c6..7d329fb1f553 100644
--- a/net/core/stream.c
+++ b/net/core/stream.c
@@ -38,8 +38,8 @@ void sk_stream_write_space(struct sock *sk)
rcu_read_lock();
wq = rcu_dereference(sk->sk_wq);
if (skwq_has_sleeper(wq))
- wake_up_interruptible_poll(&wq->wait, POLLOUT |
- POLLWRNORM | POLLWRBAND);
+ wake_up_interruptible_poll(&wq->wait, EPOLLOUT |
+ EPOLLWRNORM | EPOLLWRBAND);
if (wq && wq->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN))
sock_wake_async(wq, SOCK_WAKE_SPACE, POLL_OUT);
rcu_read_unlock();
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 74685fecfdb9..15bdc002d90c 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -338,21 +338,21 @@ __poll_t dccp_poll(struct file *file, struct socket *sock,
mask = 0;
if (sk->sk_err)
- mask = POLLERR;
+ mask = EPOLLERR;
if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == DCCP_CLOSED)
- mask |= POLLHUP;
+ mask |= EPOLLHUP;
if (sk->sk_shutdown & RCV_SHUTDOWN)
- mask |= POLLIN | POLLRDNORM | POLLRDHUP;
+ mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
/* Connected? */
if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_RESPOND)) {
if (atomic_read(&sk->sk_rmem_alloc) > 0)
- mask |= POLLIN | POLLRDNORM;
+ mask |= EPOLLIN | EPOLLRDNORM;
if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
if (sk_stream_is_writeable(sk)) {
- mask |= POLLOUT | POLLWRNORM;
+ mask |= EPOLLOUT | EPOLLWRNORM;
} else { /* send SIGIO later */
sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
@@ -362,7 +362,7 @@ __poll_t dccp_poll(struct file *file, struct socket *sock,
* IO signal will be lost.
*/
if (sk_stream_is_writeable(sk))
- mask |= POLLOUT | POLLWRNORM;
+ mask |= EPOLLOUT | EPOLLWRNORM;
}
}
}
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index cc1b505453a8..91dd09f79808 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -1216,7 +1216,7 @@ static __poll_t dn_poll(struct file *file, struct socket *sock, poll_table *wai
__poll_t mask = datagram_poll(file, sock, wait);
if (!skb_queue_empty(&scp->other_receive_queue))
- mask |= POLLRDBAND;
+ mask |= EPOLLRDBAND;
return mask;
}
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index c24008daa3d8..e4329e161943 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -828,7 +828,7 @@ int inet_shutdown(struct socket *sock, int how)
case TCP_CLOSE:
err = -ENOTCONN;
/* Hack to wake up other listeners, who can poll for
- POLLHUP, even on eg. unconnected UDP sockets -- RR */
+ EPOLLHUP, even on eg. unconnected UDP sockets -- RR */
/* fall through */
default:
sk->sk_shutdown |= how;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index c059aa7df0a9..48636aee23c3 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -512,36 +512,36 @@ __poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
mask = 0;
/*
- * POLLHUP is certainly not done right. But poll() doesn't
+ * EPOLLHUP is certainly not done right. But poll() doesn't
* have a notion of HUP in just one direction, and for a
* socket the read side is more interesting.
*
- * Some poll() documentation says that POLLHUP is incompatible
- * with the POLLOUT/POLLWR flags, so somebody should check this
+ * Some poll() documentation says that EPOLLHUP is incompatible
+ * with the EPOLLOUT/POLLWR flags, so somebody should check this
* all. But careful, it tends to be safer to return too many
* bits than too few, and you can easily break real applications
* if you don't tell them that something has hung up!
*
* Check-me.
*
- * Check number 1. POLLHUP is _UNMASKABLE_ event (see UNIX98 and
+ * Check number 1. EPOLLHUP is _UNMASKABLE_ event (see UNIX98 and
* our fs/select.c). It means that after we received EOF,
* poll always returns immediately, making impossible poll() on write()
- * in state CLOSE_WAIT. One solution is evident --- to set POLLHUP
+ * in state CLOSE_WAIT. One solution is evident --- to set EPOLLHUP
* if and only if shutdown has been made in both directions.
* Actually, it is interesting to look how Solaris and DUX
- * solve this dilemma. I would prefer, if POLLHUP were maskable,
+ * solve this dilemma. I would prefer, if EPOLLHUP were maskable,
* then we could set it on SND_SHUTDOWN. BTW examples given
* in Stevens' books assume exactly this behaviour, it explains
- * why POLLHUP is incompatible with POLLOUT. --ANK
+ * why EPOLLHUP is incompatible with EPOLLOUT. --ANK
*
* NOTE. Check for TCP_CLOSE is added. The goal is to prevent
* blocking on fresh not-connected or disconnected socket. --ANK
*/
if (sk->sk_shutdown == SHUTDOWN_MASK || state == TCP_CLOSE)
- mask |= POLLHUP;
+ mask |= EPOLLHUP;
if (sk->sk_shutdown & RCV_SHUTDOWN)
- mask |= POLLIN | POLLRDNORM | POLLRDHUP;
+ mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
/* Connected or passive Fast Open socket? */
if (state != TCP_SYN_SENT &&
@@ -554,11 +554,11 @@ __poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
target++;
if (tp->rcv_nxt - tp->copied_seq >= target)
- mask |= POLLIN | POLLRDNORM;
+ mask |= EPOLLIN | EPOLLRDNORM;
if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
if (sk_stream_is_writeable(sk)) {
- mask |= POLLOUT | POLLWRNORM;
+ mask |= EPOLLOUT | EPOLLWRNORM;
} else { /* send SIGIO later */
sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
@@ -570,24 +570,24 @@ __poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
*/
smp_mb__after_atomic();
if (sk_stream_is_writeable(sk))
- mask |= POLLOUT | POLLWRNORM;
+ mask |= EPOLLOUT | EPOLLWRNORM;
}
} else
- mask |= POLLOUT | POLLWRNORM;
+ mask |= EPOLLOUT | EPOLLWRNORM;
if (tp->urg_data & TCP_URG_VALID)
- mask |= POLLPRI;
+ mask |= EPOLLPRI;
} else if (state == TCP_SYN_SENT && inet_sk(sk)->defer_connect) {
/* Active TCP fastopen socket with defer_connect
- * Return POLLOUT so application can call write()
+ * Return EPOLLOUT so application can call write()
* in order for kernel to generate SYN+data
*/
- mask |= POLLOUT | POLLWRNORM;
+ mask |= EPOLLOUT | EPOLLWRNORM;
}
/* This barrier is coupled with smp_wmb() in tcp_reset() */
smp_rmb();
if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
- mask |= POLLERR;
+ mask |= EPOLLERR;
return mask;
}
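
The "unmaskable" behaviour the long comment above discusses is easy to see
from userspace: poll(2) reports POLLHUP (like POLLERR and POLLNVAL) even
when the caller requests no events at all. A small demonstration sketch
using an AF_UNIX socketpair whose peer has hung up:

	#include <poll.h>
	#include <stdio.h>
	#include <sys/socket.h>
	#include <unistd.h>

	int main(void)
	{
		int sv[2];

		if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) < 0)
			return 1;
		close(sv[1]);	/* peer hangs up */

		struct pollfd pfd = { .fd = sv[0], .events = 0 };
		poll(&pfd, 1, 0);

		/* POLLHUP shows up although .events asked for nothing. */
		printf("revents = %#x, POLLHUP = %#x\n", pfd.revents, POLLHUP);
		return 0;
	}
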
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index cfa51cfd2d99..575d3c1fb6e8 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -315,7 +315,7 @@ static void tcp_sndbuf_expand(struct sock *sk)
/* Fast Recovery (RFC 5681 3.2) :
* Cubic needs 1.7 factor, rounded to 2 to include
- * extra cushion (application might react slowly to POLLOUT)
+ * extra cushion (application might react slowly to EPOLLOUT)
*/
sndmem = ca_ops->sndbuf_expand ? ca_ops->sndbuf_expand(sk) : 2;
sndmem *= nr_segs * per_mss;
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index f81f969f9c06..bfaefe560b5c 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -2501,12 +2501,12 @@ __poll_t udp_poll(struct file *file, struct socket *sock, poll_table *wait)
struct sock *sk = sock->sk;
if (!skb_queue_empty(&udp_sk(sk)->reader_queue))
- mask |= POLLIN | POLLRDNORM;
+ mask |= EPOLLIN | EPOLLRDNORM;
/* Check for false positives due to checksum errors */
- if ((mask & POLLRDNORM) && !(file->f_flags & O_NONBLOCK) &&
+ if ((mask & EPOLLRDNORM) && !(file->f_flags & O_NONBLOCK) &&
!(sk->sk_shutdown & RCV_SHUTDOWN) && first_packet_length(sk) == -1)
- mask &= ~(POLLIN | POLLRDNORM);
+ mask &= ~(EPOLLIN | EPOLLRDNORM);
return mask;
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 64331158d693..1e8cc7bcbca3 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -1483,7 +1483,7 @@ static inline __poll_t iucv_accept_poll(struct sock *parent)
sk = (struct sock *) isk;
if (sk->sk_state == IUCV_CONNECTED)
- return POLLIN | POLLRDNORM;
+ return EPOLLIN | EPOLLRDNORM;
}
return 0;
@@ -1501,27 +1501,27 @@ __poll_t iucv_sock_poll(struct file *file, struct socket *sock,
return iucv_accept_poll(sk);
if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
- mask |= POLLERR |
- (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);
+ mask |= EPOLLERR |
+ (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
if (sk->sk_shutdown & RCV_SHUTDOWN)
- mask |= POLLRDHUP;
+ mask |= EPOLLRDHUP;
if (sk->sk_shutdown == SHUTDOWN_MASK)
- mask |= POLLHUP;
+ mask |= EPOLLHUP;
if (!skb_queue_empty(&sk->sk_receive_queue) ||
(sk->sk_shutdown & RCV_SHUTDOWN))
- mask |= POLLIN | POLLRDNORM;
+ mask |= EPOLLIN | EPOLLRDNORM;
if (sk->sk_state == IUCV_CLOSED)
- mask |= POLLHUP;
+ mask |= EPOLLHUP;
if (sk->sk_state == IUCV_DISCONN)
- mask |= POLLIN;
+ mask |= EPOLLIN;
if (sock_writeable(sk) && iucv_below_msglim(sk))
- mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
+ mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
else
sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
index 4a8d407f8902..f297d53a11aa 100644
--- a/net/kcm/kcmsock.c
+++ b/net/kcm/kcmsock.c
@@ -396,8 +396,8 @@ static int kcm_read_sock_done(struct strparser *strp, int err)
static void psock_state_change(struct sock *sk)
{
- /* TCP only does a POLLIN for a half close. Do a POLLHUP here
- * since application will normally not poll with POLLIN
+ /* TCP only does a EPOLLIN for a half close. Do a EPOLLHUP here
+ * since application will normally not poll with EPOLLIN
* on the TCP sockets.
*/
@@ -1338,7 +1338,7 @@ static void init_kcm_sock(struct kcm_sock *kcm, struct kcm_mux *mux)
/* For SOCK_SEQPACKET sock type, datagram_poll checks the sk_state, so
* we set sk_state, otherwise epoll_wait always returns right away with
- * POLLHUP
+ * EPOLLHUP
*/
kcm->sk.sk_state = TCP_ESTABLISHED;
diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
index 985909f105eb..376040092142 100644
--- a/net/nfc/llcp_sock.c
+++ b/net/nfc/llcp_sock.c
@@ -543,7 +543,7 @@ static inline __poll_t llcp_accept_poll(struct sock *parent)
sk = &llcp_sock->sk;
if (sk->sk_state == LLCP_CONNECTED)
- return POLLIN | POLLRDNORM;
+ return EPOLLIN | EPOLLRDNORM;
}
return 0;
@@ -563,23 +563,23 @@ static __poll_t llcp_sock_poll(struct file *file, struct socket *sock,
return llcp_accept_poll(sk);
if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
- mask |= POLLERR |
- (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);
+ mask |= EPOLLERR |
+ (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
if (!skb_queue_empty(&sk->sk_receive_queue))
- mask |= POLLIN | POLLRDNORM;
+ mask |= EPOLLIN | EPOLLRDNORM;
if (sk->sk_state == LLCP_CLOSED)
- mask |= POLLHUP;
+ mask |= EPOLLHUP;
if (sk->sk_shutdown & RCV_SHUTDOWN)
- mask |= POLLRDHUP | POLLIN | POLLRDNORM;
+ mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
if (sk->sk_shutdown == SHUTDOWN_MASK)
- mask |= POLLHUP;
+ mask |= EPOLLHUP;
if (sock_writeable(sk) && sk->sk_state == LLCP_CONNECTED)
- mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
+ mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
else
sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 1d1483007e46..e0f3f4aeeb4f 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -4085,7 +4085,7 @@ static __poll_t packet_poll(struct file *file, struct socket *sock,
if (po->rx_ring.pg_vec) {
if (!packet_previous_rx_frame(po, &po->rx_ring,
TP_STATUS_KERNEL))
- mask |= POLLIN | POLLRDNORM;
+ mask |= EPOLLIN | EPOLLRDNORM;
}
if (po->pressure && __packet_rcv_has_room(po, NULL) == ROOM_NORMAL)
po->pressure = 0;
@@ -4093,7 +4093,7 @@ static __poll_t packet_poll(struct file *file, struct socket *sock,
spin_lock_bh(&sk->sk_write_queue.lock);
if (po->tx_ring.pg_vec) {
if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
- mask |= POLLOUT | POLLWRNORM;
+ mask |= EPOLLOUT | EPOLLWRNORM;
}
spin_unlock_bh(&sk->sk_write_queue.lock);
return mask;
diff --git a/net/phonet/socket.c b/net/phonet/socket.c
index 08f6751d2030..fffcd69f63ff 100644
--- a/net/phonet/socket.c
+++ b/net/phonet/socket.c
@@ -351,18 +351,18 @@ static __poll_t pn_socket_poll(struct file *file, struct socket *sock,
poll_wait(file, sk_sleep(sk), wait);
if (sk->sk_state == TCP_CLOSE)
- return POLLERR;
+ return EPOLLERR;
if (!skb_queue_empty(&sk->sk_receive_queue))
- mask |= POLLIN | POLLRDNORM;
+ mask |= EPOLLIN | EPOLLRDNORM;
if (!skb_queue_empty(&pn->ctrlreq_queue))
- mask |= POLLPRI;
+ mask |= EPOLLPRI;
if (!mask && sk->sk_state == TCP_CLOSE_WAIT)
- return POLLHUP;
+ return EPOLLHUP;
if (sk->sk_state == TCP_ESTABLISHED &&
refcount_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf &&
atomic_read(&pn->tx_credits))
- mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
+ mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
return mask;
}
diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c
index 88aa8ad0f5b6..744c637c86b0 100644
--- a/net/rds/af_rds.c
+++ b/net/rds/af_rds.c
@@ -137,17 +137,17 @@ static int rds_getname(struct socket *sock, struct sockaddr *uaddr,
/*
* RDS' poll is without a doubt the least intuitive part of the interface,
- * as POLLIN and POLLOUT do not behave entirely as you would expect from
+ * as EPOLLIN and EPOLLOUT do not behave entirely as you would expect from
* a network protocol.
*
- * POLLIN is asserted if
+ * EPOLLIN is asserted if
* - there is data on the receive queue.
* - to signal that a previously congested destination may have become
* uncongested
* - A notification has been queued to the socket (this can be a congestion
* update, or a RDMA completion).
*
- * POLLOUT is asserted if there is room on the send queue. This does not mean
+ * EPOLLOUT is asserted if there is room on the send queue. This does not mean
* however, that the next sendmsg() call will succeed. If the application tries
* to send to a congested destination, the system call may still fail (and
* return ENOBUFS).
@@ -167,22 +167,22 @@ static __poll_t rds_poll(struct file *file, struct socket *sock,
read_lock_irqsave(&rs->rs_recv_lock, flags);
if (!rs->rs_cong_monitor) {
- /* When a congestion map was updated, we signal POLLIN for
+ /* When a congestion map was updated, we signal EPOLLIN for
* "historical" reasons. Applications can also poll for
* WRBAND instead. */
if (rds_cong_updated_since(&rs->rs_cong_track))
- mask |= (POLLIN | POLLRDNORM | POLLWRBAND);
+ mask |= (EPOLLIN | EPOLLRDNORM | EPOLLWRBAND);
} else {
spin_lock(&rs->rs_lock);
if (rs->rs_cong_notify)
- mask |= (POLLIN | POLLRDNORM);
+ mask |= (EPOLLIN | EPOLLRDNORM);
spin_unlock(&rs->rs_lock);
}
if (!list_empty(&rs->rs_recv_queue) ||
!list_empty(&rs->rs_notify_queue))
- mask |= (POLLIN | POLLRDNORM);
+ mask |= (EPOLLIN | EPOLLRDNORM);
if (rs->rs_snd_bytes < rds_sk_sndbuf(rs))
- mask |= (POLLOUT | POLLWRNORM);
+ mask |= (EPOLLOUT | EPOLLWRNORM);
read_unlock_irqrestore(&rs->rs_recv_lock, flags);
/* clear state any time we wake a seen-congested socket */
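
Per the comment block above, applications can watch for the WRBAND signal
rather than relying on the historical EPOLLIN assertion for congestion-map
updates. Roughly, on the userspace side (rds_fd is a hypothetical RDS
socket; with rs_cong_monitor off, congestion updates raise WRBAND alongside
the historical POLLIN):

	#include <poll.h>

	static int wait_for_rds_event(int rds_fd)
	{
		struct pollfd pfd = { .fd = rds_fd, .events = POLLIN | POLLWRBAND };

		if (poll(&pfd, 1, -1) < 0)
			return -1;
		if (pfd.revents & POLLWRBAND) {
			/* congestion map changed: retry congested sends */
		}
		if (pfd.revents & POLLIN) {
			/* message or notification on the receive queue */
		}
		return pfd.revents;
	}
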
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index 124c77e9d058..