// SPDX-License-Identifier: GPL-2.0-or-later
/*
* net/sched/sch_fq.c Fair Queue Packet Scheduler (per flow pacing)
*
* Copyright (C) 2013-2023 Eric Dumazet <edumazet@google.com>
*
* Meant to be mostly used for locally generated traffic :
* Fast classification depends on skb->sk being set before reaching us.
* If not, (router workload), we use rxhash as fallback, with 32 bits wide hash.
* All packets belonging to a socket are considered as a 'flow'.
*
* Flows are dynamically allocated and stored in a hash table of RB trees
* They are also part of one Round Robin 'queues' (new or old flows)
*
* Burst avoidance (aka pacing) capability :
*
* Transport (eg TCP) can set in sk->sk_pacing_rate a rate, enqueue a
* bunch of packets, and this packet scheduler adds delay between
* packets to respect rate limitation.
*
* enqueue() :
* - lookup one RB tree (out of 1024 or more) to find the flow.
* If non existent flow, create it, add it to the tree.
* Add skb to the per flow list of skb (fifo).
* - Use a special fifo for high prio packets
*
* dequeue() : serves flows in Round Robin
* Note : When a flow becomes empty, we do not immediately remove it from
* rb trees, for performance reasons (it's expected to send additional packets,
* or SLAB cache will reuse socket for another flow)
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/hash.h>
#include <linux/prefetch.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/tcp.h>
/* Per-skb state kept in the qdisc private cb[] area (see fq_skb_cb()). */
struct fq_skb_cb {
	u64	time_to_send;	/* scheduled departure time, presumably in ns — see q->horizon users */
	u8	band;		/* priority band this packet was classified into */
};
static inline struct fq_skb_cb *fq_skb_cb(struct sk_buff *skb)
{
qdisc_cb_private_validate(skb, sizeof(struct fq_skb_cb));
return (struct fq_skb_cb *)qdisc_skb_cb(skb)->data;
}
/*
* Per flow structure, dynamically allocated.
* If packets have monotonically increasing time_to_send, they are placed in O(1)
* in linear list (head,tail), otherwise are placed in a rbtree (t_root).
*/
struct fq_flow {
/* First cache line : used in fq_gc(), fq_enqueue(), fq_dequeue() */
	struct rb_root	t_root;		/* out-of-order skbs, sorted by time_to_send */
	struct sk_buff	*head;		/* list of skbs for this flow : first skb */
	union {
		struct sk_buff *tail;	/* last skb in the list */
		unsigned long  age;	/* (jiffies | 1UL) when flow was emptied, for gc */
	};
	union {
		struct rb_node	fq_node;	/* anchor in fq_root[] trees */
		/* Following field is only used for q->internal,
		 * because q->internal is not hashed in fq_root[]
		 */
		u64		stat_fastpath_packets;
	};
	struct sock	*sk;		/* owning socket, used for fast classification */
	u32		socket_hash;	/* sk_hash, to detect socket reuse by another flow */
	int		qlen;		/* number of packets in flow queue */

/* Second cache line */
	int		credit;		/* remaining byte budget in current RR round */
	int		band;		/* priority band this flow belongs to */
	struct fq_flow *next;		/* next pointer in RR lists */

	struct rb_node  rate_node;	/* anchor in q->delayed tree */
	u64		time_next_packet;	/* pacing: earliest time next packet may leave */
};
/* Singly-linked FIFO of flows (via fq_flow->next), used for the RR lists. */
struct fq_flow_head {
	struct fq_flow *first;	/* next flow to be served */
	struct fq_flow *last;	/* tail, where refilled/new flows are appended */
};
/* Per-band Round Robin state: new flows are served before old ones. */
struct fq_perband_flows {
	struct fq_flow_head new_flows;	/* flows that were recently (re)activated */
	struct fq_flow_head old_flows;	/* flows already served at least once */
	int		    credit;	/* band byte budget for inter-band scheduling */
	int		    quantum;	/* based on band nr : 576KB, 192KB, 64KB */
};
#define FQ_PRIO2BAND_CRUMB_SIZE ((TC_PRIO_MAX + 1) >> 2)
struct fq_sched_data {
/* Read mostly cache line */
u32 quantum;
u32 initial_quantum;
u32 flow_refill_delay;
u32 flow_plimit; /* max packets per flow */
unsigned long flow_max_rate; /* optional max rate per flow */
u64 ce_threshold;
u64 horizon; /* horizon in ns */
u32 orphan_mask; /* mask for orphaned skb */
u32 low_rate_threshold;
struct rb_root *fq_root;
u8 rate_enable;
u8 fq_trees_log;
u8 horizon_drop;
u8 prio2band[FQ_PRIO2BAND_CRUMB_SIZE];
u32 timer_slack; /* hrtimer slack in ns */
/* Read/Write fields. */