LVS
lvs-devel
Google
 
Web LinuxVirtualServer.org

Re: [PATCHv6 net-next 13/14] ipvs: add ip_vs_status info

To: Julian Anastasov <ja@xxxxxx>
Subject: Re: [PATCHv6 net-next 13/14] ipvs: add ip_vs_status info
Cc: Simon Horman <horms@xxxxxxxxxxxx>, lvs-devel@xxxxxxxxxxxxxxx, netfilter-devel@xxxxxxxxxxxxxxx, Dust Li <dust.li@xxxxxxxxxxxxxxxxx>, Jiejian Wu <jiejian@xxxxxxxxxxxxxxxxx>, rcu@xxxxxxxxxxxxxxx
From: Pablo Neira Ayuso <pablo@xxxxxxxxxxxxx>
Date: Mon, 24 Nov 2025 22:42:06 +0100
On Sun, Oct 19, 2025 at 06:57:10PM +0300, Julian Anastasov wrote:
> Add /proc/net/ip_vs_status to show current state of IPVS.

The motivation for this new /proc interface is to provide output
that helps users decide when to shrink or grow the hashtable, which
is possible with the new sysctl knobs coming in patch 14/14 of this
series.

> Signed-off-by: Julian Anastasov <ja@xxxxxx>
> ---
>  net/netfilter/ipvs/ip_vs_ctl.c | 145 +++++++++++++++++++++++++++++++++
>  1 file changed, 145 insertions(+)
> 
> diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
> index 3dfc01ef1890..a508e9bdde73 100644
> --- a/net/netfilter/ipvs/ip_vs_ctl.c
> +++ b/net/netfilter/ipvs/ip_vs_ctl.c
> @@ -2915,6 +2915,144 @@ static int ip_vs_stats_percpu_show(struct seq_file *seq, void *v)
>  
>       return 0;
>  }
> +
> +static int ip_vs_status_show(struct seq_file *seq, void *v)
> +{
> +     struct net *net = seq_file_single_net(seq);
> +     struct netns_ipvs *ipvs = net_ipvs(net);
> +     unsigned int resched_score = 0;
> +     struct ip_vs_conn_hnode *hn;
> +     struct hlist_bl_head *head;
> +     struct ip_vs_service *svc;
> +     struct ip_vs_rht *t, *pt;
> +     struct hlist_bl_node *e;
> +     int old_gen, new_gen;
> +     u32 counts[8];
> +     u32 bucket;
> +     int count;
> +     u32 sum1;
> +     u32 sum;
> +     int i;
> +
> +     rcu_read_lock();
> +
> +     t = rcu_dereference(ipvs->conn_tab);
> +
> +     seq_printf(seq, "Conns:\t%d\n", atomic_read(&ipvs->conn_count));
> +     seq_printf(seq, "Conn buckets:\t%d (%d bits, lfactor %d)\n",
> +                t ? t->size : 0, t ? t->bits : 0, t ? t->lfactor : 0);
> +
> +     if (!atomic_read(&ipvs->conn_count))
> +             goto after_conns;
> +     old_gen = atomic_read(&ipvs->conn_tab_changes);
> +
> +repeat_conn:
> +     smp_rmb(); /* ipvs->conn_tab and conn_tab_changes */
> +     memset(counts, 0, sizeof(counts));
> +     ip_vs_rht_for_each_table_rcu(ipvs->conn_tab, t, pt) {
> +             for (bucket = 0; bucket < t->size; bucket++) {
> +                     DECLARE_IP_VS_RHT_WALK_BUCKET_RCU();
> +
> +                     count = 0;
> +                     resched_score++;
> +                     ip_vs_rht_walk_bucket_rcu(t, bucket, head) {
> +                             count = 0;
> +                             hlist_bl_for_each_entry_rcu(hn, e, head, node)
> +                                     count++;
> +                     }
> +                     resched_score += count;
> +                     if (resched_score >= 100) {
> +                             resched_score = 0;
> +                             cond_resched_rcu();
> +                             new_gen = atomic_read(&ipvs->conn_tab_changes);
> +                             /* New table installed ? */
> +                             if (old_gen != new_gen) {
> +                                     old_gen = new_gen;
> +                                     goto repeat_conn;
> +                             }
> +                     }
> +                     counts[min(count, (int)ARRAY_SIZE(counts) - 1)]++;
> +             }
> +     }
> +     for (sum = 0, i = 0; i < ARRAY_SIZE(counts); i++)
> +             sum += counts[i];
> +     sum1 = sum - counts[0];
> +     seq_printf(seq, "Conn buckets empty:\t%u (%lu%%)\n",
> +                counts[0], (unsigned long)counts[0] * 100 / max(sum, 1U));
> +     for (i = 1; i < ARRAY_SIZE(counts); i++) {
> +             if (!counts[i])
> +                     continue;
> +             seq_printf(seq, "Conn buckets len-%d:\t%u (%lu%%)\n",
> +                        i, counts[i],
> +                        (unsigned long)counts[i] * 100 / max(sum1, 1U));
> +     }
> +
> +after_conns:
> +     t = rcu_dereference(ipvs->svc_table);
> +
> +     count = ip_vs_get_num_services(ipvs);
> +     seq_printf(seq, "Services:\t%d\n", count);
> +     seq_printf(seq, "Service buckets:\t%d (%d bits, lfactor %d)\n",
> +                t ? t->size : 0, t ? t->bits : 0, t ? t->lfactor : 0);
> +
> +     if (!count)
> +             goto after_svc;
> +     old_gen = atomic_read(&ipvs->svc_table_changes);
> +
> +repeat_svc:
> +     smp_rmb(); /* ipvs->svc_table and svc_table_changes */
> +     memset(counts, 0, sizeof(counts));
> +     ip_vs_rht_for_each_table_rcu(ipvs->svc_table, t, pt) {
> +             for (bucket = 0; bucket < t->size; bucket++) {
> +                     DECLARE_IP_VS_RHT_WALK_BUCKET_RCU();
> +
> +                     count = 0;
> +                     resched_score++;
> +                     ip_vs_rht_walk_bucket_rcu(t, bucket, head) {
> +                             count = 0;
> +                             hlist_bl_for_each_entry_rcu(svc, e, head,
> +                                                         s_list)
> +                                     count++;
> +                     }
> +                     resched_score += count;
> +                     if (resched_score >= 100) {
> +                             resched_score = 0;
> +                             cond_resched_rcu();
> +                             new_gen = atomic_read(&ipvs->svc_table_changes);
> +                             /* New table installed ? */
> +                             if (old_gen != new_gen) {
> +                                     old_gen = new_gen;
> +                                     goto repeat_svc;
> +                             }
> +                     }
> +                     counts[min(count, (int)ARRAY_SIZE(counts) - 1)]++;
> +             }
> +     }
> +     for (sum = 0, i = 0; i < ARRAY_SIZE(counts); i++)
> +             sum += counts[i];
> +     sum1 = sum - counts[0];
> +     seq_printf(seq, "Service buckets empty:\t%u (%lu%%)\n",
> +                counts[0], (unsigned long)counts[0] * 100 / max(sum, 1U));
> +     for (i = 1; i < ARRAY_SIZE(counts); i++) {
> +             if (!counts[i])
> +                     continue;
> +             seq_printf(seq, "Service buckets len-%d:\t%u (%lu%%)\n",
> +                        i, counts[i],
> +                        (unsigned long)counts[i] * 100 / max(sum1, 1U));
> +     }
> +
> +after_svc:
> +     seq_printf(seq, "Stats thread slots:\t%d (max %lu)\n",
> +                ipvs->est_kt_count, ipvs->est_max_threads);
> +     seq_printf(seq, "Stats chain max len:\t%d\n", ipvs->est_chain_max);
> +     seq_printf(seq, "Stats thread ests:\t%d\n",
> +                ipvs->est_chain_max * IPVS_EST_CHAIN_FACTOR *
> +                IPVS_EST_NTICKS);
> +
> +     rcu_read_unlock();
> +     return 0;
> +}
> +
>  #endif
>  
>  /*
> @@ -4835,6 +4973,9 @@ int __net_init ip_vs_control_net_init(struct netns_ipvs *ipvs)
>                                   ipvs->net->proc_net,
>                                   ip_vs_stats_percpu_show, NULL))
>               goto err_percpu;
> +     if (!proc_create_net_single("ip_vs_status", 0, ipvs->net->proc_net,
> +                                 ip_vs_status_show, NULL))
> +             goto err_status;
>  #endif
>  
>       ret = ip_vs_control_net_init_sysctl(ipvs);
> @@ -4845,6 +4986,9 @@ int __net_init ip_vs_control_net_init(struct netns_ipvs *ipvs)
>  
>  err:
>  #ifdef CONFIG_PROC_FS
> +     remove_proc_entry("ip_vs_status", ipvs->net->proc_net);
> +
> +err_status:
>       remove_proc_entry("ip_vs_stats_percpu", ipvs->net->proc_net);
>  
>  err_percpu:
> @@ -4870,6 +5014,7 @@ void __net_exit ip_vs_control_net_cleanup(struct netns_ipvs *ipvs)
>       ip_vs_control_net_cleanup_sysctl(ipvs);
>       cancel_delayed_work_sync(&ipvs->est_reload_work);
>  #ifdef CONFIG_PROC_FS
> +     remove_proc_entry("ip_vs_status", ipvs->net->proc_net);
>       remove_proc_entry("ip_vs_stats_percpu", ipvs->net->proc_net);
>       remove_proc_entry("ip_vs_stats", ipvs->net->proc_net);
>       remove_proc_entry("ip_vs", ipvs->net->proc_net);
> -- 
> 2.51.0
> 
> 
> 


[Prev in Thread] Current Thread [Next in Thread]
  • Re: [PATCHv6 net-next 13/14] ipvs: add ip_vs_status info, Pablo Neira Ayuso <=