|
Hello,
On Mon, 20 Apr 2026, Julian Anastasov wrote:
> Sashiko reports races and a possible crash around
> the usage of est_cpulist_valid and sysctl_est_cpulist.
> The problem is that we do not lock est_mutex in some
> places, which can lead to wrong write ordering and,
> as a result, problems when calling cpumask_weight()
> and cpumask_empty().
>
> Fix them by moving the est_max_threads read/write under
> locked est_mutex. Do the same for one ip_vs_est_reload_start()
> call to protect the cpumask_empty() usage of sysctl_est_cpulist.
>
> Link:
> https://sashiko.dev/#/patchset/20260331165015.2777765-1-longman%40redhat.com
> Fixes: f0be83d54217 ("ipvs: add est_cpulist and est_nice sysctl vars")
> Signed-off-by: Julian Anastasov <ja@xxxxxx>
According to Sashiko, this patch needs more
work; I'll send a new version when I'm ready...
pw-bot: changes-requested
> ---
>
> Note that this patch complements v2 of patchset from 31-MAR-26
> "ipvs: Fix incorrect use of HK_TYPE_KTHREAD housekeeping cpumask"
> and can be applied before it to avoid the bad AI reviews:
>
> https://lore.kernel.org/all/20260331165015.2777765-1-longman@xxxxxxxxxx/
>
> net/netfilter/ipvs/ip_vs_ctl.c | 5 +++++
> net/netfilter/ipvs/ip_vs_est.c | 22 +++++++++++++++-------
> 2 files changed, 20 insertions(+), 7 deletions(-)
>
> diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
> index caec516856e9..8778e174ef56 100644
> --- a/net/netfilter/ipvs/ip_vs_ctl.c
> +++ b/net/netfilter/ipvs/ip_vs_ctl.c
> @@ -1812,11 +1812,16 @@ ip_vs_add_service(struct netns_ipvs *ipvs, struct
> ip_vs_service_user_kern *u,
> *svc_p = svc;
>
> if (!READ_ONCE(ipvs->enable)) {
> + mutex_lock(&ipvs->est_mutex);
> +
> /* Now there is a service - full throttle */
> WRITE_ONCE(ipvs->enable, 1);
>
> + ipvs->est_max_threads = ip_vs_est_max_threads(ipvs);
> +
> /* Start estimation for first time */
> ip_vs_est_reload_start(ipvs);
> + mutex_unlock(&ipvs->est_mutex);
> }
>
> return 0;
> diff --git a/net/netfilter/ipvs/ip_vs_est.c b/net/netfilter/ipvs/ip_vs_est.c
> index 433ba3cab58c..6c9981d5611e 100644
> --- a/net/netfilter/ipvs/ip_vs_est.c
> +++ b/net/netfilter/ipvs/ip_vs_est.c
> @@ -68,6 +68,10 @@
> and the limit of estimators per kthread
> - est_add_ktid: ktid where to add new ests, can point to empty slot where
> we should add kt data
> + - data protected by service_mutex: est_temp_list, est_add_ktid
> + - data protected by est_mutex: est_kt_count, est_kt_arr, est_max_threads,
> + sysctl_est_cpulist, est_cpulist_valid, sysctl_est_nice, est_stopped,
> + sysctl_run_estimation
> */
>
> static struct lock_class_key __ipvs_est_key;
> @@ -229,6 +233,8 @@ static int ip_vs_estimation_kthread(void *data)
> /* Schedule stop/start for kthread tasks */
> void ip_vs_est_reload_start(struct netns_ipvs *ipvs)
> {
> + lockdep_assert_held(&ipvs->est_mutex);
> +
> /* Ignore reloads before first service is added */
> if (!READ_ONCE(ipvs->enable))
> return;
> @@ -304,12 +310,17 @@ static int ip_vs_est_add_kthread(struct netns_ipvs
> *ipvs)
> void *arr = NULL;
> int i;
>
> - if ((unsigned long)ipvs->est_kt_count >= ipvs->est_max_threads &&
> - READ_ONCE(ipvs->enable) && ipvs->est_max_threads)
> - return -EINVAL;
> -
> mutex_lock(&ipvs->est_mutex);
>
> + /* Allow kt 0 data to be created before the services are added
> + * and limit the kthreads when services are present.
> + */
> + if ((unsigned long)ipvs->est_kt_count >= ipvs->est_max_threads &&
> + READ_ONCE(ipvs->enable) && ipvs->est_max_threads) {
> + ret = -EINVAL;
> + goto out;
> + }
> +
> for (i = 0; i < id; i++) {
> if (!ipvs->est_kt_arr[i])
> break;
> @@ -485,9 +496,6 @@ int ip_vs_start_estimator(struct netns_ipvs *ipvs, struct
> ip_vs_stats *stats)
> struct ip_vs_estimator *est = &stats->est;
> int ret;
>
> - if (!ipvs->est_max_threads && READ_ONCE(ipvs->enable))
> - ipvs->est_max_threads = ip_vs_est_max_threads(ipvs);
> -
> est->ktid = -1;
> est->ktrow = IPVS_EST_NTICKS - 1; /* Initial delay */
>
> --
> 2.53.0
Regards
--
Julian Anastasov <ja@xxxxxx>
|