Hello,
On Tue, 29 Jul 2025, Frederic Weisbecker wrote:
> The estimator kthreads' affinity is defined by sysctl-overridable
> preferences and applied through a plain call to the scheduler's affinity
> API.
>
> However, since the introduction of managed kthread preferred affinity,
> such a practice bypasses the kthread core code, which eventually
> overwrites the target with the default unbound affinity.
>
> Fix this by using the appropriate kthread API.
>
> Fixes: d1a89197589c ("kthread: Default affine kthread to its preferred NUMA node")
> Signed-off-by: Frederic Weisbecker <frederic@xxxxxxxxxx>
Looks good to me for the nf tree, thanks!
Acked-by: Julian Anastasov <ja@xxxxxx>
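
Side note for readers of the archive: kthread_affine_preferred() is meant
to be called on a kthread that has been created but not yet woken up,
which is exactly what ip_vs_est_kthread_start() does in the hunk below.
A minimal sketch of that pattern (placeholder names, assumes
linux/kthread.h; this is not code from the patch):

	/* Illustrative only: names below are placeholders, not from the patch. */
	static int my_kthread_fn(void *data)
	{
		while (!kthread_should_stop())
			schedule_timeout_interruptible(HZ);
		return 0;
	}

	static struct task_struct *start_affine_kthread(const struct cpumask *mask)
	{
		struct task_struct *task;

		/* Create the kthread stopped, so the preferred mask can be
		 * recorded before it first runs. */
		task = kthread_create(my_kthread_fn, NULL, "my-kthread");
		if (IS_ERR(task))
			return task;

		/* The kthread core applies this mask and maintains it (e.g.
		 * across CPU hotplug) instead of the unbound default. */
		if (kthread_affine_preferred(task, mask))
			pr_warn("could not set preferred affinity\n");

		wake_up_process(task);
		return task;
	}
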
> ---
>  include/net/ip_vs.h            | 13 +++++++++++++
>  kernel/kthread.c               |  1 +
>  net/netfilter/ipvs/ip_vs_est.c |  3 ++-
>  3 files changed, 16 insertions(+), 1 deletion(-)
>
> diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
> index ff406ef4fd4a..29a36709e7f3 100644
> --- a/include/net/ip_vs.h
> +++ b/include/net/ip_vs.h
> @@ -1163,6 +1163,14 @@ static inline const struct cpumask *sysctl_est_cpulist(struct netns_ipvs *ipvs)
>  		return housekeeping_cpumask(HK_TYPE_KTHREAD);
>  }
>
> +static inline const struct cpumask *sysctl_est_preferred_cpulist(struct netns_ipvs *ipvs)
> +{
> +	if (ipvs->est_cpulist_valid)
> +		return ipvs->sysctl_est_cpulist;
> +	else
> +		return NULL;
> +}
> +
>  static inline int sysctl_est_nice(struct netns_ipvs *ipvs)
>  {
>  	return ipvs->sysctl_est_nice;
> @@ -1270,6 +1278,11 @@ static inline const struct cpumask *sysctl_est_cpulist(struct netns_ipvs *ipvs)
>  	return housekeeping_cpumask(HK_TYPE_KTHREAD);
>  }
>
> +static inline const struct cpumask *sysctl_est_preferred_cpulist(struct netns_ipvs *ipvs)
> +{
> +	return NULL;
> +}
> +
>  static inline int sysctl_est_nice(struct netns_ipvs *ipvs)
>  {
>  	return IPVS_EST_NICE;
> diff --git a/kernel/kthread.c b/kernel/kthread.c
> index 85e29b250107..adf06196b844 100644
> --- a/kernel/kthread.c
> +++ b/kernel/kthread.c
> @@ -899,6 +899,7 @@ int kthread_affine_preferred(struct task_struct *p, const struct cpumask *mask)
>
>  	return ret;
>  }
> +EXPORT_SYMBOL_GPL(kthread_affine_preferred);
>
>  static int kthreads_update_affinity(bool force)
>  {
> diff --git a/net/netfilter/ipvs/ip_vs_est.c b/net/netfilter/ipvs/ip_vs_est.c
> index f821ad2e19b3..15049b826732 100644
> --- a/net/netfilter/ipvs/ip_vs_est.c
> +++ b/net/netfilter/ipvs/ip_vs_est.c
> @@ -265,7 +265,8 @@ int ip_vs_est_kthread_start(struct netns_ipvs *ipvs,
>  	}
>
>  	set_user_nice(kd->task, sysctl_est_nice(ipvs));
> -	set_cpus_allowed_ptr(kd->task, sysctl_est_cpulist(ipvs));
> +	if (sysctl_est_preferred_cpulist(ipvs))
> +		kthread_affine_preferred(kd->task, sysctl_est_preferred_cpulist(ipvs));
>
>  	pr_info("starting estimator thread %d...\n", kd->id);
>  	wake_up_process(kd->task);
> --
> 2.48.1
Regards
--
Julian Anastasov <ja@xxxxxx>