Re: ovf scheduler

To: Julian Anastasov <ja@xxxxxx>
Subject: Re: ovf scheduler
Cc: lvs-devel@xxxxxxxxxxxxxxx
From: Raducu Deaconu <rhadoo.io88@xxxxxxxxx>
Date: Fri, 17 Jul 2015 08:53:52 +0300
Hello,

I have made the corrections.


Subject: [PATCH] Add ovf scheduler

Signed-off-by: Raducu Deaconu <rhadoo.io88@xxxxxxxxx>
---
 net/netfilter/ipvs/Kconfig     |   11 +++++
 net/netfilter/ipvs/Makefile    |    1 +
 net/netfilter/ipvs/ip_vs_ovf.c |   86 ++++++++++++++++++++++++++++++++++++++++
 3 files changed, 98 insertions(+)
 create mode 100644 net/netfilter/ipvs/ip_vs_ovf.c

diff --git a/net/netfilter/ipvs/Kconfig b/net/netfilter/ipvs/Kconfig
index 3b6929d..b32fb0d 100644
--- a/net/netfilter/ipvs/Kconfig
+++ b/net/netfilter/ipvs/Kconfig
@@ -162,6 +162,17 @@ config  IP_VS_FO
          If you want to compile it in kernel, say Y. To compile it as a
          module, choose M here. If unsure, say N.

+config  IP_VS_OVF
+       tristate "weighted overflow scheduling"
+       ---help---
+         The weighted overflow scheduling algorithm directs network
+         connections to the server with the highest weight that is
+         currently available and overflows to the next when active
+         connections exceed the node's weight.
+
+         If you want to compile it in kernel, say Y. To compile it as a
+         module, choose M here. If unsure, say N.
+
 config IP_VS_LBLC
        tristate "locality-based least-connection scheduling"
        ---help---
diff --git a/net/netfilter/ipvs/Makefile b/net/netfilter/ipvs/Makefile
index 38b2723..67f3f43 100644
--- a/net/netfilter/ipvs/Makefile
+++ b/net/netfilter/ipvs/Makefile
@@ -27,6 +27,7 @@ obj-$(CONFIG_IP_VS_WRR) += ip_vs_wrr.o
 obj-$(CONFIG_IP_VS_LC) += ip_vs_lc.o
 obj-$(CONFIG_IP_VS_WLC) += ip_vs_wlc.o
 obj-$(CONFIG_IP_VS_FO) += ip_vs_fo.o
+obj-$(CONFIG_IP_VS_OVF) += ip_vs_ovf.o
 obj-$(CONFIG_IP_VS_LBLC) += ip_vs_lblc.o
 obj-$(CONFIG_IP_VS_LBLCR) += ip_vs_lblcr.o
 obj-$(CONFIG_IP_VS_DH) += ip_vs_dh.o
diff --git a/net/netfilter/ipvs/ip_vs_ovf.c b/net/netfilter/ipvs/ip_vs_ovf.c
new file mode 100644
index 0000000..f7d62c3
--- /dev/null
+++ b/net/netfilter/ipvs/ip_vs_ovf.c
@@ -0,0 +1,86 @@
+/*
+ * IPVS:        Overflow-Connection Scheduling module
+ *
+ * Authors:     Raducu Deaconu <rhadoo_io@xxxxxxxxx>
+ *
+ *              This program is free software; you can redistribute it and/or
+ *              modify it under the terms of the GNU General Public License
+ *              as published by the Free Software Foundation; either version
+ *              2 of the License, or (at your option) any later version.
+ *
+ * Scheduler implements "overflow" load balancing according to the number of
+ * active connections: it will keep all connections on the node with the
+ * highest weight and overflow to the next node if the number of connections
+ * exceeds the node's weight.
+ * Note that this scheduler might not be suitable for UDP because it only uses
+ * active connections.
+ *
+ */
+
+#define KMSG_COMPONENT "IPVS"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+#include <net/ip_vs.h>
+
+/* OVF Connection scheduling  */
+static struct ip_vs_dest *
+ip_vs_ovf_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
+                  struct ip_vs_iphdr *iph)
+{
+       struct ip_vs_dest *dest, *h = NULL;
+       int hw = 0, w;
+
+       IP_VS_DBG(6, "ip_vs_ovf_schedule(): Scheduling...\n");
+       /* select the node with highest weight, go to next in line if active
+        * connections exceed weight
+        */
+       list_for_each_entry_rcu(dest, &svc->destinations, n_list) {
+               w = atomic_read(&dest->weight);
+               if ((dest->flags & IP_VS_DEST_F_OVERLOAD) ||
+                   atomic_read(&dest->activeconns) > w ||
+                   w == 0)
+                       continue;
+               if (!h || w > hw) {
+                       h = dest;
+                       hw = w;
+               }
+       }
+
+       if (h) {
+               IP_VS_DBG_BUF(6, "OVF: server %s:%u active %d w %d\n",
+                             IP_VS_DBG_ADDR(h->af, &h->addr),
+                             ntohs(h->port),
+                             atomic_read(&h->activeconns),
+                             atomic_read(&h->weight));
+               return h;
+       }
+
+       ip_vs_scheduler_err(svc, "no destination available");
+       return NULL;
+}
+
+static struct ip_vs_scheduler ip_vs_ovf_scheduler = {
+       .name =                 "ovf",
+       .refcnt =               ATOMIC_INIT(0),
+       .module =               THIS_MODULE,
+       .n_list =               LIST_HEAD_INIT(ip_vs_ovf_scheduler.n_list),
+       .schedule =             ip_vs_ovf_schedule,
+};
+
+static int __init ip_vs_ovf_init(void)
+{
+       return register_ip_vs_scheduler(&ip_vs_ovf_scheduler);
+}
+
+static void __exit ip_vs_ovf_cleanup(void)
+{
+       unregister_ip_vs_scheduler(&ip_vs_ovf_scheduler);
+       synchronize_rcu();
+}
+
+module_init(ip_vs_ovf_init);
+module_exit(ip_vs_ovf_cleanup);
+MODULE_LICENSE("GPL");
-- 
1.7.10.4
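
A minimal user-space sketch of the selection rule above (not part of the
patch; the struct, names and sample data are illustrative stand-ins for
the IPVS structures): pick the available destination with the highest
weight whose active connection count does not exceed that weight, so
traffic overflows to the next-heaviest node only once the preferred one
is full.

#include <stdio.h>

/* Simplified stand-in for struct ip_vs_dest. */
struct dest {
        const char *name;
        int weight;             /* 0 means the node is not eligible */
        int activeconns;        /* current active connections */
        int overloaded;         /* models IP_VS_DEST_F_OVERLOAD */
};

/* Same rule as ip_vs_ovf_schedule(): highest weight wins; skip nodes
 * that are overloaded, have zero weight, or whose active connections
 * already exceed their weight.
 */
static struct dest *ovf_pick(struct dest *d, int n)
{
        struct dest *h = NULL;
        int hw = 0, i, w;

        for (i = 0; i < n; i++) {
                w = d[i].weight;
                if (d[i].overloaded || d[i].activeconns > w || w == 0)
                        continue;
                if (!h || w > hw) {
                        h = &d[i];
                        hw = w;
                }
        }
        return h;
}

int main(void)
{
        struct dest pool[] = {
                { "rs1", 100, 100, 0 }, /* at its weight, still eligible (100 > 100 is false) */
                { "rs2",  50,  10, 0 },
        };
        struct dest *h = ovf_pick(pool, 2);

        printf("picked %s\n", h ? h->name : "none"); /* rs1; rs2 once rs1 exceeds 100 */
        return 0;
}

Once the module is loaded, the scheduler is selected by the name set in
the patch ("ovf"), e.g. via ipvsadm's -s option when adding a virtual
service.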

On Thu, Jul 16, 2015 at 9:27 PM, Julian Anastasov <ja@xxxxxx> wrote:
>
>         Hello,
>
> On Thu, 16 Jul 2015, Raducu Deaconu wrote:
>
>> Hello,
>>
>> I have readjusted the patch, sorry for the code style issues, this is
>> my first attempt at a contribution.
>
>         No worries. But one new problem...
>
>> +               if (h) {
>> +                       IP_VS_DBG_BUF(6, "OVF: server %s:%u active %d w %d\n",
>> +                                     IP_VS_DBG_ADDR(svc->af, &h->addr),
>
>         Indentation of this 'if' block should be fixed.
> It was correct in first patch. Also, change svc->af to
> h->af in above line.
>
>> +                                     ntohs(h->port),
>> +                                     atomic_read(&h->activeconns),
>> +                                     atomic_read(&h->weight));
>> +               return h;
>> +               }
>
> Regards
>
> --
> Julian Anastasov <ja@xxxxxx>

Attachment: addovf.patch
Description: Text Data
