The copyleft of this document belongs to yfydz. It is released under the GPL: it may be freely copied and reposted, but please keep the document intact when reposting; any commercial use is strictly forbidden.
msn:
Source:
http://yfydz.cublog.cn
13. IPVS Control
IPVS control covers defining the parameters of the virtual services that IPVS provides, the real destination servers, and other related settings.
IPVS control information is passed into the kernel through the setsockopt system call; the user-space management tool for IPVS is ipvsadm.
The IPVS control code is in net/ipv4/ipvs/ip_vs_ctl.c.
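To make this control path concrete, below is a minimal user-space sketch of the kind of call ipvsadm issues. It is illustrative only: the raw-socket usage and the <linux/ip_vs.h> include path are assumptions about how the user-space side (libipvs) is typically built, not a copy of the ipvsadm source; the IP_VS_SO_GET_INFO option and the struct ip_vs_getinfo fields correspond to the IP_VS_SO_GET_INFO branch of do_ip_vs_get_ctl() shown later in 13.3.

/* Illustrative user-space sketch: query basic IPVS information through
 * getsockopt(), the same channel that do_ip_vs_get_ctl() serves in the kernel. */
#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/ip_vs.h>          /* assumed location of the IPVS user API header */

int main(void)
{
    struct ip_vs_getinfo info;
    socklen_t len = sizeof(info);

    /* ipvsadm talks to IPVS over a raw IP socket; CAP_NET_ADMIN is required */
    int fd = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);
    if (fd < 0) {
        perror("socket");
        return 1;
    }

    /* handled by the IP_VS_SO_GET_INFO case of do_ip_vs_get_ctl() */
    if (getsockopt(fd, IPPROTO_IP, IP_VS_SO_GET_INFO, &info, &len) < 0) {
        perror("getsockopt(IP_VS_SO_GET_INFO)");
        return 1;
    }

    printf("version code %#x, conn table size %u, num services %u\n",
           info.version, info.size, info.num_services);
    return 0;
}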
13.1 Registering the sockopt operations
The registration reuses netfilter's struct nf_sockopt_ops:
static struct nf_sockopt_ops ip_vs_sockopts = {
.pf = PF_INET,
.set_optmin = IP_VS_BASE_CTL,
.set_optmax = IP_VS_SO_SET_MAX+1,
.set = do_ip_vs_set_ctl,
.get_optmin = IP_VS_BASE_CTL,
.get_optmax = IP_VS_SO_GET_MAX+1,
.get = do_ip_vs_get_ctl,
};
ret = nf_register_sockopt(&ip_vs_sockopts);
13.2 Set (write) control
static int
do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
{
int ret;
unsigned char arg[MAX_ARG_LEN];
struct ip_vs_service_user *usvc;
struct ip_vs_service *svc;
struct ip_vs_dest_user *udest;
// the calling process must have the network administration capability (CAP_NET_ADMIN)
if (!capable(CAP_NET_ADMIN))
return -EPERM;
// check that the data length passed from user space is correct
if (len != set_arglen[SET_CMDID(cmd)]) {
IP_VS_ERR("set_ctl: len %u != %u\n",
len, set_arglen[SET_CMDID(cmd)]);
return -EINVAL;
}
// copy the data from user space into kernel space
if (copy_from_user(arg, user, len) != 0)
return -EFAULT;
/* increase the module use count */
// increase the IPVS module use count
ip_vs_use_count_inc();
// take the global mutex; the wait is interruptible
if (mutex_lock_interruptible(&__ip_vs_mutex)) {
ret = -ERESTARTSYS;
goto out_dec;
}
if (cmd == IP_VS_SO_SET_FLUSH) {
/* Flush the virtual service */
// delete all virtual services
ret = ip_vs_flush();
goto out_unlock;
} else if (cmd == IP_VS_SO_SET_TIMEOUT) {
/* Set timeout values for (tcp tcpfin udp) */
// set the timeout values
ret = ip_vs_set_timeout((struct ip_vs_timeout_user *)arg);
goto out_unlock;
} else if (cmd == IP_VS_SO_SET_STARTDAEMON) {
// start the IPVS synchronization daemon
struct ip_vs_daemon_user *dm = (struct ip_vs_daemon_user *)arg;
ret = start_sync_thread(dm->state, dm->mcast_ifn, dm->syncid);
goto out_unlock;
} else if (cmd == IP_VS_SO_SET_STOPDAEMON) {
// stop the IPVS synchronization daemon
struct ip_vs_daemon_user *dm = (struct ip_vs_daemon_user *)arg;
ret = stop_sync_thread(dm->state);
goto out_unlock;
}
usvc = (struct ip_vs_service_user *)arg;
udest = (struct ip_vs_dest_user *)(usvc + 1);
if (cmd == IP_VS_SO_SET_ZERO) {
// zero the counters
/* if no service address is set, zero counters in all */
if (!usvc->fwmark && !usvc->addr && !usvc->port) {
ret = ip_vs_zero_all();
goto out_unlock;
}
}
/* Check for valid protocol: TCP or UDP, even for fwmark!=0 */
// check that the protocol is valid: it must be TCP or UDP
if (usvc->protocol!=IPPROTO_TCP && usvc->protocol!=IPPROTO_UDP) {
IP_VS_ERR("set_ctl: invalid protocol: %d %d.%d.%d.%d:%d %s\n",
usvc->protocol, NIPQUAD(usvc->addr),
ntohs(usvc->port), usvc->sched_name);
ret = -EFAULT;
goto out_unlock;
}
/* Lookup the exact service by <protocol, addr, port> or fwmark */
// look up the IPVS service
if (usvc->fwmark == 0)
svc = __ip_vs_service_get(usvc->protocol,
usvc->addr, usvc->port);
else
svc = __ip_vs_svc_fwm_get(usvc->fwmark);
// for any command other than ADD, the service must exist and its protocol must match
if (cmd != IP_VS_SO_SET_ADD
&& (svc == NULL || svc->protocol != usvc->protocol)) {
ret = -ESRCH;
goto out_unlock;
}
switch (cmd) {
case IP_VS_SO_SET_ADD:
// add a service
if (svc != NULL)
ret = -EEXIST;
else
ret = ip_vs_add_service(usvc, &svc);
break;
case IP_VS_SO_SET_EDIT:
// edit service parameters
ret = ip_vs_edit_service(svc, usvc);
break;
case IP_VS_SO_SET_DEL:
// delete a service
ret = ip_vs_del_service(svc);
if (!ret)
goto out_unlock;
break;
case IP_VS_SO_SET_ZERO:
// zero the service counters
ret = ip_vs_zero_service(svc);
break;
case IP_VS_SO_SET_ADDDEST:
// add a destination (real) server
ret = ip_vs_add_dest(svc, udest);
break;
case IP_VS_SO_SET_EDITDEST:
// edit destination server parameters
ret = ip_vs_edit_dest(svc, udest);
break;
case IP_VS_SO_SET_DELDEST:
// delete a destination server
ret = ip_vs_del_dest(svc, udest);
break;
default:
ret = -EINVAL;
}
// drop the reference to the service taken by the lookup above
if (svc)
ip_vs_service_put(svc);
out_unlock:
mutex_unlock(&__ip_vs_mutex);
out_dec:
/* decrease the module use count */
// decrease the IPVS module use count
ip_vs_use_count_dec();
return ret;
}
13.3 Get (read) control
static int
do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
unsigned char arg[128];
int ret = 0;
// requires the CAP_NET_ADMIN capability
if (!capable(CAP_NET_ADMIN))
return -EPERM;
// check that the supplied data length is valid
if (*len < get_arglen[GET_CMDID(cmd)]) {
IP_VS_ERR("get_ctl: len %u < %u\n",
*len, get_arglen[GET_CMDID(cmd)]);
return -EINVAL;
}
// copy the data from user space
if (copy_from_user(arg, user, get_arglen[GET_CMDID(cmd)]) != 0)
return -EFAULT;
if (mutex_lock_interruptible(&__ip_vs_mutex))
return -ERESTARTSYS;
switch (cmd) {
case IP_VS_SO_GET_VERSION:
{
// return the IPVS version string
char buf[64];
sprintf(buf, "IP Virtual Server version %d.%d.%d (size=%d)",
NVERSION(IP_VS_VERSION_CODE), IP_VS_CONN_TAB_SIZE);
if (copy_to_user(user, buf, strlen(buf)+1) != 0) {
ret = -EFAULT;
goto out;
}
*len = strlen(buf)+1;
}
break;
case IP_VS_SO_GET_INFO:
{
// basic IPVS information: version code, connection hash table size, number of services
struct ip_vs_getinfo info;
info.version = IP_VS_VERSION_CODE;
info.size = IP_VS_CONN_TAB_SIZE;
info.num_services = ip_vs_num_services;
if (copy_to_user(user, &info, sizeof(info)) != 0)
ret = -EFAULT;
}
break;
case IP_VS_SO_GET_SERVICES:
{
// get the table of IPVS services
struct ip_vs_get_services *get;
int size;
get = (struct ip_vs_get_services *)arg;
size = sizeof(*get) +
sizeof(struct ip_vs_service_entry) * get->num_services;
if (*len != size) {
IP_VS_ERR("length: %u != %u\n", *len, size);
ret = -EINVAL;
goto out;
}
ret = __ip_vs_get_service_entries(get, user);
}
break;
case IP_VS_SO_GET_SERVICE:
{
// get a single IPVS service
struct ip_vs_service_entry *entry;
struct ip_vs_service *svc;
entry = (struct ip_vs_service_entry *)arg;
// look up the IPVS service
if (entry->fwmark)
svc = __ip_vs_svc_fwm_get(entry->fwmark);
else
svc = __ip_vs_service_get(entry->protocol,
entry->addr, entry->port);
if (svc) {
// copy the service information into the entry structure
ip_vs_copy_service(entry, svc);
// copy it out to user space
if (copy_to_user(user, entry, sizeof(*entry)) != 0)
ret = -EFAULT;
ip_vs_service_put(svc);
} else
ret = -ESRCH;
}
break;
case IP_VS_SO_GET_DESTS:
{
// get the table of destination servers
struct ip_vs_get_dests *get;
int size;
get = (struct ip_vs_get_dests *)arg;
size = sizeof(*get) +
sizeof(struct ip_vs_dest_entry) * get->num_dests;
if (*len != size) {
IP_VS_ERR("length: %u != %u\n", *len, size);
ret = -EINVAL;
goto out;
}
ret = __ip_vs_get_dest_entries(get, user);
}
break;
case IP_VS_SO_GET_TIMEOUT:
{
// get the timeout values
struct ip_vs_timeout_user t;
__ip_vs_get_timeouts(&t);
if (copy_to_user(user, &t, sizeof(t)) != 0)
ret = -EFAULT;
}
break;
case IP_VS_SO_GET_DAEMON:
{
// sync daemon information: state, multicast sync interface, sync ID
struct ip_vs_daemon_user d[2];
memset(&d, 0, sizeof(d));
if (ip_vs_sync_state & IP_VS_STATE_MASTER) {
d[0].state = IP_VS_STATE_MASTER;
strlcpy(d[0].mcast_ifn, ip_vs_master_mcast_ifn, sizeof(d[0].mcast_ifn));
d[0].syncid = ip_vs_master_syncid;
}
if (ip_vs_sync_state & IP_VS_STATE_BACKUP) {
d[1].state = IP_VS_STATE_BACKUP;
strlcpy(d[1].mcast_ifn, ip_vs_backup_mcast_ifn, sizeof(d[1].mcast_ifn));
d[1].syncid = ip_vs_backup_syncid;
}
if (copy_to_user(user, &d, sizeof(d)) != 0)
ret = -EFAULT;
}
break;
default:
ret = -EINVAL;
}
out:
mutex_unlock(&__ip_vs_mutex);
return ret;
}
13.4 Service management
An IPVS service is the address and port that the IPVS load balancer presents to the outside world; it is described by struct ip_vs_service. Normally there are only a few instances of this structure, one per externally provided service.
Service parameters are added with the ipvsadm command:
ipvsadm -A -t/u v_srv_ip:vport -s scheduler
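As a rough sketch of what this command turns into at the setsockopt level (the field names follow struct ip_vs_service_user as used by ip_vs_add_service() below; the addresses, the scheduler name and the raw socket fd from the earlier sketch are illustrative assumptions, not the actual ipvsadm code):

/* Illustrative only: roughly what "ipvsadm -A -t 192.168.0.10:80 -s rr" sends.
 * fd is the raw socket opened as in the earlier sketch; additionally needs
 * <string.h> and <arpa/inet.h>. */
struct ip_vs_service_user svc;

memset(&svc, 0, sizeof(svc));
svc.protocol = IPPROTO_TCP;                     /* -t: a TCP virtual service */
svc.addr = inet_addr("192.168.0.10");           /* virtual service address (example) */
svc.port = htons(80);                           /* virtual service port (example) */
strncpy(svc.sched_name, "rr", sizeof(svc.sched_name)); /* -s rr: round robin */

if (setsockopt(fd, IPPROTO_IP, IP_VS_SO_SET_ADD, &svc, sizeof(svc)) < 0)
    perror("setsockopt(IP_VS_SO_SET_ADD)");

Note that do_ip_vs_set_ctl() checks the length against set_arglen[] exactly, so the buffer for IP_VS_SO_SET_ADD must be precisely one struct ip_vs_service_user.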
13.4.1 Adding a service
/*
* Add a service into the service hash table
*/
static int
ip_vs_add_service(struct ip_vs_service_user *u, struct ip_vs_service **svc_p)
{
int ret = 0;
struct ip_vs_scheduler *sched = NULL;
struct ip_vs_service *svc = NULL;
/* increase the module use count */
// bump the IPVS module use count, since a new service is about to use IPVS
ip_vs_use_count_inc();
/* Lookup the scheduler by 'u->sched_name' */
// look up the scheduler structure by name
sched = ip_vs_scheduler_get(u->sched_name);
if (sched == NULL) {
IP_VS_INFO("Scheduler module ip_vs_%s not found\n",
u->sched_name);
ret = -ENOENT;
goto out_mod_dec;
}
// allocate memory for the service structure
svc = (struct ip_vs_service *)
kmalloc(sizeof(struct ip_vs_service), GFP_ATOMIC);
if (svc == NULL) {
IP_VS_DBG(1, "ip_vs_add_service: kmalloc failed.\n");
ret = -ENOMEM;
goto out_err;
}
memset(svc, 0, sizeof(struct ip_vs_service));
/* I'm the first user of the service */
// initial values: use count 1, reference count 0
atomic_set(&svc->usecnt, 1);
atomic_set(&svc->refcnt, 0);
// initialize the service parameters from the values passed from user space
svc->protocol = u->protocol;
svc->addr = u->addr;
svc->port = u->port;
svc->fwmark = u->fwmark;
svc->flags = u->flags;
svc->timeout = u->timeout * HZ;
svc->netmask = u->netmask;
// initialize the kernel-internal members of the service structure
// the list of destination servers
INIT_LIST_HEAD(&svc->destinations);
// the scheduler lock of the service
rwlock_init(&svc->sched_lock);
// the statistics lock
spin_lock_init(&svc->stats.lock);
/* Bind the scheduler */
// bind the service to the scheduler
ret = ip_vs_bind_scheduler(svc, sched);
if (ret)
goto out_err;
// clear sched so it is not released again in the error path
sched = NULL;
/* Update the virtual service counters */
// when the port is the FTP port or 0, bump the corresponding counter; ideally this
// would work like netfilter's service helpers, where the helper is looked up by port
// automatically, so special port numbers would not have to be hard-coded here
if (svc->port == FTPPORT)
atomic_inc(&ip_vs_ftpsvc_counter);
else if (svc->port == 0)
atomic_inc(&ip_vs_nullsvc_counter);
// create the estimator for the service statistics, which tracks connection and
// packet rates that can be used to adjust the balancing policy
ip_vs_new_estimator(&svc->stats);
// increase the IPVS service count
ip_vs_num_services++;
/* Hash the service into the service table */
// hash the service into the service table
write_lock_bh(&__ip_vs_svc_lock);
ip_vs_svc_hash(svc);
write_unlock_bh(&__ip_vs_svc_lock);
// return the pointer to the new service
*svc_p = svc;
// success
return 0;
out_err:
// cleanup and rollback on error
if (svc != NULL) {
// unbind the service from the scheduler
if (svc->scheduler)
ip_vs_unbind_scheduler(svc);
if (svc->inc) {
// release the application incarnation bound to the service
local_bh_disable();
ip_vs_app_inc_put(svc->inc);
local_bh_enable();
}
kfree(svc);
}
// drop the reference on the scheduler
ip_vs_scheduler_put(sched);
out_mod_dec:
/* decrease the module use count */
// decrease the IPVS module use count
ip_vs_use_count_dec();
return ret;
}
13.4.2 Editing service parameters
/*
* Edit a service and bind it with a new scheduler
*/
static int
ip_vs_edit_service(struct ip_vs_service *svc, struct ip_vs_service_user *u)
{
struct ip_vs_scheduler *sched, *old_sched;
int ret = 0;
/*
* Lookup the scheduler, by 'u->sched_name'
*/
// look up the scheduler structure by name
sched = ip_vs_scheduler_get(u->sched_name);
if (sched == NULL) {
IP_VS_INFO("Scheduler module ip_vs_%s not found\n",
u->sched_name);
return -ENOENT;
}
// is this backup really necessary? It does not seem to be
old_sched = sched;
write_lock_bh(&__ip_vs_svc_lock);
/*
* Wait until all other svc users go away.
*/
// the service may only be modified once nobody else is using it
IP_VS_WAIT_WHILE(atomic_read(&svc->usecnt) > 1);
/*
* Set the flags and timeout value
*/
// the HASHED flag is set inside the kernel; user space cannot know about it
svc->flags = u->flags | IP_VS_SVC_F_HASHED;
svc->timeout = u->timeout * HZ;
svc->netmask = u->netmask;
// old_sched is overwritten before its previous value is ever used, which confirms
// that the backup above is unnecessary
old_sched = svc->scheduler;
if (sched != old_sched) {
// the new scheduler differs from the old one: unbind the old one and bind the new one
/*
* Unbind the old scheduler
*/
// unbind the old scheduler
if ((ret = ip_vs_unbind_scheduler(svc))) {
old_sched = sched;
goto out;
}
/*
* Bind the new scheduler
*/
// bind the new scheduler
if ((ret = ip_vs_bind_scheduler(svc, sched))) {
/*
* If ip_vs_bind_scheduler fails, restore the old
* scheduler.
* The main reason of failure is out of memory.
*
* The question is if the old scheduler can be
* restored all the time. TODO: if it cannot be
* restored some time, we must delete the service,
* otherwise the system may crash.
*/
// if binding fails, restore the original scheduler
ip_vs_bind_scheduler(svc, old_sched);
old_sched = sched;
goto out;
}
}
out:
write_unlock_bh(&__ip_vs_svc_lock);
if (old_sched)
ip_vs_scheduler_put(old_sched);
return ret;
}
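A note on IP_VS_WAIT_WHILE(), used above and in the delete/flush paths below: while the control path holds __ip_vs_svc_lock for writing, it spins until the other users of the service have dropped their usecnt references. The exact definition lives in ip_vs_ctl.c; from memory it is essentially a busy wait along the following lines, which should be checked against the kernel version at hand:

/* Sketch of the wait macro used by the IPVS control path: spin until the
 * condition becomes false. Packet-path users hold the service only briefly,
 * so usecnt is expected to drain quickly. */
#define IP_VS_WAIT_WHILE(expr)  while (expr) { cpu_relax(); }

Compare this with ip_vs_edit_dest() in 13.5.2, which open-codes the same wait as a bare while loop; the comment there calls that out.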
13.4.3 Deleting a service
/*
* Delete a service from the service list
*/
static int ip_vs_del_service(struct ip_vs_service *svc)
{
// when the supplied service pointer is NULL, the error returned is, curiously, -EEXIST ("object exists")
if (svc == NULL)
return -EEXIST;
/*
* Unhash it from the service table
*/
write_lock_bh(&__ip_vs_svc_lock);
// unhash the service from the service table
ip_vs_svc_unhash(svc);
/*
* Wait until all the svc users go away.
*/
// wait until the service has no more users
IP_VS_WAIT_WHILE(atomic_read(&svc->usecnt) > 1);
// delete the service structure itself
__ip_vs_del_service(svc);
write_unlock_bh(&__ip_vs_svc_lock);
return 0;
}
/*
* Delete a service from the service list
* - The service must be unlinked, unlocked and not referenced!
* - We are called under _bh lock
*/
static void __ip_vs_del_service(struct ip_vs_service *svc)
{
struct ip_vs_dest *dest, *nxt;
struct ip_vs_scheduler *old_sched;
// decrease the IPVS service count
ip_vs_num_services--;
// kill the associated estimator
ip_vs_kill_estimator(&svc->stats);
/* Unbind scheduler */
// save the scheduler pointer
old_sched = svc->scheduler;
ip_vs_unbind_scheduler(svc);
// drop the scheduler reference whether or not the unbind succeeded
if (old_sched)
ip_vs_scheduler_put(old_sched);
/* Unbind app inc */
if (svc->inc) {
// release the application incarnation
ip_vs_app_inc_put(svc->inc);
svc->inc = NULL;
}
/*
* Unlink the whole destination list
*/
// unlink and delete all destination servers used by the service
list_for_each_entry_safe(dest, nxt, &svc->destinations, n_list) {
__ip_vs_unlink_dest(svc, dest, 0);
__ip_vs_del_dest(dest);
}
/*
* Update the virtual service counters
*/
// decrease the special-port counters
if (svc->port == FTPPORT)
atomic_dec(&ip_vs_ftpsvc_counter);
else if (svc->port == 0)
atomic_dec(&ip_vs_nullsvc_counter);
/*
* Free the service if nobody refers to it
*/
// free the structure itself once the reference count is 0
if (atomic_read(&svc->refcnt) == 0)
kfree(svc);
/* decrease the module use count */
// decrease the IPVS module use count
ip_vs_use_count_dec();
}
13.4.4 Flushing all services
/*
* Flush all the virtual services
*/
static int ip_vs_flush(void)
{
int idx;
struct ip_vs_service *svc, *nxt;
/*
* Flush the service table hashed by <protocol,addr,port>
*/
// walk the whole hash table keyed by protocol/address/port
for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
// walk each hash chain
list_for_each_entry_safe(svc, nxt, &ip_vs_svc_table[idx], s_list) {
write_lock_bh(&__ip_vs_svc_lock);
ip_vs_svc_unhash(svc);
/*
* Wait until all the svc users go away.
*/
// note: here the use count must drop all the way to 0, because flush walks the table
// directly and does not hold its own usecnt reference (unlike the per-service commands,
// which obtain the service via __ip_vs_service_get and therefore wait for usecnt > 1)
IP_VS_WAIT_WHILE(atomic_read(&svc->usecnt) > 0);
// delete the service
__ip_vs_del_service(svc);
write_unlock_bh(&__ip_vs_svc_lock);
}
}
/*
* Flush the service table hashed by fwmark
*/
// walk the whole hash table keyed by fwmark
// and delete the services in the same way as above
for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
list_for_each_entry_safe(svc, nxt,
&ip_vs_svc_fwm_table[idx], f_list) {
write_lock_bh(&__ip_vs_svc_lock);
ip_vs_svc_unhash(svc);
/*
* Wait until all the svc users go away.
*/
IP_VS_WAIT_WHILE(atomic_read(&svc->usecnt) > 0);
__ip_vs_del_service(svc);
write_unlock_bh(&__ip_vs_svc_lock);
}
}
return 0;
}
13.4.5 Zeroing service counters
/*
* Zero counters in a service or all services
*/
// zero the statistics of a single service
static int ip_vs_zero_service(struct ip_vs_service *svc)
{
struct ip_vs_dest *dest;
write_lock_bh(&__ip_vs_svc_lock);
// walk the list of all destination servers of the service
list_for_each_entry(dest, &svc->destinations, n_list) {
// zero the destination server's statistics
ip_vs_zero_stats(&dest->stats);
}
// zero the service's own statistics
ip_vs_zero_stats(&svc->stats);
write_unlock_bh(&__ip_vs_svc_lock);
return 0;
}
// zero the statistics of all services
static int ip_vs_zero_all(void)
{
int idx;
struct ip_vs_service *svc;
// zero the statistics of every service in both hash tables
for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
list_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) {
ip_vs_zero_service(svc);
}
}
for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
list_for_each_entry(svc, &ip_vs_svc_fwm_table[idx], f_list) {
ip_vs_zero_service(svc);
}
}
ip_vs_zero_stats(&ip_vs_stats);
return 0;
}
13.5 Destination server management
Destination (real) servers are the servers that actually provide the service to the outside world; each is described by struct ip_vs_dest. There can be many instances of this structure, one for each real server.
Destination server parameters are added with the ipvsadm command:
ipvsadm -a -t/u v_srv_ip:vport -r dest_ip:dest_port -w weight
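For the ADDDEST/EDITDEST/DELDEST commands, the buffer passed to setsockopt carries the service description immediately followed by the destination description; this is exactly what the usvc/udest pointer arithmetic in do_ip_vs_set_ctl() (section 13.2) relies on. A minimal user-space sketch, again with illustrative addresses, the raw socket fd from the earlier example, and the usual ip_vs_service_user/ip_vs_dest_user field names assumed:

/* Illustrative only: roughly what
 * "ipvsadm -a -t 192.168.0.10:80 -r 10.0.0.2:80 -m -w 1" sends.
 * The layout (service_user followed by dest_user) mirrors usvc/udest
 * in do_ip_vs_set_ctl(); the kernel checks the total length exactly. */
struct {
    struct ip_vs_service_user svc;   /* which virtual service to attach to */
    struct ip_vs_dest_user dest;     /* the real server being added */
} req;

memset(&req, 0, sizeof(req));
req.svc.protocol = IPPROTO_TCP;
req.svc.addr = inet_addr("192.168.0.10");   /* virtual address (example) */
req.svc.port = htons(80);
req.dest.addr = inet_addr("10.0.0.2");      /* real server address (example) */
req.dest.port = htons(80);
req.dest.conn_flags = IP_VS_CONN_F_MASQ;    /* -m: NAT/masquerading forwarding */
req.dest.weight = 1;                        /* -w 1 */

if (setsockopt(fd, IPPROTO_IP, IP_VS_SO_SET_ADDDEST, &req, sizeof(req)) < 0)
    perror("setsockopt(IP_VS_SO_SET_ADDDEST)");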
13.5.1 Adding a destination server
/*
* Add a destination into an existing service
*/
static int
ip_vs_add_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest)
{
struct ip_vs_dest *dest;
__u32 daddr = udest->addr;
__u16 dport = udest->port;
int ret;
EnterFunction(2);
// the server weight must not be negative
if (udest->weight < 0) {
IP_VS_ERR("ip_vs_add_dest(): server weight less than zero\n");
return -ERANGE;
}
// the upper threshold must not be lower than the lower threshold
if (udest->l_threshold > udest->u_threshold) {
IP_VS_ERR("ip_vs_add_dest(): lower threshold is higher than "
"upper threshold\n");
return -ERANGE;
}
/*
* Check if the dest already exists in the list
*/
// look up the destination by address and port to see whether it already exists
dest = ip_vs_lookup_dest(svc, daddr, dport);
if (dest != NULL) {
// the destination already exists
IP_VS_DBG(1, "ip_vs_add_dest(): dest already exists\n");
return -EEXIST;
}
/*
* Check if the dest already exists in the trash and
* is from the same service
*/
// check whether the destination still exists in the trash list of servers awaiting
// release, and whether it belonged to the same service
dest = ip_vs_trash_get_dest(svc, daddr, dport);
if (dest != NULL) {
IP_VS_DBG(3, "Get destination %u.%u.%u.%u:%u from trash, "
"dest->refcnt=%d, service %u/%u.%u.%u.%u:%u\n",
NIPQUAD(daddr), ntohs(dport),
atomic_read(&dest->refcnt),
dest->vfwmark,
NIPQUAD(dest->vaddr),
ntohs(dest->vport));
// the destination was found in the trash: bring it back into use
// update the destination with the new parameters
__ip_vs_update_dest(svc, dest, udest);
/*
* Get the destination from the trash
*/
// remove it from the trash list
list_del(&dest->n_list);
// create a new estimator for the destination
ip_vs_new_estimator(&dest->stats);
write_lock_bh(&__ip_vs_svc_lock);
/*
* Wait until all other svc users go away.
*/
IP_VS_WAIT_WHILE(atomic_read(&svc->usecnt) > 1);
// add it to the service's destination list
list_add(&dest->n_list, &svc->destinations);
svc->num_dests++;
/* call the update_service function of its scheduler */
// let the scheduler update its internal state
svc->scheduler->update_service(svc);
write_unlock_bh(&__ip_vs_svc_lock);
return 0;
}
/*
* Allocate and initialize the dest structure
*/
// the destination does not exist yet: allocate and initialize a new one
ret = ip_vs_new_dest(svc, udest, &dest);
if (ret) {
return ret;
}
/*
* Add the dest entry into the list
*/
// increase the destination's reference count
atomic_inc(&dest->refcnt);
write_lock_bh(&__ip_vs_svc_lock);
/*
* Wait until all other svc users go away.
*/
// the destination may only be added once nobody else is using the service
IP_VS_WAIT_WHILE(atomic_read(&svc->usecnt) > 1);
// add the destination to the service's server list
list_add(&dest->n_list, &svc->destinations);
svc->num_dests++;
/* call the update_service function of its scheduler */
// let the scheduler update its internal state
svc->scheduler->update_service(svc);
write_unlock_bh(&__ip_vs_svc_lock);
LeaveFunction(2);
return 0;
}
/*
* Create a destination for the given service
*/
// allocate and initialize a new destination structure
static int
ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest,
struct ip_vs_dest **dest_p)
{
struct ip_vs_dest *dest;
unsigned atype;
EnterFunction(2);
// check the destination IP address type: only local and unicast addresses are accepted; multicast, broadcast and other types are rejected
atype = inet_addr_type(udest->addr);
if (atype != RTN_LOCAL && atype != RTN_UNICAST)
return -EINVAL;
// allocate memory for the destination structure
dest = kmalloc(sizeof(struct ip_vs_dest), GFP_ATOMIC);
if (dest == NULL) {
IP_VS_ERR("ip_vs_new_dest: kmalloc failed.\n");
return -ENOMEM;
}
memset(dest, 0, sizeof(struct ip_vs_dest));
// fill in the fields describing the virtual service this destination belongs to
dest->protocol = svc->protocol;
dest->vaddr = svc->addr;
dest->vport = svc->port;
dest->vfwmark = svc->fwmark;
dest->addr = udest->addr;
dest->port = udest->port;
// initialize the various counters
// active connections
atomic_set(&dest->activeconns, 0);
// inactive connections
atomic_set(&dest->inactconns, 0);
// persistent (template) connections
atomic_set(&dest->persistconns, 0);
// reference count
atomic_set(&dest->refcnt, 0);
// initialize the list head and locks
INIT_LIST_HEAD(&dest->d_list);
spin_lock_init(&dest->dst_lock);
spin_lock_init(&dest->stats.lock);
// fill in the destination from the user-supplied parameters
__ip_vs_update_dest(svc, dest, udest);
// create a new estimator
ip_vs_new_estimator(&dest->stats);
// return the new destination
*dest_p = dest;
LeaveFunction(2);
return 0;
}
13.5.2 Editing destination server parameters
/*
* Edit a destination in the given service
*/
static int
ip_vs_edit_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest)
{
struct ip_vs_dest *dest;
__u32 daddr = udest->addr;
__u16 dport = udest->port;
EnterFunction(2);
// the weight must not be negative
if (udest->weight < 0) {
IP_VS_ERR("ip_vs_edit_dest(): server weight less than zero\n");
return -ERANGE;
}
// the upper threshold must not be lower than the lower threshold
if (udest->l_threshold > udest->u_threshold) {
IP_VS_ERR("ip_vs_edit_dest(): lower threshold is higher than "
"upper threshold\n");
return -ERANGE;
}
/*
* Lookup the destination list
*/
// look up the destination
dest = ip_vs_lookup_dest(svc, daddr, dport);
if (dest == NULL) {
IP_VS_DBG(1, "ip_vs_edit_dest(): dest doesn't exist\n");
return -ENOENT;
}
// update the destination information
__ip_vs_update_dest(svc, dest, udest);
write_lock_bh(&__ip_vs_svc_lock);
/* Wait until all other svc users go away */
// wait until nobody else is using the service; this bare busy loop (without even
// cpu_relax()) is rather scary
while (atomic_read(&svc->usecnt) > 1) {};
/* call the update_service, because server weight may be changed */
// let the scheduler update its state, since the server weight may have changed
svc->scheduler->update_service(svc);
write_unlock_bh(&__ip_vs_svc_lock);
LeaveFunction(2);
return 0;
}
13.5.3 Deleting a destination server
/*
* Delete a destination server in the given service
*/
static int
ip_vs_del_dest(struct ip_vs_service *svc,struct ip_vs_dest_user *udest)
{
struct ip_vs_dest *dest;
__u32 daddr = udest->addr;
__u16 dport = udest->port;
EnterFunction(2);
// look up the destination
dest = ip_vs_lookup_dest(svc, daddr, dport);
if (dest == NULL) {
IP_VS_DBG(1, "ip_vs_del_dest(): destination not found!\n");
return -ENOENT;
}
write_lock_bh(&__ip_vs_svc_lock);
/*
* Wait until all other svc users go away.
*/
// wait until nobody is using the service
IP_VS_WAIT_WHILE(atomic_read(&svc->usecnt) > 1);
/*
* Unlink dest from the service
*/
// unlink the destination from the service's list
__ip_vs_unlink_dest(svc, dest, 1);
write_unlock_bh(&__ip_vs_svc_lock);
/*
* Delete the destination
*/
// delete the destination itself
__ip_vs_del_dest(dest);
LeaveFunction(2);
return 0;
}
/*
* Delete a destination (must be already unlinked from the service)
*/
static void __ip_vs_del_dest(struct ip_vs_dest *dest)
{
// kill the estimator
ip_vs_kill_estimator(&dest->stats);
/*
* Remove it from the d-linked list with the real services.
*/
write_lock_bh(&__ip_vs_rs_lock);
// unhash the destination from the real-server hash table
ip_vs_rs_unhash(dest);
write_unlock_bh(&__ip_vs_rs_lock);
/*
* Decrease the refcnt of the dest, and free the dest
* if nobody refers to it (refcnt=0). Otherwise, throw
* the destination into the trash.
*/
if (atomic_dec_and_test(&dest->refcnt)) {
// the reference count has dropped to 0
// release the cached route to the destination
ip_vs_dst_reset(dest);
/* simply decrease svc->refcnt here, let the caller check
and release the service if nobody refers to it.
Only user context can release destination and service,
and only one user context can update virtual service at a
time, so the operation here is OK */
// drop the reference on the service
atomic_dec(&dest->svc->refcnt);
// free the destination structure
kfree(dest);
} else {
// the reference count is not 0, someone is still using it: move the structure
// onto the destination trash list
IP_VS_DBG(3, "Moving dest %u.%u.%u.%u:%u into trash, "
"dest->refcnt=%d\n",
NIPQUAD(dest->addr), ntohs(dest->port),
atomic_read(&dest->refcnt));
list_add(&dest->n_list, &ip_vs_dest_trash);
atomic_inc(&dest->refcnt);
}
}
/*
* Unlink a destination from the given service
*/
// unlink a destination from the service's list
static void __ip_vs_unlink_dest(struct ip_vs_service *svc,
struct ip_vs_dest *dest,
int svcupd)
{
// clear the destination's AVAILABLE flag
dest->flags &= ~IP_VS_DEST_F_AVAILABLE;
/*
* Remove it from the d-linked destination list.
*/
// remove it from the service's destination list
list_del(&dest->n_list);
// decrease the service's destination count
svc->num_dests--;
if (svcupd) {
/*
* Call the update_service function of its scheduler
*/
// let the scheduler update its state
svc->scheduler->update_service(svc);
}
}
13.6 Defense level adjustment
IPVS has a timer function that periodically adjusts the IPVS defense level according to the current system configuration and load:
/*
* Timer for checking the defense
*/
// adjustment period: 1 second
#define DEFENSE_TIMER_PERIOD 1*HZ
static void defense_work_handler(void *data);
static DECLARE_WORK(defense_work, defense_work_handler, NULL);
// the periodic work handler
static void defense_work_handler(void *data)
{
// update the defense level
update_defense_level();
if (atomic_read(&ip_vs_dropentry))
ip_vs_random_dropentry();
// re-arm the delayed work
schedule_delayed_work(&defense_work, DEFENSE_TIMER_PERIOD);
}
/*
* update_defense_level is called from keventd and from sysctl,
* so it needs to protect itself from softirqs
*/
// update the IPVS defense level, using the control parameters exported under /proc
static void update_defense_level(void)
{
struct sysinfo i;
static int old_secure_tcp = 0;
int availmem;
int nomem;
int to_change = -1;
/* we only count free and buffered memory (in pages) */
// compute the amount of available memory
si_meminfo(&i);
availmem = i.freeram + i.bufferram;
/* however in linux 2.5 the i.bufferram is total page cache size,
we need adjust it */
/* si_swapinfo(&i); */
/* availmem = availmem - (i.totalswap - i.freeswap); */
// judge whether memory is currently considered scarce, based on the configured memory threshold (amemthresh)
nomem = (availmem < sysctl_ip_vs_amemthresh);
local_bh_disable();
/* drop_entry */
spin_lock(&__ip_vs_dropentry_lock);
// drop_entry policy: randomly drop connection entries under memory pressure
switch (sysctl_ip_vs_drop_entry) {
case 0:
// never drop entries
atomic_set(&ip_vs_dropentry, 0);
break;
case 1:
if (nomem) {
// memory is scarce: allow dropping entries
atomic_set(&ip_vs_dropentry, 1);
sysctl_ip_vs_drop_entry = 2;
} else {
// memory is sufficient: no need to drop
atomic_set(&ip_vs_dropentry, 0);
}
break;
case 2:
if (nomem) {
// memory is scarce: allow dropping entries
atomic_set(&ip_vs_dropentry, 1);
} else {
// memory is sufficient: no need to drop
atomic_set(&ip_vs_dropentry, 0);
sysctl_ip_vs_drop_entry = 1;
};
break;
case 3:
// always allow dropping entries
atomic_set(&ip_vs_dropentry, 1);
break;
}
spin_unlock(&__ip_vs_dropentry_lock);
/* drop_packet */
// drop_packet policy: drop incoming packets at a computed rate
spin_lock(&__ip_vs_droppacket_lock);
switch (sysctl_ip_vs_drop_packet) {
case 0:
// do not drop packets
ip_vs_drop_rate = 0;
break;
case 1:
if (nomem) {
// memory is scarce: compute the drop rate from the current memory situation,
// rate = amemthresh / (amemthresh - availmem); e.g. with amemthresh = 1024 pages
// and availmem = 768 the rate is 1024/256 = 4, i.e. roughly one packet in four is dropped
ip_vs_drop_rate = ip_vs_drop_counter
= sysctl_ip_vs_amemthresh /
(sysctl_ip_vs_amemthresh-availmem);
// switch the drop-packet policy to 2
sysctl_ip_vs_drop_packet = 2;
} else {
// no need to drop
ip_vs_drop_rate = 0;
}
break;
case 2:
if (nomem) {
// memory is scarce: compute the drop rate from the current memory situation
ip_vs_drop_rate = ip_vs_drop_counter
= sysctl_ip_vs_amemthresh /
(sysctl_ip_vs_amemthresh-availmem);
} else {
// no need to drop
ip_vs_drop_rate = 0;
sysctl_ip_vs_drop_packet = 1;
}
break;
case 3:
// use the drop rate configured by the administrator
ip_vs_drop_rate = sysctl_ip_vs_am_droprate;
break;
}
spin_unlock(&__ip_vs_droppacket_lock);
/* secure_tcp */
// secure_tcp handling: switch the TCP state timeouts between the normal and hardened tables
write_lock(&__ip_vs_securetcp_lock);
// the secure_tcp sysctl setting
switch (sysctl_ip_vs_secure_tcp) {
case 0:
// secure_tcp is off: if we were previously in secure mode, schedule a switch back to the normal timeouts
if (old_secure_tcp >= 2)
to_change = 0;
break;
case 1:
if (nomem) {
// memory is scarce and we were not yet in secure mode: schedule a switch to the secure timeouts
if (old_secure_tcp < 2)
to_change = 1;
sysctl_ip_vs_secure_tcp = 2;
} else {
// memory is sufficient: if we were previously in secure mode, schedule a switch back to normal
if (old_secure_tcp >= 2)
to_change = 0;
}
break;
case 2:
if (nomem) {
// memory is scarce and we were not yet in secure mode: schedule a switch to the secure timeouts
if (old_secure_tcp < 2)
to_change = 1;
} else {
// memory is sufficient: if we were previously in secure mode, schedule a switch back to normal
if (old_secure_tcp >= 2)
to_change = 0;
sysctl_ip_vs_secure_tcp = 1;
}
break;
case 3:
// always secure: if we were not yet in secure mode, schedule a switch to the secure timeouts
if (old_secure_tcp < 2)
to_change = 1;
break;
}
// remember the current secure_tcp setting for the next run
old_secure_tcp = sysctl_ip_vs_secure_tcp;
// apply the timeout change if one was scheduled; the secure timeouts are used when secure_tcp > 1
if (to_change >= 0)
ip_vs_protocol_timeout_change(sysctl_ip_vs_secure_tcp>1);
write_unlock(&__ip_vs_securetcp_lock);
local_bh_enable();
}
14. Conclusion
Overall, IPVS accomplishes server load balancing with a fairly modest amount of code (a bit over ten thousand lines) while implementing multiple balancing algorithms and forwarding modes, so it is a reasonably efficient module. Two areas could still be improved. The first is merging IPVS connection handling with netfilter's connection tracking, since there is no real need to maintain two connection tables. The second is helper support for multi-connection protocols, which IPVS does not encapsulate very well. In NAT mode, IPVS and netfilter would be relatively easy to merge: IPVS would only be involved in choosing the destination address during NAT translation, independent of the other modules (today netfilter picks dynamically using a hash-based balancing scheme), and connection tracking and NAT would then be done entirely by netfilter, which also supports multi-connection protocols well. The other two forwarding modes are harder to support this way, because different state transition tables would have to be distinguished and the connection tracking code would need modification.