In the first section on sockets we saw the call that closes a socket. We have already walked the path from the kernel's task_struct down to the socket's hook functions; if you have not seen that, or it is still unclear, please go back to the read and write analysis in the earlier chapters. Let's start directly from the file hook table in socket.c:
static const struct file_operations socket_file_ops = {
	......
	.release = sock_close,
	......
};
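For context, this release hook is reached from the generic file-close path: once the last reference to the struct file is dropped, __fput() in fs/file_table.c calls the file's release method. An abbreviated excerpt (assuming a 2.6-era kernel; only the relevant lines are shown):

	/* Abbreviated from __fput(): when the last reference to the file is
	 * gone, the VFS invokes the file's release hook, which for a socket
	 * file is sock_close(). */
	if (file->f_op && file->f_op->release)
		file->f_op->release(inode, file);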
We can see that closing the socket is handed off to sock_close():
static int sock_close(struct inode *inode, struct file *filp)
{
	/*
	 *	It was possible the inode is NULL we were
	 *	closing an unfinished socket.
	 */
	if (!inode) {
		printk(KERN_DEBUG "sock_close: NULL inode\n");
		return 0;
	}
	sock_fasync(-1, filp, 0);
	sock_release(SOCKET_I(inode));
	return 0;
}
The code first deals with the socket's fasync_list queue. Recall from the connect and receive discussions that a process wanting "asynchronous" notification on its socket gets a fasync_struct initialized and linked into this queue; now that the socket is being closed, every fasync_struct left on the queue has to be released:
static int sock_fasync(int fd, struct file *filp, int on)
{
	struct fasync_struct *fa, *fna = NULL, **prev;
	struct socket *sock;
	struct sock *sk;

	if (on) {
		fna = kmalloc(sizeof(struct fasync_struct), GFP_KERNEL);
		if (fna == NULL)
			return -ENOMEM;
	}

	sock = filp->private_data;

	sk = sock->sk;
	if (sk == NULL) {
		kfree(fna);
		return -EINVAL;
	}

	lock_sock(sk);

	prev = &(sock->fasync_list);

	for (fa = *prev; fa != NULL; prev = &fa->fa_next, fa = *prev)
		if (fa->fa_file == filp)
			break;

	if (on) {
		if (fa != NULL) {
			write_lock_bh(&sk->sk_callback_lock);
			fa->fa_fd = fd;
			write_unlock_bh(&sk->sk_callback_lock);

			kfree(fna);
			goto out;
		}
		fna->fa_file = filp;
		fna->fa_fd = fd;
		fna->magic = FASYNC_MAGIC;
		fna->fa_next = sock->fasync_list;
		write_lock_bh(&sk->sk_callback_lock);
		sock->fasync_list = fna;
		write_unlock_bh(&sk->sk_callback_lock);
	} else {
		if (fa != NULL) {
			write_lock_bh(&sk->sk_callback_lock);
			*prev = fa->fa_next;
			write_unlock_bh(&sk->sk_callback_lock);
			kfree(fa);
		}
	}

out:
	release_sock(sock->sk);
	return 0;
}
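As an aside, entries end up on fasync_list in the first place when user space enables asynchronous (SIGIO) notification on the socket descriptor; that request reaches sock_fasync() with on != 0 through the .fasync hook in socket_file_ops. A minimal user-space sketch (the fd parameter is hypothetical):

#include <fcntl.h>
#include <unistd.h>

/* Enable SIGIO-driven notification on an already-open socket fd.
 * F_SETFL with O_ASYNC is what makes the kernel allocate a
 * fasync_struct and hook it into sock->fasync_list. */
static void enable_async_io(int fd)
{
	fcntl(fd, F_SETOWN, getpid());           /* deliver SIGIO to this process */
	int flags = fcntl(fd, F_GETFL);
	fcntl(fd, F_SETFL, flags | O_ASYNC);     /* -> sock_fasync(fd, filp, 1) */
}

The call sock_fasync(-1, filp, 0) made from sock_close() above is the counterpart that removes such an entry again.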
Note that the value of on passed down from sock_close() is 0. In the for loop the function walks fasync_list entry by entry, looking for the one whose fa_file matches this file:
	if (fa != NULL) {
		write_lock_bh(&sk->sk_callback_lock);
		*prev = fa->fa_next;
		write_unlock_bh(&sk->sk_callback_lock);
		kfree(fa);
	}
This is the key statement for our path: it runs when on is 0, unlinks the matching fasync_struct from the list and releases it with kfree(). kfree() belongs to the slab cache machinery, which we will introduce in the memory-management chapters. Returning to sock_close() and reading on, the function next calls sock_release():
void sock_release(struct socket *sock)
{
	if (sock->ops) {
		struct module *owner = sock->ops->owner;

		sock->ops->release(sock);
		sock->ops = NULL;
		module_put(owner);
	}

	if (sock->fasync_list)
		printk(KERN_ERR "sock_release: fasync list not empty!\n");

	get_cpu_var(sockets_in_use)--;
	put_cpu_var(sockets_in_use);
	if (!sock->file) {
		iput(SOCK_INODE(sock));
		return;
	}
	sock->file = NULL;
}
As we have said, this analysis is aimed at the unix-domain "TCP" (stream) and "UDP" (datagram) cases, so sock->ops->release(sock) invokes the af_unix hook. We have listed unix_stream_ops and unix_dgram_ops several times already and will not reproduce them in full; both hook the same function, unix_release(), into their release slot, which is where execution continues.
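An abbreviated reminder of just the relevant fields (assuming net/unix/af_unix.c of the same kernel version):

static const struct proto_ops unix_stream_ops = {
	.family  = PF_UNIX,
	.owner   = THIS_MODULE,
	.release = unix_release,
	......
};

static const struct proto_ops unix_dgram_ops = {
	.family  = PF_UNIX,
	.owner   = THIS_MODULE,
	.release = unix_release,
	......
};

unix_release() itself is short: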
static int unix_release(struct socket *sock)
{
	struct sock *sk = sock->sk;

	if (!sk)
		return 0;

	sock->sk = NULL;

	return unix_release_sock (sk, 0);
}
As we can see, the real work continues in unix_release_sock():
static int unix_release_sock (struct sock *sk, int embrion)
{
	struct unix_sock *u = unix_sk(sk);
	struct dentry *dentry;
	struct vfsmount *mnt;
	struct sock *skpair;
	struct sk_buff *skb;
	int state;

	unix_remove_socket(sk);

	/* Clear state */
	unix_state_lock(sk);
	sock_orphan(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
	dentry	  = u->dentry;
	u->dentry = NULL;
	mnt	  = u->mnt;
	u->mnt	  = NULL;
	state = sk->sk_state;
	sk->sk_state = TCP_CLOSE;
	unix_state_unlock(sk);

	wake_up_interruptible_all(&u->peer_wait);

	skpair = unix_peer(sk);

	if (skpair != NULL) {
		if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
			unix_state_lock(skpair);
			/* No more writes */
			skpair->sk_shutdown = SHUTDOWN_MASK;
			if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
				skpair->sk_err = ECONNRESET;
			unix_state_unlock(skpair);
			skpair->sk_state_change(skpair);
			read_lock(&skpair->sk_callback_lock);
			sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
			read_unlock(&skpair->sk_callback_lock);
		}
		sock_put(skpair); /* It may now die */
		unix_peer(sk) = NULL;
	}
The function starts by calling unix_remove_socket():
static inline void unix_remove_socket(struct sock *sk)
{
	spin_lock(&unix_table_lock);
	__unix_remove_socket(sk);
	spin_unlock(&unix_table_lock);
}

static void __unix_remove_socket(struct sock *sk)
{
	sk_del_node_init(sk);
}

static __inline__ int sk_del_node_init(struct sock *sk)
{
	int rc = __sk_del_node_init(sk);

	if (rc) {
		/* paranoid for a while -acme */
		WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}
	return rc;
}

static __inline__ int __sk_del_node_init(struct sock *sk)
{
	if (sk_hashed(sk)) {
		__sk_del_node(sk);
		sk_node_init(&sk->sk_node);
		return 1;
	}
	return 0;
}
We should still remember the chapter on creating a unix socket (http://blog.chinaunix.net/u2/64681/showart_1300200.html). There we saw that the kernel keeps a hash list called unix_sockets_unbound, and that during creation unix_create1() links the new sock into it; readers who have forgotten can go back and review that section. What we see here is the reverse of that operation: at creation time the sock's sk_node was hooked into the unix_sockets_unbound hash list, and the layered calls above finally come down to the following:
static __inline__ void __sk_del_node(struct sock *sk)
{
	__hlist_del(&sk->sk_node);
}

static inline void __hlist_del(struct hlist_node *n)
{
	struct hlist_node *next = n->next;
	struct hlist_node **pprev = n->pprev;
	*pprev = next;
	if (next)
		next->pprev = pprev;
}
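For reference, the hlist node used here looks like this (a short excerpt of the kernel's generic hash-list types):

struct hlist_head {
	struct hlist_node *first;
};

struct hlist_node {
	struct hlist_node *next, **pprev;
};

The pprev field points at the previous node's next pointer (or at the bucket head's first pointer), which is why __hlist_del() can unlink a node without ever touching the list head.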
So sk_node is simply unlinked from the hash list. Back in unix_release_sock(), the next call is:
static inline void sock_orphan(struct sock *sk)
{
	write_lock_bh(&sk->sk_callback_lock);
	sock_set_flag(sk, SOCK_DEAD);
	sk->sk_socket = NULL;
	sk->sk_sleep  = NULL;
	write_unlock_bh(&sk->sk_callback_lock);
}
which in turn sets a flag on the sock:
static inline void sock_set_flag(struct sock *sk, enum sock_flags flag)
{
	__set_bit(flag, &sk->sk_flags);
}
In other words, the sock is now flagged SOCK_DEAD. We have seen the send and receive paths test this flag many times in earlier chapters; as we said there, it prevents data from being sent to, or received from, a socket that has already been torn down, and this is where that state is actually set. The next assignment, sk->sk_socket = NULL, detaches the sock from its socket. Also note how the sk_callback_lock is taken here; we will come back to the details once we have covered locking.

Returning to unix_release_sock(), wake_up_interruptible_all() wakes every process waiting on the socket's peer_wait queue; once they wake up they will test the SOCK_DEAD flag and back out there. Further down, unix_peer() is called to fetch the connected peer's sock structure, sock_put() drops the peer's reference count, and finally the pointer is cleared with unix_peer(sk) = NULL. For a stream (TCP-style) connection there is some special handling first: if data is still queued on our receive queue (or this is an embryonic socket), the peer's sk_err is set to ECONNRESET so that the peer's process returns with an error, and skpair->sk_state_change(skpair) is invoked. Recall that sock_init_data(), which initialized the new sock when the socket was created, installed the hook sk->sk_state_change = sock_def_wakeup, so this call executes sock_def_wakeup():
static void sock_def_wakeup(struct sock *sk)
{
	read_lock(&sk->sk_callback_lock);
	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible_all(sk->sk_sleep);
	read_unlock(&sk->sk_callback_lock);
}
Here the processes sleeping on the peer (the sending side) are woken up; when they resume they will see the error and return. After that, sk_wake_async() notifies the peer sock's fasync_list, so the processes registered there for asynchronous operation (through their fown_struct) are told that the connection has hung up.
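For reference, the sk_state_change hook used just above was installed when the sock was created; sock_init_data() sets these defaults (abbreviated excerpt, assuming net/core/sock.c of the same kernel generation):

	sk->sk_state_change	=	sock_def_wakeup;
	sk->sk_data_ready	=	sock_def_readable;
	sk->sk_write_space	=	sock_def_write_space;
	sk->sk_error_report	=	sock_def_error_report;
	sk->sk_destruct		=	sock_def_destruct;

Back in unix_release_sock(), the next block drains whatever is still queued on our receive queue: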
	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		if (state == TCP_LISTEN)
			unix_release_sock(skb->sk, 1);
		/* passed fds are erased in the kfree_skb hook */
		kfree_skb(skb);
	}

	if (dentry) {
		dput(dentry);
		mntput(mnt);
	}

	sock_put(sk);

	/* ---- Socket is dead now and most probably destroyed ---- */

	/*
	 * Fixme: BSD difference: In BSD all sockets connected to use get
	 *	  ECONNRESET and we die on the spot. In Linux we behave
	 *	  like files and pipes do and wait for the last
	 *	  dereference.
	 *
	 * Can't we simply set sock->err?
	 *
	 *	  What the above comment does talk about? --ANK(980817)
	 */

	if (unix_tot_inflight)
		unix_gc();		/* Garbage collect fds */

	return 0;
}
If the socket being closed still has data queued, skb_dequeue() unlinks each skb so it can be freed. Notice the recursive call to unix_release_sock(): while listening, the server-side socket keeps cloning a new socket for every incoming client connection and leaves it on the listening socket's receive queue, so the recursion makes sure those newly created server-side connection sockets are released as well. Next comes some file-system related work:
	if (dentry) {
		dput(dentry);
		mntput(mnt);
	}
We will come back to these two calls once the file-system chapters have been covered. sock_put(sk) then releases our reference on the sock structure. Recall that over a unix-domain connection, open-file pointers can be in the middle of being passed from one process to another, i.e. "in flight"; unix_tot_inflight records how many file grants are currently in that state, and if there are any, unix_gc() is invoked to collect whatever has become unreachable.
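To make "in flight" concrete: a descriptor is put in flight when one process hands it to another over an AF_UNIX socket with an SCM_RIGHTS control message; until the receiver actually picks the message up, the file is referenced only from an skb sitting in a receive queue. A minimal user-space sketch of the sending side (the unix_sock and fd_to_pass parameters are hypothetical):

#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

/* Send fd_to_pass across an already-connected AF_UNIX socket.
 * While the message waits in the peer's receive queue the descriptor
 * is "in flight" and counted in unix_tot_inflight. */
static int send_fd(int unix_sock, int fd_to_pass)
{
	char dummy = '*';
	struct iovec iov = { .iov_base = &dummy, .iov_len = 1 };
	char cbuf[CMSG_SPACE(sizeof(int))];
	struct msghdr msg = {
		.msg_iov = &iov,
		.msg_iovlen = 1,
		.msg_control = cbuf,
		.msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type  = SCM_RIGHTS;
	cmsg->cmsg_len   = CMSG_LEN(sizeof(int));
	memcpy(CMSG_DATA(cmsg), &fd_to_pass, sizeof(int));

	return sendmsg(unix_sock, &msg, 0);
}

When such in-flight references form a cycle that no process can reach any more, only the garbage collector can reclaim them. Here is unix_gc():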
void unix_gc(void)
{
	static bool gc_in_progress = false;

	struct unix_sock *u;
	struct unix_sock *next;
	struct sk_buff_head hitlist;
	struct list_head cursor;

	spin_lock(&unix_gc_lock);

	/* Avoid a recursive GC. */
	if (gc_in_progress)
		goto out;

	gc_in_progress = true;
	/*
	 * First, select candidates for garbage collection.  Only
	 * in-flight sockets are considered, and from those only ones
	 * which don't have any external reference.
	 *
	 * Holding unix_gc_lock will protect these candidates from
	 * being detached, and hence from gaining an external
	 * reference.  This also means, that since there are no
	 * possible receivers, the receive queues of these sockets are
	 * static during the GC, even though the dequeue is done
	 * before the detach without atomicity guarantees.
	 */
	list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
		int total_refs;
		int inflight_refs;

		total_refs = file_count(u->sk.sk_socket->file);
		inflight_refs = atomic_read(&u->inflight);

		BUG_ON(inflight_refs < 1);
		BUG_ON(total_refs < inflight_refs);
		if (total_refs == inflight_refs) {
			list_move_tail(&u->link, &gc_candidates);
			u->gc_candidate = 1;
		}
	}
The loop above uses the following macro:
#define list_for_each_entry_safe(pos, n, head, member)			\
	for (pos = list_entry((head)->next, typeof(*pos), member),	\
		n = list_entry(pos->member.next, typeof(*pos), member);	\
	     &pos->member != (head);					\
	     pos = n, n = list_entry(n->member.next, typeof(*n), member))
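Compared with plain list_for_each_entry(), the _safe variant caches the next entry in n, so the current entry pos may be moved to another list (as list_move_tail() does above) or freed without breaking the walk. A generic usage sketch (the my_item type is hypothetical):

#include <linux/list.h>
#include <linux/slab.h>

struct my_item {
	int              value;
	struct list_head link;
};

/* Delete every negative item; this is safe because n already holds the
 * next element before pos is unlinked and freed. */
static void drop_negative(struct list_head *head)
{
	struct my_item *pos, *n;

	list_for_each_entry_safe(pos, n, head, link) {
		if (pos->value < 0) {
			list_del(&pos->link);
			kfree(pos);
		}
	}
}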
Continuing with the code: the test if (total_refs == inflight_refs) compares the file's reference count with its "in-flight" count. If the two are equal, our socket is an "orphan". Why an orphan? Think about a socket whose file is being passed around, i.e. the in-flight file grants we mentioned earlier: in the normal case the file's reference count is strictly greater than the in-flight count, because at least one process still holds the descriptor open. We will not dig deeper here, since that drags in too much file-system detail; the rule itself is all we need. So when the counts are equal the socket is orphaned, and such a sock is moved onto the gc_candidates list for further processing. Let us see how, continuing with unix_gc():
	list_for_each_entry(u, &gc_candidates, link)
		scan_children(&u->sk, dec_inflight, NULL);

	/*
	 * Restore the references for children of all candidates,
	 * which have remaining references.  Do this recursively, so
	 * only those remain, which form cyclic references.
	 *
	 * Use a "cursor" link, to make the list traversal safe, even
	 * though elements might be moved about.
	 */
	list_add(&cursor, &gc_candidates);
	while (cursor.next != &gc_candidates) {
		u = list_entry(cursor.next, struct unix_sock, link);

		/* Move cursor to after the current position. */
		list_move(&cursor, &u->link);

		if (atomic_read(&u->inflight) > 0) {
			list_move_tail(&u->link, &gc_inflight_list);
			u->gc_candidate = 0;
			scan_children(&u->sk, inc_inflight_move_tail, NULL);
		}
	}
	list_del(&cursor);

	/*
	 * Now gc_candidates contains only garbage.  Restore original
	 * inflight counters for these as well, and remove the skbuffs
	 * which are creating the cycle(s).
	 */
	skb_queue_head_init(&hitlist);
	list_for_each_entry(u, &gc_candidates, link)
		scan_children(&u->sk, inc_inflight, &hitlist);

	spin_unlock(&unix_gc_lock);

	/* Here we are. Hitlist is filled. Die. */
	__skb_queue_purge(&hitlist);

	spin_lock(&unix_gc_lock);

	/* All candidates should have been detached by now. */
	BUG_ON(!list_empty(&gc_candidates));
	gc_in_progress = false;

out:
	spin_unlock(&unix_gc_lock);
}
The code above calls scan_children():
static void scan_children(struct sock *x, void (*func)(struct unix_sock *),
			  struct sk_buff_head *hitlist)
{
	if (x->sk_state != TCP_LISTEN)
		scan_inflight(x, func, hitlist);
	else {
		struct sk_buff *skb;
		struct sk_buff *next;
		struct unix_sock *u;
		LIST_HEAD(embryos);

		/*
		 * For a listening socket collect the queued embryos
		 * and perform a scan on them as well.
		 */
		spin_lock(&x->sk_receive_queue.lock);
		receive_queue_for_each_skb(x, next, skb) {
			u = unix_sk(skb->sk);

			/*
			 * An embryo cannot be in-flight, so it's safe
			 * to use the list link.
			 */
			BUG_ON(!list_empty(&u->link));
			list_add_tail(&u->link, &embryos);
		}
		spin_unlock(&x->sk_receive_queue.lock);

		while (!list_empty(&embryos)) {
			u = list_entry(embryos.next, struct unix_sock, link);
			scan_inflight(&u->sk, func, hitlist);
			list_del_init(&u->link);
		}
	}
}
For an ordinary socket this calls scan_inflight() directly. A listening socket, on the other hand, keeps cloning a new socket for every incoming client connection, so the else branch runs two loops: the first walks the receive queue, finds those newly created (embryonic) sock structures and links them onto the embryos list, and the second loop then calls scan_inflight() on each of them. Let us look at what this function does:
static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
			  struct sk_buff_head *hitlist)
{
	struct sk_buff *skb;
	struct sk_buff *next;

	spin_lock(&x->sk_receive_queue.lock);
	receive_queue_for_each_skb(x, next, skb) {
		/*
		 *	Do we have file descriptors ?
		 */
		if (UNIXCB(skb).fp) {
			bool hit = false;
			/*
			 *	Process the descriptors of this socket
			 */
			int nfd = UNIXCB(skb).fp->count;
			struct file **fp = UNIXCB(skb).fp->fp;
			while (nfd--) {
				/*
				 *	Get the socket the fd matches
				 *	if it indeed does so
				 */
				struct sock *sk = unix_get_socket(*fp++);
				if (sk) {
					hit = true;
					func(unix_sk(sk));
				}
			}
			if (hit && hitlist != NULL) {
				__skb_unlink(skb, &x->sk_receive_queue);
				__skb_queue_tail(hitlist, skb);
			}
		}
	}
	spin_unlock(&x->sk_receive_queue.lock);
}
Look first at the arguments passed down. The second one is the function pointer func, which in the unix_gc() code above was dec_inflight, and the sock passed in is one of the "orphan" candidates found earlier. Combine that with this macro:
#define receive_queue_for_each_skb(sk, next, skb)		\
	for (skb = sock_queue_head(sk)->next, next = skb->next; \
	     skb != sock_queue_head(sk); skb = next, next = skb->next)
and it is easy to see what this code does: it loops over the packets on the orphan socket's sk_receive_queue and examines the file pointers each packet is carrying; whenever a file pointer refers to a unix socket, func is called on it, which here is the dec_inflight we passed in:
static void dec_inflight(struct unix_sock *usk)
{
	atomic_dec(&usk->inflight);
}
As we can see, it simply decrements the in-flight counter of the referenced socket. Because hitlist was passed as NULL in this round, the final if statement in scan_inflight() does nothing. Continuing in unix_gc(), the while loop that follows rescans the gc_candidates list: if a sock's inflight counter is still greater than 0, it is moved back onto gc_inflight_list and scan_children() is called on it once more, only this time executing:
static void inc_inflight_move_tail(struct unix_sock *u)
{
	atomic_inc(&u->inflight);
	/*
	 * If this is still a candidate, move it to the end of the
	 * list, so that it's checked even if it was already passed
	 * over
	 */
	if (u->gc_candidate)
		list_move_tail(&u->link, &gc_candidates);
}
This is the inverse of dec_inflight: it increments the in-flight counter, and adds one extra check: if the referenced sock is still a candidate (still an orphan), it is moved to the tail of gc_candidates so it gets examined again on a later pass. Finally, unix_gc() calls scan_children() one more time, now with a freshly initialized hitlist queue head; after these repeated scans, the skbs collected in hitlist are the garbage that has been definitively identified as reclaimable, and they are freed at the end.
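To pull the pieces together, the overall flow of unix_gc() can be summarized schematically (comments only, as a reading aid, not actual kernel code):

/*
 * Phase 1: every in-flight AF_UNIX socket whose file reference count equals
 *          its in-flight count (i.e. it is referenced only from queued
 *          messages) is moved onto gc_candidates.
 *
 * Phase 2: dec_inflight() subtracts the references that come from the
 *          candidates' own receive queues.  A candidate whose inflight count
 *          is still positive is reachable from outside the candidate set, so
 *          it is rescued: it goes back onto gc_inflight_list, and
 *          inc_inflight_move_tail() restores the counts of the sockets its
 *          queue refers to, re-queueing any that are still candidates for
 *          another look.
 *
 * Phase 3: whatever remains on gc_candidates is purely cyclic garbage; the
 *          skbs carrying those descriptors are collected into hitlist and
 *          freed with __skb_queue_purge(), which breaks the cycles.
 */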