2008-11-13 09:44:24

We continue with yesterday's look at the socket connect path, picking up the remainder of tcp_v4_connect(). Yesterday we covered the first part of the function; here is the rest:

    if (tcp_death_row.sysctl_tw_recycle &&
        !tp->rx_opt.ts_recent_stamp && rt->rt_dst == daddr) {
        struct inet_peer *peer = rt_get_peer(rt);
        /*
         * VJ's idea. We save last timestamp seen from
         * the destination in peer table, when entering state
         * TIME-WAIT, and initialize rx_opt.ts_recent from it,
         * when trying new connection.
         */
        if (peer != NULL &&
            peer->tcp_ts_stamp + TCP_PAWS_MSL >= get_seconds()) {
            tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
            tp->rx_opt.ts_recent = peer->tcp_ts;
        }
    }

    inet->dport = usin->sin_port;
    inet->daddr = daddr;

    inet_csk(sk)->icsk_ext_hdr_len = 0;
    if (inet->opt)
        inet_csk(sk)->icsk_ext_hdr_len = inet->opt->optlen;

    tp->rx_opt.mss_clamp = 536;

    /* Socket identity is still unknown (sport may be zero).
     * However we set state to SYN-SENT and not releasing socket
     * lock select source port, enter ourselves into the hash tables and
     * complete initialization after this.
     */

    tcp_set_state(sk, TCP_SYN_SENT);
    err = inet_hash_connect(&tcp_death_row, sk);
    if (err)
        goto failure;

    err = ip_route_newports(&rt, IPPROTO_TCP,
                inet->sport, inet->dport, sk);
    if (err)
        goto failure;

    /* OK, now commit destination to socket. */
    sk->sk_gso_type = SKB_GSO_TCPV4;
    sk_setup_caps(sk, &rt->u.dst);

    if (!tp->write_seq)
        tp->write_seq = secure_tcp_sequence_number(inet->saddr,
                             inet->daddr,
                             inet->sport,
                             usin->sin_port);

    inet->id = tp->write_seq ^ jiffies;

    err = tcp_connect(sk);
    rt = NULL;
    if (err)
        goto failure;

    return 0;

failure:
    /*
     * This unhashes the socket and releases the local port,
     * if necessary.
     */

    tcp_set_state(sk, TCP_CLOSE);
    ip_rt_put(rt);
    sk->sk_route_caps = 0;
    inet->dport = 0;
    return err;
}

Before diving into this part, let's recall the variables declared at the very beginning of tcp_v4_connect():

    struct inet_sock *inet = inet_sk(sk);
    struct tcp_sock *tp = tcp_sk(sk);

The two functions called here, inet_sk() and tcp_sk(), both simply cast the sock pointer: the first to struct inet_sock, the second to struct tcp_sock, a structure whose details we have not touched on yet. At first it may seem strange that one sock pointer can be converted to either of these types. But if we look at struct inet_sock, its very first member is a struct sock, so once we have the sock pointer we also have the starting address of the enclosing inet_sock, and the cast is easy to understand. What about struct tcp_sock? We have not looked at this structure before; it is very large and covers a lot of ground, so we will not list it here, and will examine its fields as we meet them. The question is the same: does it have a sock at its head? Its first member is actually struct inet_connection_sock inet_conn. Readers of the previous sections will recall that this structure, too, is routinely obtained by casting a sock pointer. Why? Because struct inet_connection_sock begins with a struct inet_sock, and, as we just said, struct inet_sock begins with a struct sock. So the sock effectively sits at the very start of tcp_sock, which makes everything much clearer. In short, struct tcp_sock is the TCP-specific representation of the socket. Now let's look at the code above.
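To make the layout concrete, here is a minimal sketch of how these structures nest. This is not the real kernel definitions, which contain many more fields; only the embedding that makes the pointer casts legal is shown, and the *_sketch names are invented for illustration:

/* Sketch only: each outer structure begins with the inner one,
 * so a struct sock pointer is also a valid pointer to the start
 * of any of the larger structures.
 */
struct sock_sketch { int dummy; };          /* stands in for struct sock */

struct inet_sock_sketch {
    struct sock_sketch sk;                  /* first member: the struct sock */
    /* ... IP-level fields: saddr, daddr, sport, dport, opt, ... */
};

struct inet_connection_sock_sketch {
    struct inet_sock_sketch icsk_inet;      /* first member: struct inet_sock */
    /* ... connection-oriented fields: icsk_bind_hash, timers, ... */
};

struct tcp_sock_sketch {
    struct inet_connection_sock_sketch inet_conn;   /* first member */
    /* ... TCP-specific fields: write_seq, rx_opt, snd_nxt, ... */
};

/* Hence inet_sk(), inet_csk() and tcp_sk() amount to pointer casts: */
#define tcp_sk_sketch(sk) ((struct tcp_sock_sketch *)(sk))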

The code begins with some checks and settings on the tcp_sock fields. Then we meet a new structure variable, tcp_death_row:

struct inet_timewait_death_row tcp_death_row = {
    .sysctl_max_tw_buckets = NR_FILE * 2,
    .period        = TCP_TIMEWAIT_LEN / INET_TWDR_TWKILL_SLOTS,
    .death_lock    = __SPIN_LOCK_UNLOCKED(tcp_death_row.death_lock),
    .hashinfo    = &tcp_hashinfo,
    .tw_timer    = TIMER_INITIALIZER(inet_twdr_hangman, 0,
                     (unsigned long)&tcp_death_row),
    .twkill_work    = __WORK_INITIALIZER(tcp_death_row.twkill_work,
                     inet_twdr_twkill_work),
/* Short-time timewait calendar */

    .twcal_hand    = -1,
    .twcal_timer    = TIMER_INITIALIZER(inet_twdr_twcal_tick, 0,
                     (unsigned long)&tcp_death_row),
};
struct inet_timewait_death_row {
    /* Short-time timewait calendar */
    int            twcal_hand;
    unsigned long        twcal_jiffie;
    struct timer_list    twcal_timer;
    struct hlist_head    twcal_row[INET_TWDR_RECYCLE_SLOTS];

    spinlock_t        death_lock;
    int            tw_count;
    int            period;
    u32            thread_slots;
    struct work_struct    twkill_work;
    struct timer_list    tw_timer;
    int            slot;
    struct hlist_head    cells[INET_TWDR_TWKILL_SLOTS];
    struct inet_hashinfo     *hashinfo;
    int            sysctl_tw_recycle;
    int            sysctl_max_tw_buckets;
};

This structure variable belongs to TCP's TIME-WAIT machinery: it holds the hash table and the timers used to manage and expire sockets sitting in the TIME-WAIT state. The tcp_death_row variable above is initialized with a number of settings that we will leave aside for now. The code first checks its sysctl_tw_recycle field, which corresponds to the net.ipv4.tcp_tw_recycle sysctl and indicates whether TIME-WAIT state may be recycled quickly; the initializer above leaves it unset (zero), so in our case this if block is skipped. When it does run, its purpose is to fetch, via the routing entry, the last timestamp seen from this destination, information that is cached in a struct inet_peer (a structure we will set aside for the moment), so that rx_opt.ts_recent can be seeded when trying the new connection. Next the code fills in the inet_sock variable inet, setting the destination port and destination address. Note that the destination address is the one obtained from the route lookup above; if you have forgotten that, please go back to the previous section http://blog.chinaunix.net/u2/64681/showart_1408613.html. I am wumingxiaozu (无名小卒); if you repost this, please credit the source. The code then obtains a struct inet_connection_sock pointer through the inline function inet_csk() and sets icsk_ext_hdr_len according to inet->opt, i.e. the length of any IP options. The line tp->rx_opt.mss_clamp = 536; clamps the maximum segment size to the RFC 1122 default of 536 bytes until the peer advertises its own MSS.
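For reference, inet_csk() follows exactly the casting pattern described above; in kernels of this era it is simply a cast of the sock pointer, roughly as shown here (illustrative; the real helper lives in include/net/inet_connection_sock.h):

static inline struct inet_connection_sock *inet_csk(const struct sock *sk)
{
    /* valid because struct inet_connection_sock starts with
     * struct inet_sock, which in turn starts with struct sock */
    return (struct inet_connection_sock *)sk;
}

With dport, daddr, icsk_ext_hdr_len and mss_clamp filled in, tcp_v4_connect() then calls tcp_set_state():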

void tcp_set_state(struct sock *sk, int state)
{
    int oldstate = sk->sk_state;

    switch (state) {
    case TCP_ESTABLISHED:
        if (oldstate != TCP_ESTABLISHED)
            TCP_INC_STATS(TCP_MIB_CURRESTAB);
        break;

    case TCP_CLOSE:
        if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED)
            TCP_INC_STATS(TCP_MIB_ESTABRESETS);

        sk->sk_prot->unhash(sk);
        if (inet_csk(sk)->icsk_bind_hash &&
         !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
            inet_put_port(sk);
        /* fall through */
    default:
        if (oldstate==TCP_ESTABLISHED)
            TCP_DEC_STATS(TCP_MIB_CURRESTAB);
    }

    /* Change state AFTER socket is unhashed to avoid closed
     * socket sitting in hash tables.
     */

    sk->sk_state = state;

#ifdef STATE_TRACE
    SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n",sk, statename[oldstate],statename[state]);
#endif
}

Note that the state argument passed down here is TCP_SYN_SENT, so none of the switch cases above apply; this code simply sets sk->sk_state to TCP_SYN_SENT and returns. Next the code calls inet_hash_connect():

int inet_hash_connect(struct inet_timewait_death_row *death_row,
         struct sock *sk)
{
    return __inet_hash_connect(death_row, sk, inet_sk_port_offset(sk),
            __inet_check_established, __inet_hash_nolisten);
}
int __inet_hash_connect(struct inet_timewait_death_row *death_row,
        struct sock *sk, u32 port_offset,
        int (*check_established)(struct inet_timewait_death_row *,
            struct sock *, __u16, struct inet_timewait_sock **),
        void (*hash)(struct sock *sk))
{
    struct inet_hashinfo *hinfo = death_row->hashinfo;
    const unsigned short snum = inet_sk(sk)->num;
    struct inet_bind_hashbucket *head;
    struct inet_bind_bucket *tb;
    int ret;
    struct net *net = sock_net(sk);

    if (!snum) {
        int i, remaining, low, high, port;
        static u32 hint;
        u32 offset = hint + port_offset;
        struct hlist_node *node;
        struct inet_timewait_sock *tw = NULL;

        inet_get_local_port_range(&low, &high);
        remaining = (high - low) + 1;

        local_bh_disable();
        for (i = 1; i <= remaining; i++) {
            port = low + (i + offset) % remaining;
            head = &hinfo->bhash[inet_bhashfn(port, hinfo->bhash_size)];
            spin_lock(&head->lock);

            /* Does not bother with rcv_saddr checks,
             * because the established check is already
             * unique enough.
             */

            inet_bind_bucket_for_each(tb, node, &head->chain) {
                if (tb->ib_net == net && tb->port == port) {
                    BUG_TRAP(!hlist_empty(&tb->owners));
                    if (tb->fastreuse >= 0)
                        goto next_port;
                    if (!check_established(death_row, sk,
                                port, &tw))
                        goto ok;
                    goto next_port;
                }
            }

            tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep,
                    net, head, port);
            if (!tb) {
                spin_unlock(&head->lock);
                break;
            }
            tb->fastreuse = -1;
            goto ok;

        next_port:
            spin_unlock(&head->lock);
        }
        local_bh_enable();

        return -EADDRNOTAVAIL;

ok:
        hint += i;

        /* Head lock still held and bh's disabled */
        inet_bind_hash(sk, tb, port);
        if (sk_unhashed(sk)) {
            inet_sk(sk)->sport = htons(port);
            hash(sk);
        }
        spin_unlock(&head->lock);

        if (tw) {
            inet_twsk_deschedule(tw, death_row);
            inet_twsk_put(tw);
        }

        ret = 0;
        goto out;
    }

    head = &hinfo->bhash[inet_bhashfn(snum, hinfo->bhash_size)];
    tb = inet_csk(sk)->icsk_bind_hash;
    spin_lock_bh(&head->lock);
    if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
        hash(sk);
        spin_unlock_bh(&head->lock);
        return 0;
    } else {
        spin_unlock(&head->lock);
        /* No definite answer... Walk to established hash table */
        ret = check_established(death_row, sk, snum, NULL);
out:
        local_bh_enable();
        return ret;
    }
}

The function above looks rather complex, but given the port we set up in our exercise, only its second half executes here: the sock is linked onto a bucket of the hash table. This is done through the function pointer __inet_hash_nolisten() that we saw inet_hash_connect() pass down above:

void __inet_hash_nolisten(struct sock *sk)
{
    struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
    struct hlist_head *list;
    rwlock_t *lock;
    struct inet_ehash_bucket *head;

    BUG_TRAP(sk_unhashed(sk));

    sk->sk_hash = inet_sk_ehashfn(sk);
    head = inet_ehash_bucket(hashinfo, sk->sk_hash);
    list = &head->chain;
    lock = inet_ehash_lockp(hashinfo, sk->sk_hash);

    write_lock(lock);
    __sk_add_node(sk, list);
    sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
    write_unlock(lock);
}

The hash-info structure and the hash buckets were listed in the section at http://blog.chinaunix.net/u2/64681/showart_1387214.html.

Note that inet_hash_connect() uses the hash-info structure held in the tcp_death_row timer structure above, and that in __inet_hash_connect() we did not enter the if branch that calls inet_bind_bucket_create() and the functions related to it.
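As a quick reminder of how __inet_hash_nolisten() above finds its bucket: the established-hash table is sized to a power of two, so the bucket index is just the socket's hash value masked by the table size. A simplified sketch follows (names follow the kernels of this period; treat it as illustrative rather than the exact source):

/* Simplified sketch of the bucket lookup behind inet_ehash_bucket().
 * ehash_size is a power of two, so the mask selects one bucket; the
 * sock is then added to that bucket's chain under the per-bucket lock.
 */
static inline struct inet_ehash_bucket *
ehash_bucket_sketch(struct inet_hashinfo *hashinfo, unsigned int hash)
{
    return &hashinfo->ehash[hash & (hashinfo->ehash_size - 1)];
}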

Back in tcp_v4_connect(), the sock structure has now been linked into TCP's hash table, and the next step is to enter ip_route_newports():

static inline int ip_route_newports(struct rtable **rp, u8 protocol,
                 __be16 sport, __be16 dport, struct sock *sk)
{
    if (sport != (*rp)->fl.fl_ip_sport ||
     dport != (*rp)->fl.fl_ip_dport) {
        struct flowi fl;

        memcpy(&fl, &(*rp)->fl, sizeof(fl));
        fl.fl_ip_sport = sport;
        fl.fl_ip_dport = dport;
        fl.proto = protocol;
        ip_rt_put(*rp);
        *rp = NULL;
        security_sk_classify_flow(sk, &fl);
        return ip_route_output_flow(sock_net(sk), rp, &fl, sk, 0);
    }
    return 0;
}

This function checks whether our local port and destination port still match those recorded in the route lookup result; if not, it builds a new flow key and redoes the route lookup, adjusting the route accordingly. In that case the function returns through ip_route_output_flow(), which we already met in the previous section, http://blog.chinaunix.net/u2/64681/showart_1408613.html 《内核中的TCP的追踪分析-8-TCP(IPV4)socket连接》.

Next in the code we see:

sk->sk_gso_type = SKB_GSO_TCPV4;

This sets the Generic Segmentation Offload (GSO) type. For a brief introduction to GSO, here is a short excerpt from the linuxforum forums:

One strategy for improving protocol-stack efficiency is to postpone segmentation as late as possible, ideally into the network driver: there the super-packet is split up to form a scatter-gather list, or the segments are reassembled in a pre-allocated block of memory, and then handed to the NIC.

Then the code goes on to call:

sk_setup_caps(sk, &rt->u.dst);

Let's take a look at this function:

void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
{
    __sk_dst_set(sk, dst);
    sk->sk_route_caps = dst->dev->features;
    if (sk->sk_route_caps & NETIF_F_GSO)
        sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
    if (sk_can_gso(sk)) {
        if (dst->header_len) {
            sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
        } else {
            sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
            sk->sk_gso_max_size = dst->dev->gso_max_size;
        }
    }
}

This function stores the route in the socket and sets sk->sk_route_caps to the features of the network device, which were recorded in struct dst_entry when the route was set up. Since we already set sk->sk_gso_type to SKB_GSO_TCPV4 above, the if (sk_can_gso(sk)) branch is taken.
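sk_can_gso() checks whether the device features copied into sk->sk_route_caps cover the GSO type chosen for this socket (SKB_GSO_TCPV4 here). In kernels of this period it boils down to a feature-bit test roughly like the following sketch (illustrative only; the real helpers live in include/net/sock.h and include/linux/netdevice.h):

/* Sketch of the test behind sk_can_gso(): the socket's gso type is
 * shifted into the device-feature bit space and compared against the
 * capabilities recorded in sk_route_caps.
 */
static inline int net_gso_ok_sketch(int features, int gso_type)
{
    int feature = gso_type << NETIF_F_GSO_SHIFT;
    return (features & feature) == feature;
}

static inline int sk_can_gso_sketch(const struct sock *sk)
{
    return net_gso_ok_sketch(sk->sk_route_caps, sk->sk_gso_type);
}

Finally, at the end of tcp_v4_connect(), the code calls tcp_connect():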

int tcp_connect(struct sock *sk)
{
    struct tcp_sock *tp = tcp_sk(sk);
    struct sk_buff *buff;

    tcp_connect_init(sk);

    buff = alloc_skb_fclone(MAX_TCP_HEADER + 15, sk->sk_allocation);
    if (unlikely(buff == NULL))
        return -ENOBUFS;

    /* Reserve space for headers. */
    skb_reserve(buff, MAX_TCP_HEADER);

    tp->snd_nxt = tp->write_seq;
    tcp_init_nondata_skb(buff, tp->write_seq++, TCPCB_FLAG_SYN);
    TCP_ECN_send_syn(sk, buff);

    /* Send it off. */
    TCP_SKB_CB(buff)->when = tcp_time_stamp;
    tp->retrans_stamp = TCP_SKB_CB(buff)->when;
    skb_header_release(buff);
    __tcp_add_write_queue_tail(sk, buff);
    sk->sk_wmem_queued += buff->truesize;
    sk_mem_charge(sk, buff->truesize);
    tp->packets_out += tcp_skb_pcount(buff);
    tcp_transmit_skb(sk, buff, 1, GFP_KERNEL);

    /* We change tp->snd_nxt after the tcp_transmit_skb() call
     * in order to make this packet get counted in tcpOutSegs.
     */

    tp->snd_nxt = tp->write_seq;
    tp->pushed_seq = tp->write_seq;
    TCP_INC_STATS(TCP_MIB_ACTIVEOPENS);

    /* Timer for repeating the SYN until an answer. */
    inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                 inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
    return 0;
}

This function is very important; we will continue with it tomorrow.
