@@ -716,17 +716,19 @@
 
 	if (cb->args[0] == 0) {
 		if (!(r->idiag_states & (TCPF_LISTEN | TCPF_SYN_RECV)))
 			goto skip_listen_ht;
 
-		inet_listen_lock(hashinfo);
 		for (i = s_i; i < INET_LHTABLE_SIZE; i++) {
 			struct sock *sk;
-			struct hlist_node *node;
+			struct hlist_nulls_node *node;
+			struct inet_listen_hashbucket *ilb;
 
 			num = 0;
-			sk_for_each(sk, node, &hashinfo->listening_hash[i]) {
+			ilb = &hashinfo->listening_hash[i];
+			spin_lock_bh(&ilb->lock);
+			sk_nulls_for_each(sk, node, &ilb->head) {
 				struct inet_sock *inet = inet_sk(sk);
 
 				if (num < s_num) {
 					num++;
 					continue;
@@ -740,34 +742,34 @@
 				    r->id.idiag_dport ||
 				    cb->args[3] > 0)
 					goto syn_recv;
 
 				if (inet_csk_diag_dump(sk, skb, cb) < 0) {
-					inet_listen_unlock(hashinfo);
+					spin_unlock_bh(&ilb->lock);
 					goto done;
 				}
 
 syn_recv:
 				if (!(r->idiag_states & TCPF_SYN_RECV))
 					goto next_listen;
 
 				if (inet_diag_dump_reqs(skb, sk, cb) < 0) {
-					inet_listen_unlock(hashinfo);
+					spin_unlock_bh(&ilb->lock);
 					goto done;
 				}
 
 next_listen:
 				cb->args[3] = 0;
 				cb->args[4] = 0;
 				++num;
 			}
+			spin_unlock_bh(&ilb->lock);
 
 			s_num = 0;
 			cb->args[3] = 0;
 			cb->args[4] = 0;
 		}
-		inet_listen_unlock(hashinfo);
 skip_listen_ht:
 		cb->args[0] = 1;
 		s_i = num = s_num = 0;
 	}
 
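In the listening-hash half above, the single inet_listen_lock()/inet_listen_unlock() pair guarding all of listening_hash[] is replaced by a spinlock embedded in each struct inet_listen_hashbucket, and the chains become hlist_nulls lists (the flavor used for RCU-protected lookups); the dump now takes only the lock of the bucket it is walking and has to drop that bucket's lock on every early exit. The following is a minimal userspace sketch of that per-bucket locking pattern, not kernel code: the names table, bucket, insert and dump_bucket are invented for the illustration, and a pthread mutex stands in for the kernel's spin_lock_bh().

/* Per-bucket locking sketch; build with: cc -std=c99 demo.c -lpthread */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define NBUCKETS 32

struct entry {
	int val;
	struct entry *next;
};

struct bucket {
	pthread_mutex_t lock;	/* one lock per bucket, like ilb->lock */
	struct entry *head;
};

static struct bucket table[NBUCKETS];

static void insert(int val)
{
	struct bucket *b = &table[val % NBUCKETS];
	struct entry *e = malloc(sizeof(*e));

	e->val = val;
	pthread_mutex_lock(&b->lock);	/* writers contend only on their bucket */
	e->next = b->head;
	b->head = e;
	pthread_mutex_unlock(&b->lock);
}

/* Walk one chain under its own lock, as the listen dump loop now does. */
static void dump_bucket(struct bucket *b)
{
	pthread_mutex_lock(&b->lock);
	for (struct entry *e = b->head; e; e = e->next)
		printf("%d\n", e->val);
	pthread_mutex_unlock(&b->lock);
}

int main(void)
{
	for (int i = 0; i < NBUCKETS; i++)
		pthread_mutex_init(&table[i].lock, NULL);

	for (int i = 0; i < 100; i++)
		insert(i);
	for (int i = 0; i < NBUCKETS; i++)
		dump_bucket(&table[i]);
	return 0;
}

With one lock per bucket, a dump that is busy in one chain no longer blocks inserts or removals in every other chain, which is the serialization the old global listen lock imposed.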
@@ -774,24 +776,25 @@
 	if (!(r->idiag_states & ~(TCPF_LISTEN | TCPF_SYN_RECV)))
 		goto unlock;
 
 	for (i = s_i; i < hashinfo->ehash_size; i++) {
 		struct inet_ehash_bucket *head = &hashinfo->ehash[i];
-		rwlock_t *lock = inet_ehash_lockp(hashinfo, i);
+		spinlock_t *lock = inet_ehash_lockp(hashinfo, i);
 		struct sock *sk;
-		struct hlist_node *node;
+		struct hlist_nulls_node *node;
 
 		num = 0;
 
-		if (hlist_empty(&head->chain) && hlist_empty(&head->twchain))
+		if (hlist_nulls_empty(&head->chain) &&
+		    hlist_nulls_empty(&head->twchain))
 			continue;
 
 		if (i > s_i)
 			s_num = 0;
 
-		read_lock_bh(lock);
-		sk_for_each(sk, node, &head->chain) {
+		spin_lock_bh(lock);
+		sk_nulls_for_each(sk, node, &head->chain) {
 			struct inet_sock *inet = inet_sk(sk);
 
 			if (num < s_num)
 				goto next_normal;
 			if (!(r->idiag_states & (1 << sk->sk_state)))