--- a/include/net/tcp.h
+++ b/include/net/tcp.h
         int (*send_synack)(struct sock *sk, struct dst_entry *dst,
                            struct flowi *fl, struct request_sock *req,
                            u16 queue_mapping, struct tcp_fastopen_cookie *foc);
+        void (*queue_hash_add)(struct sock *sk, struct request_sock *req,
+                               const unsigned long timeout);
 };

 #ifdef CONFIG_SYN_COOKIES
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
         .route_req = tcp_v4_route_req,
         .init_seq = tcp_v4_init_sequence,
         .send_synack = tcp_v4_send_synack,
+        .queue_hash_add = inet_csk_reqsk_queue_hash_add,
 };

 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
[...]
                         goto drop_and_free;

                 tcp_rsk(req)->listener = NULL;
-                inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
+                af_ops->queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
         }

         return 0;
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
         .route_req = tcp_v6_route_req,
         .init_seq = tcp_v6_init_sequence,
         .send_synack = tcp_v6_send_synack,
+        .queue_hash_add = inet6_csk_reqsk_queue_hash_add,
 };

 static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
[...]
                         goto drop_and_free;

                 tcp_rsk(req)->listener = NULL;
-                inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
+                af_ops->queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
         }

         return 0;
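
The effect of the two conn_request hunks is that tcp_v4_conn_request() and tcp_v6_conn_request() stop calling the family-specific hash function directly and instead dispatch through the per-family ops table, so the request-handling code itself no longer needs to know which address family it is serving. The stand-alone sketch below is not kernel code: every name in it is invented or a drastically simplified stand-in, and only the shape of the queue_hash_add hook mirrors the patch. It just shows the indirection pattern in miniature.

/*
 * Toy illustration of the ops-table indirection introduced by the patch.
 * Compile with any C compiler; nothing here touches the kernel.
 */
#include <stdio.h>

struct request_sock {                   /* simplified stand-in for the kernel's request state */
        int id;
};

/* Per-address-family callbacks, analogous to struct tcp_request_sock_ops. */
struct af_request_ops {
        void (*queue_hash_add)(struct request_sock *req, unsigned long timeout);
};

static void v4_queue_hash_add(struct request_sock *req, unsigned long timeout)
{
        printf("IPv4: hash request %d, timeout %lu\n", req->id, timeout);
}

static void v6_queue_hash_add(struct request_sock *req, unsigned long timeout)
{
        printf("IPv6: hash request %d, timeout %lu\n", req->id, timeout);
}

static const struct af_request_ops v4_ops = {
        .queue_hash_add = v4_queue_hash_add,
};

static const struct af_request_ops v6_ops = {
        .queue_hash_add = v6_queue_hash_add,
};

/*
 * Family-independent request handling: the caller picks the ops table once,
 * and the shared code only ever sees the hook, never the address family.
 */
static void conn_request(const struct af_request_ops *af_ops,
                         struct request_sock *req)
{
        af_ops->queue_hash_add(req, 1000);      /* 1000 stands in for TCP_TIMEOUT_INIT */
}

int main(void)
{
        struct request_sock r4 = { .id = 4 };
        struct request_sock r6 = { .id = 6 };

        conn_request(&v4_ops, &r4);             /* dispatches to the IPv4 variant */
        conn_request(&v6_ops, &r6);             /* dispatches to the IPv6 variant */
        return 0;
}

Routing the call through af_ops is what lets the address-family-independent parts of request handling be shared without #ifdefs or duplicated v4/v6 branches; the hook's const unsigned long timeout parameter matches the timeout argument already passed to the two existing queue-hash helpers.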