VirtualBox

source: vbox/trunk/src/VBox/Devices/Network/slirp/tcp_input.c@ 63013

Last change on this file since 63013 was 63013, checked in by vboxsync, 8 years ago

slirp: warnings

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 66.5 KB
Line 
1/* $Id: tcp_input.c 63013 2016-08-04 21:42:42Z vboxsync $ */
2/** @file
3 * NAT - TCP input.
4 */
5
6/*
7 * Copyright (C) 2006-2016 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*
19 * This code is based on:
20 *
21 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994
22 * The Regents of the University of California. All rights reserved.
23 *
24 * Redistribution and use in source and binary forms, with or without
25 * modification, are permitted provided that the following conditions
26 * are met:
27 * 1. Redistributions of source code must retain the above copyright
28 * notice, this list of conditions and the following disclaimer.
29 * 2. Redistributions in binary form must reproduce the above copyright
30 * notice, this list of conditions and the following disclaimer in the
31 * documentation and/or other materials provided with the distribution.
32 * 3. All advertising materials mentioning features or use of this software
33 * must display the following acknowledgement:
34 * This product includes software developed by the University of
35 * California, Berkeley and its contributors.
36 * 4. Neither the name of the University nor the names of its contributors
37 * may be used to endorse or promote products derived from this software
38 * without specific prior written permission.
39 *
40 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
41 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
42 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
43 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
44 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
45 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
46 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
47 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
48 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
49 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
50 * SUCH DAMAGE.
51 *
52 * @(#)tcp_input.c 8.5 (Berkeley) 4/10/94
53 * tcp_input.c,v 1.10 1994/10/13 18:36:32 wollman Exp
54 */
55
56/*
57 * Changes and additions relating to SLiRP
58 * Copyright (c) 1995 Danny Gasparovski.
59 *
60 * Please read the file COPYRIGHT for the
61 * terms and conditions of the copyright.
62 */
63
64#include <slirp.h>
65#include "ip_icmp.h"
66
67
#if 0 /* code using these macros is commented out */
/* PAWS idle limit: invalidate ts_recent after 24 days (RFC 1323 suggestion). */
# define TCP_PAWS_IDLE (24 * 24 * 60 * 60 * PR_SLOWHZ)

/* for modulo comparisons of timestamps (signed subtraction handles wraparound) */
# define TSTMP_LT(a, b) ((int)((a)-(b)) < 0)
# define TSTMP_GEQ(a, b) ((int)((a)-(b)) >= 0)
#endif
75
#ifndef TCP_ACK_HACK
/*
 * Delayed-ACK policy: ACK immediately when the peer set PSH (it is likely
 * waiting for a reply), otherwise delay the ACK so it can piggyback on
 * outgoing data.  'ti' is the tcpiphdr of the received segment.
 *
 * NOTE(review): the macro expands to a bare if/else without a
 * do { } while (0) wrapper, so it is fragile inside an enclosing
 * if/else at the call site — confirm all invocations before changing.
 */
#define DELAY_ACK(tp, ti) \
    if (ti->ti_flags & TH_PUSH) \
        tp->t_flags |= TF_ACKNOW; \
    else \
        tp->t_flags |= TF_DELACK;
#else  /* TCP_ACK_HACK */
/* Always delay the ACK; the second argument is ignored. */
#define DELAY_ACK(tp, ign) \
    tp->t_flags |= TF_DELACK;
#endif /* TCP_ACK_HACK */
86
87
/*
 * deps: netinet/tcp_reass.c
 * tcp_reass_maxqlen = 48 (default)
 * tcp_reass_maxseg = nmbclusters/16 (nmbclusters = 1024 + maxusers * 64 from kern/kern_mbuf.c let's say 256)
 */
/**
 * Insert a TCP segment into the per-connection reassembly queue and hand any
 * now-contiguous data up to the socket buffer.
 *
 * @param pData  NAT instance state (used for stats, mbuf freeing, sbappend).
 * @param tp     Connection control block owning the reassembly queue t_segq.
 * @param th     TCP header of the segment, or NULL to only flush data that
 *               became deliverable (called after the connection reaches
 *               ESTABLISHED).
 * @param tlenp  In/out: segment payload length; trimmed if the segment
 *               overlaps already-queued data, set to 0 when the segment is
 *               dropped for resource limits.
 * @param m      The segment's mbuf chain; ownership passes to this function
 *               (queued, appended to the socket, or freed).
 *
 * @returns TH_FIN flag of the last segment delivered to the socket, or 0 if
 *          nothing was delivered.
 */
int
tcp_reass(PNATState pData, struct tcpcb *tp, struct tcphdr *th, int *tlenp, struct mbuf *m)
{
    struct tseg_qent *q;
    struct tseg_qent *p = NULL;   /* last queue entry starting before th, if any */
    struct tseg_qent *nq;
    struct tseg_qent *te = NULL;  /* new queue entry for this segment */
    struct socket *so = tp->t_socket;
    int flags;
    STAM_PROFILE_START(&pData->StatTCP_reassamble, tcp_reassamble);
    LogFlowFunc(("ENTER: pData:%p, tp:%R[tcpcb793], th:%p, tlenp:%p, m:%p\n", pData, tp, th, tlenp, m));

    /*
     * XXX: tcp_reass() is rather inefficient with its data structures
     * and should be rewritten (see NetBSD for optimizations). While
     * doing that it should move to its own file tcp_reass.c.
     */

    /*
     * Call with th==NULL after become established to
     * force pre-ESTABLISHED data up to user socket.
     */
    if (th == NULL)
    {
        LogFlowFunc(("%d -> present\n", __LINE__));
        goto present;
    }

    /*
     * Limit the number of segments in the reassembly queue to prevent
     * holding on to too many segments (and thus running out of mbufs).
     * Make sure to let the missing segment through which caused this
     * queue. Always keep one global queue entry spare to be able to
     * process the missing segment.
     */
    if (   th->th_seq != tp->rcv_nxt
        && (   tcp_reass_qsize + 1 >= tcp_reass_maxseg
            || tp->t_segqlen >= tcp_reass_maxqlen))
    {
        tcp_reass_overflows++;
        tcpstat.tcps_rcvmemdrop++;
        m_freem(pData, m);
        *tlenp = 0;  /* tell the caller no payload was accepted */
        STAM_PROFILE_STOP(&pData->StatTCP_reassamble, tcp_reassamble);
        LogFlowFuncLeave();
        return (0);
    }

    /*
     * Allocate a new queue entry. If we can't, or hit the zone limit
     * just drop the pkt.
     */
    te = RTMemAlloc(sizeof(struct tseg_qent));
    if (te == NULL)
    {
        tcpstat.tcps_rcvmemdrop++;
        m_freem(pData, m);
        *tlenp = 0;
        STAM_PROFILE_STOP(&pData->StatTCP_reassamble, tcp_reassamble);
        LogFlowFuncLeave();
        return (0);
    }
    /* Counters are bumped here and must be un-bumped on the full-duplicate
     * path below that frees te again. */
    tp->t_segqlen++;
    tcp_reass_qsize++;

    /*
     * Find a segment which begins after this one does.
     * (p is left pointing at the last entry that begins at or before th.)
     */
    LIST_FOREACH(q, &tp->t_segq, tqe_q)
    {
        if (SEQ_GT(q->tqe_th->th_seq, th->th_seq))
            break;
        p = q;
    }

    /*
     * If there is a preceding segment, it may provide some of
     * our data already. If so, drop the data from the incoming
     * segment. If it provides all of our data, drop us.
     */
    if (p != NULL)
    {
        int i;
        /* conversion to int (in i) handles seq wraparound */
        i = p->tqe_th->th_seq + p->tqe_len - th->th_seq;
        if (i > 0)
        {
            if (i >= *tlenp)
            {
                /* Entirely duplicated by the predecessor: free the segment
                 * and the just-allocated entry, undoing the counter bumps. */
                tcpstat.tcps_rcvduppack++;
                tcpstat.tcps_rcvdupbyte += *tlenp;
                m_freem(pData, m);
                RTMemFree(te);
                tp->t_segqlen--;
                tcp_reass_qsize--;
                /*
                 * Try to present any queued data
                 * at the left window edge to the user.
                 * This is needed after the 3-WHS
                 * completes.
                 */
                LogFlowFunc(("%d -> present\n", __LINE__));
                goto present; /* ??? */
            }
            /* Partial overlap: trim the duplicated prefix off this segment. */
            m_adj(m, i);
            *tlenp -= i;
            th->th_seq += i;
        }
    }
    tcpstat.tcps_rcvoopack++;
    tcpstat.tcps_rcvoobyte += *tlenp;

    /*
     * While we overlap succeeding segments trim them or,
     * if they are completely covered, dequeue them.
     */
    while (q)
    {
        /* i = number of bytes of q already covered by this segment */
        int i = (th->th_seq + *tlenp) - q->tqe_th->th_seq;
        if (i <= 0)
            break;
        if (i < q->tqe_len)
        {
            /* Partial overlap: trim q's head and stop (queue is ordered). */
            q->tqe_th->th_seq += i;
            q->tqe_len -= i;
            m_adj(q->tqe_m, i);
            break;
        }

        /* q is fully covered: unlink and free it, then continue. */
        nq = LIST_NEXT(q, tqe_q);
        LIST_REMOVE(q, tqe_q);
        m_freem(pData, q->tqe_m);
        RTMemFree(q);
        tp->t_segqlen--;
        tcp_reass_qsize--;
        q = nq;
    }

    /* Insert the new segment queue entry into place. */
    te->tqe_m = m;
    te->tqe_th = th;    /* NOTE: points into m's data; lives as long as m */
    te->tqe_len = *tlenp;

    if (p == NULL)
    {
        LIST_INSERT_HEAD(&tp->t_segq, te, tqe_q);
    }
    else
    {
        LIST_INSERT_AFTER(p, te, tqe_q);
    }

present:
    /*
     * Present data to user, advancing rcv_nxt through
     * completed sequence space.
     */
    if (!TCPS_HAVEESTABLISHED(tp->t_state))
    {
        STAM_PROFILE_STOP(&pData->StatTCP_reassamble, tcp_reassamble);
        return (0);
    }
    q = LIST_FIRST(&tp->t_segq);
    /* Nothing deliverable unless the head of the queue is exactly rcv_nxt. */
    if (!q || q->tqe_th->th_seq != tp->rcv_nxt)
    {
        STAM_PROFILE_STOP(&pData->StatTCP_reassamble, tcp_reassamble);
        return (0);
    }
    /* Deliver the contiguous run starting at rcv_nxt.  The loop runs at
     * least once here, so 'flags' is always assigned before the final
     * return below. */
    do
    {
        tp->rcv_nxt += q->tqe_len;
        flags = q->tqe_th->th_flags & TH_FIN;  /* read before freeing q's mbuf */
        nq = LIST_NEXT(q, tqe_q);
        LIST_REMOVE(q, tqe_q);
        /* XXX: This place should be checked for the same code in
         * original BSD code for Slirp and current BSD used SS_FCANTRCVMORE
         */
        if (so->so_state & SS_FCANTSENDMORE)
            m_freem(pData, q->tqe_m);
        else
            sbappend(pData, so, q->tqe_m);
        RTMemFree(q);
        tp->t_segqlen--;
        tcp_reass_qsize--;
        q = nq;
    }
    while (q && q->tqe_th->th_seq == tp->rcv_nxt);

    STAM_PROFILE_STOP(&pData->StatTCP_reassamble, tcp_reassamble);
    return flags;
}
284
285/*
286 * TCP input routine, follows pages 65-76 of the
287 * protocol specification dated September, 1981 very closely.
288 */
289void
290tcp_input(PNATState pData, register struct mbuf *m, int iphlen, struct socket *inso)
291{
292 struct ip *ip, *save_ip;
293 register struct tcpiphdr *ti;
294 caddr_t optp = NULL;
295 int optlen = 0;
296 int len, off;
297 int tlen = 0; /* Shut up MSC (didn't check whether MSC was right). */
298 register struct tcpcb *tp = 0;
299 register int tiflags;
300 struct socket *so = 0;
301 int todrop, acked, ourfinisacked, needoutput = 0;
302/* int dropsocket = 0; */
303 int iss = 0;
304 u_long tiwin;
305/* int ts_present = 0; */
306 unsigned ohdrlen;
307 uint8_t ohdr[60 + 8]; /* max IP header plus 8 bytes of payload for icmp */
308
309 STAM_PROFILE_START(&pData->StatTCP_input, counter_input);
310
311 LogFlow(("tcp_input: m = %8lx, iphlen = %2d, inso = %R[natsock]\n",
312 (long)m, iphlen, inso));
313
314 if (inso != NULL)
315 {
316 QSOCKET_LOCK(tcb);
317 SOCKET_LOCK(inso);
318 QSOCKET_UNLOCK(tcb);
319 }
320 /*
321 * If called with m == 0, then we're continuing the connect
322 */
323 if (m == NULL)
324 {
325 so = inso;
326 Log4(("NAT: tcp_input: %R[natsock]\n", so));
327 /* Re-set a few variables */
328 tp = sototcpcb(so);
329 m = so->so_m;
330 so->so_m = 0;
331
332 if (RT_LIKELY(so->so_ohdr != NULL))
333 {
334 RTMemFree(so->so_ohdr);
335 so->so_ohdr = NULL;
336 }
337
338 ti = so->so_ti;
339
340 /** @todo (vvl) clarify why it might happens */
341 if (ti == NULL)
342 {
343 LogRel(("NAT: ti is null. can't do any reseting connection actions\n"));
344 /* mbuf should be cleared in sofree called from tcp_close */
345 tcp_close(pData, tp);
346 STAM_PROFILE_STOP(&pData->StatTCP_input, counter_input);
347 LogFlowFuncLeave();
348 return;
349 }
350
351 tiwin = ti->ti_win;
352 tiflags = ti->ti_flags;
353
354 LogFlowFunc(("%d -> cont_conn\n", __LINE__));
355 goto cont_conn;
356 }
357
358 tcpstat.tcps_rcvtotal++;
359
360 ip = mtod(m, struct ip *);
361
362 /* ip_input() subtracts iphlen from ip::ip_len */
363 AssertStmt(ip->ip_len + iphlen == (ssize_t)m_length(m, NULL), goto drop);
364 if (RT_UNLIKELY(ip->ip_len < sizeof(struct tcphdr)))
365 {
366 /* tcps_rcvshort++; */
367 goto drop;
368 }
369
370 /*
371 * Save a copy of the IP header in case we want to restore it for
372 * sending an ICMP error message in response.
373 *
374 * XXX: This function should really be fixed to not strip IP
375 * options, to not overwrite IP header and to use "tlen" local
376 * variable (instead of ti->ti_len), then "m" could be passed to
377 * icmp_error() directly.
378 */
379 ohdrlen = iphlen + 8;
380 m_copydata(m, 0, ohdrlen, (caddr_t)ohdr);
381 save_ip = (struct ip *)ohdr;
382 save_ip->ip_len += iphlen; /* undo change by ip_input() */
383
384
385 /*
386 * Get IP and TCP header together in first mbuf.
387 * Note: IP leaves IP header in first mbuf.
388 */
389 ti = mtod(m, struct tcpiphdr *);
390 if (iphlen > sizeof(struct ip))
391 {
392 ip_stripoptions(m, (struct mbuf *)0);
393 iphlen = sizeof(struct ip);
394 }
395
396 /*
397 * Checksum extended TCP header and data.
398 */
399 tlen = ((struct ip *)ti)->ip_len;
400 memset(ti->ti_x1, 0, 9);
401 ti->ti_len = RT_H2N_U16((u_int16_t)tlen);
402 len = sizeof(struct ip) + tlen;
403 /* keep checksum for ICMP reply
404 * ti->ti_sum = cksum(m, len);
405 * if (ti->ti_sum) { */
406 if (cksum(m, len))
407 {
408 tcpstat.tcps_rcvbadsum++;
409 LogFlowFunc(("%d -> drop\n", __LINE__));
410 goto drop;
411 }
412
413 /*
414 * Check that TCP offset makes sense,
415 * pull out TCP options and adjust length. XXX
416 */
417 off = ti->ti_off << 2;
418 if ( off < sizeof (struct tcphdr)
419 || off > tlen)
420 {
421 tcpstat.tcps_rcvbadoff++;
422 LogFlowFunc(("%d -> drop\n", __LINE__));
423 goto drop;
424 }
425 tlen -= off;
426 ti->ti_len = tlen;
427 if (off > sizeof (struct tcphdr))
428 {
429 optlen = off - sizeof (struct tcphdr);
430 optp = mtod(m, caddr_t) + sizeof (struct tcpiphdr);
431
432 /*
433 * Do quick retrieval of timestamp options ("options
434 * prediction?"). If timestamp is the only option and it's
435 * formatted as recommended in RFC 1323 appendix A, we
436 * quickly get the values now and not bother calling
437 * tcp_dooptions(), etc.
438 */
439#if 0
440 if (( optlen == TCPOLEN_TSTAMP_APPA
441 || ( optlen > TCPOLEN_TSTAMP_APPA
442 && optp[TCPOLEN_TSTAMP_APPA] == TCPOPT_EOL)) &&
443 *(u_int32_t *)optp == RT_H2N_U32_C(TCPOPT_TSTAMP_HDR) &&
444 (ti->ti_flags & TH_SYN) == 0)
445 {
446 ts_present = 1;
447 ts_val = RT_N2H_U32(*(u_int32_t *)(optp + 4));
448 ts_ecr = RT_N2H_U32(*(u_int32_t *)(optp + 8));
449 optp = NULL; / * we have parsed the options * /
450 }
451#endif
452 }
453 tiflags = ti->ti_flags;
454
455 /*
456 * Convert TCP protocol specific fields to host format.
457 */
458 NTOHL(ti->ti_seq);
459 NTOHL(ti->ti_ack);
460 NTOHS(ti->ti_win);
461 NTOHS(ti->ti_urp);
462
463 /*
464 * Drop TCP, IP headers and TCP options.
465 */
466 m->m_data += sizeof(struct tcpiphdr)+off-sizeof(struct tcphdr);
467 m->m_len -= sizeof(struct tcpiphdr)+off-sizeof(struct tcphdr);
468
469 /*
470 * Locate pcb for segment.
471 */
472findso:
473 LogFlowFunc(("(enter) findso: %R[natsock]\n", so));
474 if (so != NULL && so != &tcb)
475 SOCKET_UNLOCK(so);
476 QSOCKET_LOCK(tcb);
477 so = tcp_last_so;
478 if ( so->so_fport != ti->ti_dport
479 || so->so_lport != ti->ti_sport
480 || so->so_laddr.s_addr != ti->ti_src.s_addr
481 || so->so_faddr.s_addr != ti->ti_dst.s_addr)
482 {
483 QSOCKET_UNLOCK(tcb);
484 /* @todo fix SOLOOKUP macrodefinition to be usable here */
485 so = solookup(&tcb, ti->ti_src, ti->ti_sport,
486 ti->ti_dst, ti->ti_dport);
487 if (so)
488 {
489 tcp_last_so = so;
490 }
491 ++tcpstat.tcps_socachemiss;
492 }
493 else
494 {
495 SOCKET_LOCK(so);
496 QSOCKET_UNLOCK(tcb);
497 }
498 LogFlowFunc(("(leave) findso: %R[natsock]\n", so));
499
500 /*
501 * If the state is CLOSED (i.e., TCB does not exist) then
502 * all data in the incoming segment is discarded.
503 * If the TCB exists but is in CLOSED state, it is embryonic,
504 * but should either do a listen or a connect soon.
505 *
506 * state == CLOSED means we've done socreate() but haven't
507 * attached it to a protocol yet...
508 *
509 * XXX If a TCB does not exist, and the TH_SYN flag is
510 * the only flag set, then create a session, mark it
511 * as if it was LISTENING, and continue...
512 */
513 if (so == 0)
514 {
515 if ((tiflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) != TH_SYN)
516 {
517 LogFlowFunc(("%d -> dropwithreset\n", __LINE__));
518 goto dropwithreset;
519 }
520
521 if ((so = socreate()) == NULL)
522 {
523 LogFlowFunc(("%d -> dropwithreset\n", __LINE__));
524 goto dropwithreset;
525 }
526 if (tcp_attach(pData, so) < 0)
527 {
528 RTMemFree(so); /* Not sofree (if it failed, it's not insqued) */
529 LogFlowFunc(("%d -> dropwithreset\n", __LINE__));
530 goto dropwithreset;
531 }
532 SOCKET_LOCK(so);
533 sbreserve(pData, &so->so_snd, tcp_sndspace);
534 sbreserve(pData, &so->so_rcv, tcp_rcvspace);
535
536/* tcp_last_so = so; */ /* XXX ? */
537/* tp = sototcpcb(so); */
538
539 so->so_laddr = ti->ti_src;
540 so->so_lport = ti->ti_sport;
541 so->so_faddr = ti->ti_dst;
542 so->so_fport = ti->ti_dport;
543
544 so->so_iptos = ((struct ip *)ti)->ip_tos;
545
546 tp = sototcpcb(so);
547 TCP_STATE_SWITCH_TO(tp, TCPS_LISTEN);
548 }
549
550 /*
551 * If this is a still-connecting socket, this probably
552 * a retransmit of the SYN. Whether it's a retransmit SYN
553 * or something else, we nuke it.
554 */
555 if (so->so_state & SS_ISFCONNECTING)
556 {
557 LogFlowFunc(("%d -> drop\n", __LINE__));
558 goto drop;
559 }
560
561 tp = sototcpcb(so);
562
563 /* XXX Should never fail */
564 if (tp == 0)
565 {
566 LogFlowFunc(("%d -> dropwithreset\n", __LINE__));
567 goto dropwithreset;
568 }
569 if (tp->t_state == TCPS_CLOSED)
570 {
571 LogFlowFunc(("%d -> drop\n", __LINE__));
572 goto drop;
573 }
574
575 /* Unscale the window into a 32-bit value. */
576/* if ((tiflags & TH_SYN) == 0)
577 * tiwin = ti->ti_win << tp->snd_scale;
578 * else
579 */
580 tiwin = ti->ti_win;
581
582 /*
583 * Segment received on connection.
584 * Reset idle time and keep-alive timer.
585 */
586 tp->t_idle = 0;
587 if (so_options)
588 tp->t_timer[TCPT_KEEP] = tcp_keepintvl;
589 else
590 tp->t_timer[TCPT_KEEP] = tcp_keepidle;
591
592 /*
593 * Process options if not in LISTEN state,
594 * else do it below (after getting remote address).
595 */
596 if (optp && tp->t_state != TCPS_LISTEN)
597 tcp_dooptions(pData, tp, (u_char *)optp, optlen, ti);
598/* , */
599/* &ts_present, &ts_val, &ts_ecr); */
600
601 /*
602 * Header prediction: check for the two common cases
603 * of a uni-directional data xfer. If the packet has
604 * no control flags, is in-sequence, the window didn't
605 * change and we're not retransmitting, it's a
606 * candidate. If the length is zero and the ack moved
607 * forward, we're the sender side of the xfer. Just
608 * free the data acked & wake any higher level process
609 * that was blocked waiting for space. If the length
610 * is non-zero and the ack didn't move, we're the
611 * receiver side. If we're getting packets in-order
612 * (the reassembly queue is empty), add the data to
613 * the socket buffer and note that we need a delayed ack.
614 *
615 * XXX Some of these tests are not needed
616 * eg: the tiwin == tp->snd_wnd prevents many more
617 * predictions.. with no *real* advantage..
618 */
619 if ( tp->t_state == TCPS_ESTABLISHED
620 && (tiflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) == TH_ACK
621/* && (!ts_present || TSTMP_GEQ(ts_val, tp->ts_recent)) */
622 && ti->ti_seq == tp->rcv_nxt
623 && tiwin && tiwin == tp->snd_wnd
624 && tp->snd_nxt == tp->snd_max)
625 {
626 /*
627 * If last ACK falls within this segment's sequence numbers,
628 * record the timestamp.
629 */
630#if 0
631 if (ts_present && SEQ_LEQ(ti->ti_seq, tp->last_ack_sent) &&
632 SEQ_LT(tp->last_ack_sent, ti->ti_seq + ti->ti_len))
633 {
634 tp->ts_recent_age = tcp_now;
635 tp->ts_recent = ts_val;
636 }
637#endif
638
639 if (ti->ti_len == 0)
640 {
641 if ( SEQ_GT(ti->ti_ack, tp->snd_una)
642 && SEQ_LEQ(ti->ti_ack, tp->snd_max)
643 && tp->snd_cwnd >= tp->snd_wnd)
644 {
645 /*
646 * this is a pure ack for outstanding data.
647 */
648 ++tcpstat.tcps_predack;
649#if 0
650 if (ts_present)
651 tcp_xmit_timer(tp, tcp_now-ts_ecr+1);
652 else
653#endif
654 if ( tp->t_rtt
655 && SEQ_GT(ti->ti_ack, tp->t_rtseq))
656 tcp_xmit_timer(pData, tp, tp->t_rtt);
657 acked = ti->ti_ack - tp->snd_una;
658 tcpstat.tcps_rcvackpack++;
659 tcpstat.tcps_rcvackbyte += acked;
660 sbdrop(&so->so_snd, acked);
661 tp->snd_una = ti->ti_ack;
662 m_freem(pData, m);
663
664 /*
665 * If all outstanding data are acked, stop
666 * retransmit timer, otherwise restart timer
667 * using current (possibly backed-off) value.
668 * If process is waiting for space,
669 * wakeup/selwakeup/signal. If data
670 * are ready to send, let tcp_output
671 * decide between more output or persist.
672 */
673 if (tp->snd_una == tp->snd_max)
674 tp->t_timer[TCPT_REXMT] = 0;
675 else if (tp->t_timer[TCPT_PERSIST] == 0)
676 tp->t_timer[TCPT_REXMT] = tp->t_rxtcur;
677
678 /*
679 * There's room in so_snd, sowwakup will read()
680 * from the socket if we can
681 */
682#if 0
683 if (so->so_snd.sb_flags & SB_NOTIFY)
684 sowwakeup(so);
685#endif
686 /*
687 * This is called because sowwakeup might have
688 * put data into so_snd. Since we don't so sowwakeup,
689 * we don't need this.. XXX???
690 */
691 if (SBUF_LEN(&so->so_snd))
692 (void) tcp_output(pData, tp);
693
694 SOCKET_UNLOCK(so);
695 STAM_PROFILE_STOP(&pData->StatTCP_input, counter_input);
696 return;
697 }
698 }
699 else if ( ti->ti_ack == tp->snd_una
700 && LIST_EMPTY(&tp->t_segq)
701 && ti->ti_len <= sbspace(&so->so_rcv))
702 {
703 /*
704 * this is a pure, in-sequence data packet
705 * with nothing on the reassembly queue and
706 * we have enough buffer space to take it.
707 */
708 ++tcpstat.tcps_preddat;
709 tp->rcv_nxt += ti->ti_len;
710 tcpstat.tcps_rcvpack++;
711 tcpstat.tcps_rcvbyte += ti->ti_len;
712 /*
713 * Add data to socket buffer.
714 */
715 sbappend(pData, so, m);
716
717 /*
718 * XXX This is called when data arrives. Later, check
719 * if we can actually write() to the socket
720 * XXX Need to check? It's be NON_BLOCKING
721 */
722/* sorwakeup(so); */
723
724 /*
725 * If this is a short packet, then ACK now - with Nagel
726 * congestion avoidance sender won't send more until
727 * he gets an ACK.
728 *
729 * It is better to not delay acks at all to maximize
730 * TCP throughput. See RFC 2581.
731 */
732 tp->t_flags |= TF_ACKNOW;
733 tcp_output(pData, tp);
734 SOCKET_UNLOCK(so);
735 STAM_PROFILE_STOP(&pData->StatTCP_input, counter_input);
736 return;
737 }
738 } /* header prediction */
739 /*
740 * Calculate amount of space in receive window,
741 * and then do TCP input processing.
742 * Receive window is amount of space in rcv queue,
743 * but not less than advertised window.
744 */
745 {
746 int win;
747 win = sbspace(&so->so_rcv);
748 if (win < 0)
749 win = 0;
750 tp->rcv_wnd = max(win, (int)(tp->rcv_adv - tp->rcv_nxt));
751 }
752
753 switch (tp->t_state)
754 {
755 /*
756 * If the state is LISTEN then ignore segment if it contains an RST.
757 * If the segment contains an ACK then it is bad and send a RST.
758 * If it does not contain a SYN then it is not interesting; drop it.
759 * Don't bother responding if the destination was a broadcast.
760 * Otherwise initialize tp->rcv_nxt, and tp->irs, select an initial
761 * tp->iss, and send a segment:
762 * <SEQ=ISS><ACK=RCV_NXT><CTL=SYN,ACK>
763 * Also initialize tp->snd_nxt to tp->iss+1 and tp->snd_una to tp->iss.
764 * Fill in remote peer address fields if not previously specified.
765 * Enter SYN_RECEIVED state, and process any other fields of this
766 * segment in this state.
767 */
768 case TCPS_LISTEN:
769 {
770 if (tiflags & TH_RST)
771 {
772 LogFlowFunc(("%d -> drop\n", __LINE__));
773 goto drop;
774 }
775 if (tiflags & TH_ACK)
776 {
777 LogFlowFunc(("%d -> dropwithreset\n", __LINE__));
778 goto dropwithreset;
779 }
780 if ((tiflags & TH_SYN) == 0)
781 {
782 LogFlowFunc(("%d -> drop\n", __LINE__));
783 goto drop;
784 }
785
786 /*
787 * This has way too many gotos...
788 * But a bit of spaghetti code never hurt anybody :)
789 */
790 if ( (tcp_fconnect(pData, so) == -1)
791 && errno != EINPROGRESS
792 && errno != EWOULDBLOCK)
793 {
794 u_char code = ICMP_UNREACH_NET;
795 Log2((" tcp fconnect errno = %d (%s)\n", errno, strerror(errno)));
796 if (errno == ECONNREFUSED)
797 {
798 /* ACK the SYN, send RST to refuse the connection */
799 tcp_respond(pData, tp, ti, m, ti->ti_seq+1, (tcp_seq)0,
800 TH_RST|TH_ACK);
801 }
802 else
803 {
804 if (errno == EHOSTUNREACH)
805 code = ICMP_UNREACH_HOST;
806 HTONL(ti->ti_seq); /* restore tcp header */
807 HTONL(ti->ti_ack);
808 HTONS(ti->ti_win);
809 HTONS(ti->ti_urp);
810 m->m_data -= sizeof(struct tcpiphdr)+off-sizeof(struct tcphdr);
811 m->m_len += sizeof(struct tcpiphdr)+off-sizeof(struct tcphdr);
812 *ip = *save_ip;
813 icmp_error(pData, m, ICMP_UNREACH, code, 0, strerror(errno));
814 tp->t_socket->so_m = NULL;
815 }
816 tp = tcp_close(pData, tp);
817 }
818 else
819 {
820 /*
821 * Haven't connected yet, save the current mbuf
822 * and ti, and return
823 * XXX Some OS's don't tell us whether the connect()
824 * succeeded or not. So we must time it out.
825 */
826 so->so_m = m;
827 so->so_ti = ti;
828 so->so_ohdr = RTMemDup(ohdr, ohdrlen);
829 tp->t_timer[TCPT_KEEP] = TCPTV_KEEP_INIT;
830 TCP_STATE_SWITCH_TO(tp, TCPS_SYN_RECEIVED);
831 }
832 SOCKET_UNLOCK(so);
833 STAM_PROFILE_STOP(&pData->StatTCP_input, counter_input);
834 LogFlowFuncLeave();
835 return;
836
837cont_conn:
838 /* m==NULL
839 * Check if the connect succeeded
840 */
841 LogFlowFunc(("cont_conn:\n"));
842 if (so->so_state & SS_NOFDREF)
843 {
844 tp = tcp_close(pData, tp);
845 LogFlowFunc(("%d -> dropwithreset\n", __LINE__));
846 goto dropwithreset;
847 }
848
849 tcp_template(tp);
850
851 if (optp)
852 tcp_dooptions(pData, tp, (u_char *)optp, optlen, ti);
853
854 if (iss)
855 tp->iss = iss;
856 else
857 tp->iss = tcp_iss;
858 tcp_iss += TCP_ISSINCR/2;
859 tp->irs = ti->ti_seq;
860 tcp_sendseqinit(tp);
861 tcp_rcvseqinit(tp);
862 tp->t_flags |= TF_ACKNOW;
863 TCP_STATE_SWITCH_TO(tp, TCPS_SYN_RECEIVED);
864 tp->t_timer[TCPT_KEEP] = TCPTV_KEEP_INIT;
865 tcpstat.tcps_accepts++;
866 LogFlowFunc(("%d -> trimthenstep6\n", __LINE__));
867 goto trimthenstep6;
868 } /* case TCPS_LISTEN */
869
870 /*
871 * If the state is SYN_SENT:
872 * if seg contains an ACK, but not for our SYN, drop the input.
873 * if seg contains a RST, then drop the connection.
874 * if seg does not contain SYN, then drop it.
875 * Otherwise this is an acceptable SYN segment
876 * initialize tp->rcv_nxt and tp->irs
877 * if seg contains ack then advance tp->snd_una
878 * if SYN has been acked change to ESTABLISHED else SYN_RCVD state
879 * arrange for segment to be acked (eventually)
880 * continue processing rest of data/controls, beginning with URG
881 */
882 case TCPS_SYN_SENT:
883 if ( (tiflags & TH_ACK)
884 && ( SEQ_LEQ(ti->ti_ack, tp->iss)
885 || SEQ_GT(ti->ti_ack, tp->snd_max)))
886 {
887 LogFlowFunc(("%d -> dropwithreset\n", __LINE__));
888 goto dropwithreset;
889 }
890
891 if (tiflags & TH_RST)
892 {
893 if (tiflags & TH_ACK)
894 tp = tcp_drop(pData, tp, 0); /* XXX Check t_softerror! */
895 LogFlowFunc(("%d -> drop\n", __LINE__));
896 goto drop;
897 }
898
899 if ((tiflags & TH_SYN) == 0)
900 {
901 LogFlowFunc(("%d -> drop\n", __LINE__));
902 goto drop;
903 }
904 if (tiflags & TH_ACK)
905 {
906 tp->snd_una = ti->ti_ack;
907 if (SEQ_LT(tp->snd_nxt, tp->snd_una))
908 tp->snd_nxt = tp->snd_una;
909 }
910
911 tp->t_timer[TCPT_REXMT] = 0;
912 tp->irs = ti->ti_seq;
913 tcp_rcvseqinit(tp);
914 tp->t_flags |= TF_ACKNOW;
915 if (tiflags & TH_ACK && SEQ_GT(tp->snd_una, tp->iss))
916 {
917 tcpstat.tcps_connects++;
918 soisfconnected(so);
919 TCP_STATE_SWITCH_TO(tp, TCPS_ESTABLISHED);
920
921 /* Do window scaling on this connection? */
922#if 0
923 if (( tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE))
924 == (TF_RCVD_SCALE|TF_REQ_SCALE))
925 {
926 tp->snd_scale = tp->requested_s_scale;
927 tp->rcv_scale = tp->request_r_scale;
928 }
929#endif
930 (void) tcp_reass(pData, tp, (struct tcphdr *)0, NULL, (struct mbuf *)0);
931 /*
932 * if we didn't have to retransmit the SYN,
933 * use its rtt as our initial srtt & rtt var.
934 */
935 if (tp->t_rtt)
936 tcp_xmit_timer(pData, tp, tp->t_rtt);
937 }
938 else
939 TCP_STATE_SWITCH_TO(tp, TCPS_SYN_RECEIVED);
940
941trimthenstep6:
942 LogFlowFunc(("trimthenstep6:\n"));
943 /*
944 * Advance ti->ti_seq to correspond to first data byte.
945 * If data, trim to stay within window,
946 * dropping FIN if necessary.
947 */
948 ti->ti_seq++;
949 if (ti->ti_len > tp->rcv_wnd)
950 {
951 todrop = ti->ti_len - tp->rcv_wnd;
952 m_adj(m, -todrop);
953 ti->ti_len = tp->rcv_wnd;
954 tiflags &= ~TH_FIN;
955 tcpstat.tcps_rcvpackafterwin++;
956 tcpstat.tcps_rcvbyteafterwin += todrop;
957 }
958 tp->snd_wl1 = ti->ti_seq - 1;
959 tp->rcv_up = ti->ti_seq;
960 LogFlowFunc(("%d -> step6\n", __LINE__));
961 goto step6;
962 } /* switch tp->t_state */
963 /*
964 * States other than LISTEN or SYN_SENT.
965 * First check timestamp, if present.
966 * Then check that at least some bytes of segment are within
967 * receive window. If segment begins before rcv_nxt,
968 * drop leading data (and SYN); if nothing left, just ack.
969 *
970 * RFC 1323 PAWS: If we have a timestamp reply on this segment
971 * and it's less than ts_recent, drop it.
972 */
973#if 0
974 if ( ts_present
975 && (tiflags & TH_RST) == 0
976 && tp->ts_recent
977 && TSTMP_LT(ts_val, tp->ts_recent))
978 {
979 /* Check to see if ts_recent is over 24 days old. */
980 if ((int)(tcp_now - tp->ts_recent_age) > TCP_PAWS_IDLE)
981 {
982 /*
983 * Invalidate ts_recent. If this segment updates
984 * ts_recent, the age will be reset later and ts_recent
985 * will get a valid value. If it does not, setting
986 * ts_recent to zero will at least satisfy the
987 * requirement that zero be placed in the timestamp
988 * echo reply when ts_recent isn't valid. The
989 * age isn't reset until we get a valid ts_recent
990 * because we don't want out-of-order segments to be
991 * dropped when ts_recent is old.
992 */
993 tp->ts_recent = 0;
994 }
995 else
996 {
997 tcpstat.tcps_rcvduppack++;
998 tcpstat.tcps_rcvdupbyte += ti->ti_len;
999 tcpstat.tcps_pawsdrop++;
1000 goto dropafterack;
1001 }
1002 }
1003#endif
1004
1005 todrop = tp->rcv_nxt - ti->ti_seq;
1006 if (todrop > 0)
1007 {
1008 if (tiflags & TH_SYN)
1009 {
1010 tiflags &= ~TH_SYN;
1011 ti->ti_seq++;
1012 if (ti->ti_urp > 1)
1013 ti->ti_urp--;
1014 else
1015 tiflags &= ~TH_URG;
1016 todrop--;
1017 }
1018 /*
1019 * Following if statement from Stevens, vol. 2, p. 960.
1020 */
1021 if ( todrop > ti->ti_len
1022 || ( todrop == ti->ti_len
1023 && (tiflags & TH_FIN) == 0))
1024 {
1025 /*
1026 * Any valid FIN must be to the left of the window.
1027 * At this point the FIN must be a duplicate or out
1028 * of sequence; drop it.
1029 */
1030 tiflags &= ~TH_FIN;
1031
1032 /*
1033 * Send an ACK to resynchronize and drop any data.
1034 * But keep on processing for RST or ACK.
1035 */
1036 tp->t_flags |= TF_ACKNOW;
1037 todrop = ti->ti_len;
1038 tcpstat.tcps_rcvduppack++;
1039 tcpstat.tcps_rcvdupbyte += todrop;
1040 }
1041 else
1042 {
1043 tcpstat.tcps_rcvpartduppack++;
1044 tcpstat.tcps_rcvpartdupbyte += todrop;
1045 }
1046 m_adj(m, todrop);
1047 ti->ti_seq += todrop;
1048 ti->ti_len -= todrop;
1049 if (ti->ti_urp > todrop)
1050 ti->ti_urp -= todrop;
1051 else
1052 {
1053 tiflags &= ~TH_URG;
1054 ti->ti_urp = 0;
1055 }
1056 }
1057 /*
1058 * If new data are received on a connection after the
1059 * user processes are gone, then RST the other end.
1060 */
1061 if ( (so->so_state & SS_NOFDREF)
1062 && tp->t_state > TCPS_CLOSE_WAIT && ti->ti_len)
1063 {
1064 tp = tcp_close(pData, tp);
1065 tcpstat.tcps_rcvafterclose++;
1066 LogFlowFunc(("%d -> dropwithreset\n", __LINE__));
1067 goto dropwithreset;
1068 }
1069
1070 /*
1071 * If segment ends after window, drop trailing data
1072 * (and PUSH and FIN); if nothing left, just ACK.
1073 */
1074 todrop = (ti->ti_seq+ti->ti_len) - (tp->rcv_nxt+tp->rcv_wnd);
1075 if (todrop > 0)
1076 {
1077 tcpstat.tcps_rcvpackafterwin++;
1078 if (todrop >= ti->ti_len)
1079 {
1080 tcpstat.tcps_rcvbyteafterwin += ti->ti_len;
1081 /*
1082 * If a new connection request is received
1083 * while in TIME_WAIT, drop the old connection
1084 * and start over if the sequence numbers
1085 * are above the previous ones.
1086 */
1087 if ( tiflags & TH_SYN
1088 && tp->t_state == TCPS_TIME_WAIT
1089 && SEQ_GT(ti->ti_seq, tp->rcv_nxt))
1090 {
1091 iss = tp->rcv_nxt + TCP_ISSINCR;
1092 tp = tcp_close(pData, tp);
1093 SOCKET_UNLOCK(tp->t_socket);
1094 LogFlowFunc(("%d -> findso\n", __LINE__));
1095 goto findso;
1096 }
1097 /*
1098 * If window is closed can only take segments at
1099 * window edge, and have to drop data and PUSH from
1100 * incoming segments. Continue processing, but
1101 * remember to ack. Otherwise, drop segment
1102 * and ack.
1103 */
1104 if (tp->rcv_wnd == 0 && ti->ti_seq == tp->rcv_nxt)
1105 {
1106 tp->t_flags |= TF_ACKNOW;
1107 tcpstat.tcps_rcvwinprobe++;
1108 }
1109 else
1110 {
1111 LogFlowFunc(("%d -> dropafterack\n", __LINE__));
1112 goto dropafterack;
1113 }
1114 }
1115 else
1116 tcpstat.tcps_rcvbyteafterwin += todrop;
1117 m_adj(m, -todrop);
1118 ti->ti_len -= todrop;
1119 tiflags &= ~(TH_PUSH|TH_FIN);
1120 }
1121
1122 /*
1123 * If last ACK falls within this segment's sequence numbers,
1124 * record its timestamp.
1125 */
1126#if 0
1127 if ( ts_present
1128 && SEQ_LEQ(ti->ti_seq, tp->last_ack_sent)
1129 && SEQ_LT(tp->last_ack_sent, ti->ti_seq + ti->ti_len + ((tiflags & (TH_SYN|TH_FIN)) != 0)))
1130 {
1131 tp->ts_recent_age = tcp_now;
1132 tp->ts_recent = ts_val;
1133 }
1134#endif
1135
1136 /*
1137 * If the RST bit is set examine the state:
1138 * SYN_RECEIVED STATE:
1139 * If passive open, return to LISTEN state.
1140 * If active open, inform user that connection was refused.
1141 * ESTABLISHED, FIN_WAIT_1, FIN_WAIT2, CLOSE_WAIT STATES:
1142 * Inform user that connection was reset, and close tcb.
1143 * CLOSING, LAST_ACK, TIME_WAIT STATES
1144 * Close the tcb.
1145 */
1146 if (tiflags&TH_RST)
1147 switch (tp->t_state)
1148 {
1149 case TCPS_SYN_RECEIVED:
1150/* so->so_error = ECONNREFUSED; */
1151 LogFlowFunc(("%d -> close\n", __LINE__));
1152 goto close;
1153
1154 case TCPS_ESTABLISHED:
1155 case TCPS_FIN_WAIT_1:
1156 case TCPS_FIN_WAIT_2:
1157 case TCPS_CLOSE_WAIT:
1158/* so->so_error = ECONNRESET; */
1159close:
1160 LogFlowFunc(("close:\n"));
1161 TCP_STATE_SWITCH_TO(tp, TCPS_CLOSED);
1162 tcpstat.tcps_drops++;
1163 tp = tcp_close(pData, tp);
1164 LogFlowFunc(("%d -> drop\n", __LINE__));
1165 goto drop;
1166
1167 case TCPS_CLOSING:
1168 case TCPS_LAST_ACK:
1169 case TCPS_TIME_WAIT:
1170 tp = tcp_close(pData, tp);
1171 LogFlowFunc(("%d -> drop\n", __LINE__));
1172 goto drop;
1173 }
1174
1175 /*
1176 * If a SYN is in the window, then this is an
1177 * error and we send an RST and drop the connection.
1178 */
1179 if (tiflags & TH_SYN)
1180 {
1181 tp = tcp_drop(pData, tp, 0);
1182 LogFlowFunc(("%d -> dropwithreset\n", __LINE__));
1183 goto dropwithreset;
1184 }
1185
1186 /*
1187 * If the ACK bit is off we drop the segment and return.
1188 */
1189 if ((tiflags & TH_ACK) == 0)
1190 {
1191 LogFlowFunc(("%d -> drop\n", __LINE__));
1192 goto drop;
1193 }
1194
1195 /*
1196 * Ack processing.
1197 */
1198 switch (tp->t_state)
1199 {
1200 /*
1201 * In SYN_RECEIVED state if the ack ACKs our SYN then enter
1202 * ESTABLISHED state and continue processing, otherwise
1203 * send an RST. una<=ack<=max
1204 */
1205 case TCPS_SYN_RECEIVED:
1206 LogFlowFunc(("%d -> TCPS_SYN_RECEIVED\n", __LINE__));
1207 if ( SEQ_GT(tp->snd_una, ti->ti_ack)
1208 || SEQ_GT(ti->ti_ack, tp->snd_max))
1209 goto dropwithreset;
1210 tcpstat.tcps_connects++;
1211 TCP_STATE_SWITCH_TO(tp, TCPS_ESTABLISHED);
1212 /*
1213 * The sent SYN is ack'ed with our sequence number +1
1214 * The first data byte already in the buffer will get
1215 * lost if no correction is made. This is only needed for
1216 * SS_CTL since the buffer is empty otherwise.
1217 * tp->snd_una++; or:
1218 */
1219 tp->snd_una = ti->ti_ack;
1220 soisfconnected(so);
1221
1222 /* Do window scaling? */
1223#if 0
1224 if ( (tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE))
1225 == (TF_RCVD_SCALE|TF_REQ_SCALE))
1226 {
1227 tp->snd_scale = tp->requested_s_scale;
1228 tp->rcv_scale = tp->request_r_scale;
1229 }
1230#endif
1231 (void) tcp_reass(pData, tp, (struct tcphdr *)0, (int *)0, (struct mbuf *)0);
1232 tp->snd_wl1 = ti->ti_seq - 1;
1233 /* Avoid ack processing; snd_una==ti_ack => dup ack */
1234 LogFlowFunc(("%d -> synrx_to_est\n", __LINE__));
1235 goto synrx_to_est;
1236 /* fall into ... */
1237
1238 /*
1239 * In ESTABLISHED state: drop duplicate ACKs; ACK out of range
1240 * ACKs. If the ack is in the range
1241 * tp->snd_una < ti->ti_ack <= tp->snd_max
1242 * then advance tp->snd_una to ti->ti_ack and drop
1243 * data from the retransmission queue. If this ACK reflects
1244 * more up to date window information we update our window information.
1245 */
1246 case TCPS_ESTABLISHED:
1247 case TCPS_FIN_WAIT_1:
1248 case TCPS_FIN_WAIT_2:
1249 case TCPS_CLOSE_WAIT:
1250 case TCPS_CLOSING:
1251 case TCPS_LAST_ACK:
1252 case TCPS_TIME_WAIT:
1253 LogFlowFunc(("%d -> TCPS_ESTABLISHED|TCPS_FIN_WAIT_1|TCPS_FIN_WAIT_2|TCPS_CLOSE_WAIT|"
1254 "TCPS_CLOSING|TCPS_LAST_ACK|TCPS_TIME_WAIT\n", __LINE__));
1255 if (SEQ_LEQ(ti->ti_ack, tp->snd_una))
1256 {
1257 if (ti->ti_len == 0 && tiwin == tp->snd_wnd)
1258 {
1259 tcpstat.tcps_rcvdupack++;
1260 Log2((" dup ack m = %lx, so = %lx\n", (long)m, (long)so));
1261 /*
1262 * If we have outstanding data (other than
1263 * a window probe), this is a completely
1264 * duplicate ack (ie, window info didn't
1265 * change), the ack is the biggest we've
1266 * seen and we've seen exactly our rexmt
1267 * threshold of them, assume a packet
1268 * has been dropped and retransmit it.
1269 * Kludge snd_nxt & the congestion
1270 * window so we send only this one
1271 * packet.
1272 *
1273 * We know we're losing at the current
1274 * window size so do congestion avoidance
1275 * (set ssthresh to half the current window
1276 * and pull our congestion window back to
1277 * the new ssthresh).
1278 *
1279 * Dup acks mean that packets have left the
1280 * network (they're now cached at the receiver)
1281 * so bump cwnd by the amount in the receiver
1282 * to keep a constant cwnd packets in the
1283 * network.
1284 */
1285 if ( tp->t_timer[TCPT_REXMT] == 0
1286 || ti->ti_ack != tp->snd_una)
1287 tp->t_dupacks = 0;
1288 else if (++tp->t_dupacks == tcprexmtthresh)
1289 {
1290 tcp_seq onxt = tp->snd_nxt;
1291 u_int win = min(tp->snd_wnd, tp->snd_cwnd) / 2 / tp->t_maxseg;
1292 if (win < 2)
1293 win = 2;
1294 tp->snd_ssthresh = win * tp->t_maxseg;
1295 tp->t_timer[TCPT_REXMT] = 0;
1296 tp->t_rtt = 0;
1297 tp->snd_nxt = ti->ti_ack;
1298 tp->snd_cwnd = tp->t_maxseg;
1299 (void) tcp_output(pData, tp);
1300 tp->snd_cwnd = tp->snd_ssthresh +
1301 tp->t_maxseg * tp->t_dupacks;
1302 if (SEQ_GT(onxt, tp->snd_nxt))
1303 tp->snd_nxt = onxt;
1304 LogFlowFunc(("%d -> drop\n", __LINE__));
1305 goto drop;
1306 }
1307 else if (tp->t_dupacks > tcprexmtthresh)
1308 {
1309 tp->snd_cwnd += tp->t_maxseg;
1310 (void) tcp_output(pData, tp);
1311 LogFlowFunc(("%d -> drop\n", __LINE__));
1312 goto drop;
1313 }
1314 }
1315 else
1316 tp->t_dupacks = 0;
1317 break;
1318 }
1319synrx_to_est:
1320 LogFlowFunc(("synrx_to_est:\n"));
1321 /*
1322 * If the congestion window was inflated to account
1323 * for the other side's cached packets, retract it.
1324 */
1325 if ( tp->t_dupacks > tcprexmtthresh
1326 && tp->snd_cwnd > tp->snd_ssthresh)
1327 tp->snd_cwnd = tp->snd_ssthresh;
1328 tp->t_dupacks = 0;
1329 if (SEQ_GT(ti->ti_ack, tp->snd_max))
1330 {
1331 tcpstat.tcps_rcvacktoomuch++;
1332 LogFlowFunc(("%d -> dropafterack\n", __LINE__));
1333 goto dropafterack;
1334 }
1335 acked = ti->ti_ack - tp->snd_una;
1336 tcpstat.tcps_rcvackpack++;
1337 tcpstat.tcps_rcvackbyte += acked;
1338
1339 /*
1340 * If we have a timestamp reply, update smoothed
1341 * round trip time. If no timestamp is present but
1342 * transmit timer is running and timed sequence
1343 * number was acked, update smoothed round trip time.
1344 * Since we now have an rtt measurement, cancel the
1345 * timer backoff (cf., Phil Karn's retransmit alg.).
1346 * Recompute the initial retransmit timer.
1347 */
1348#if 0
1349 if (ts_present)
1350 tcp_xmit_timer(tp, tcp_now-ts_ecr+1);
1351 else
1352#endif
1353 if (tp->t_rtt && SEQ_GT(ti->ti_ack, tp->t_rtseq))
1354 tcp_xmit_timer(pData, tp, tp->t_rtt);
1355
1356 /*
1357 * If all outstanding data is acked, stop retransmit
1358 * timer and remember to restart (more output or persist).
1359 * If there is more data to be acked, restart retransmit
1360 * timer, using current (possibly backed-off) value.
1361 */
1362 if (ti->ti_ack == tp->snd_max)
1363 {
1364 tp->t_timer[TCPT_REXMT] = 0;
1365 needoutput = 1;
1366 }
1367 else if (tp->t_timer[TCPT_PERSIST] == 0)
1368 tp->t_timer[TCPT_REXMT] = tp->t_rxtcur;
1369 /*
1370 * When new data is acked, open the congestion window.
1371 * If the window gives us less than ssthresh packets
1372 * in flight, open exponentially (maxseg per packet).
1373 * Otherwise open linearly: maxseg per window
1374 * (maxseg^2 / cwnd per packet).
1375 */
1376 {
1377 register u_int cw = tp->snd_cwnd;
1378 register u_int incr = tp->t_maxseg;
1379
1380 if (cw > tp->snd_ssthresh)
1381 incr = incr * incr / cw;
1382 tp->snd_cwnd = min(cw + incr, TCP_MAXWIN<<tp->snd_scale);
1383 }
1384 if (acked > SBUF_LEN(&so->so_snd))
1385 {
1386 tp->snd_wnd -= SBUF_LEN(&so->so_snd);
1387 sbdrop(&so->so_snd, (int)so->so_snd.sb_cc);
1388 ourfinisacked = 1;
1389 }
1390 else
1391 {
1392 sbdrop(&so->so_snd, acked);
1393 tp->snd_wnd -= acked;
1394 ourfinisacked = 0;
1395 }
1396 /*
1397 * XXX sowwakup is called when data is acked and there's room for
1398 * for more data... it should read() the socket
1399 */
1400#if 0
1401 if (so->so_snd.sb_flags & SB_NOTIFY)
1402 sowwakeup(so);
1403#endif
1404 tp->snd_una = ti->ti_ack;
1405 if (SEQ_LT(tp->snd_nxt, tp->snd_una))
1406 tp->snd_nxt = tp->snd_una;
1407
1408 switch (tp->t_state)
1409 {
1410 /*
1411 * In FIN_WAIT_1 STATE in addition to the processing
1412 * for the ESTABLISHED state if our FIN is now acknowledged
1413 * then enter FIN_WAIT_2.
1414 */
1415 case TCPS_FIN_WAIT_1:
1416 if (ourfinisacked)
1417 {
1418 /*
1419 * If we can't receive any more
1420 * data, then closing user can proceed.
1421 * Starting the timer is contrary to the
1422 * specification, but if we don't get a FIN
1423 * we'll hang forever.
1424 */
1425 if (so->so_state & SS_FCANTRCVMORE)
1426 {
1427 soisfdisconnected(so);
1428 tp->t_timer[TCPT_2MSL] = tcp_maxidle;
1429 }
1430 TCP_STATE_SWITCH_TO(tp, TCPS_FIN_WAIT_2);
1431 }
1432 break;
1433
1434 /*
1435 * In CLOSING STATE in addition to the processing for
1436 * the ESTABLISHED state if the ACK acknowledges our FIN
1437 * then enter the TIME-WAIT state, otherwise ignore
1438 * the segment.
1439 */
1440 case TCPS_CLOSING:
1441 if (ourfinisacked)
1442 {
1443 TCP_STATE_SWITCH_TO(tp, TCPS_TIME_WAIT);
1444 tcp_canceltimers(tp);
1445 tp->t_timer[TCPT_2MSL] = 2 * TCPTV_MSL;
1446 soisfdisconnected(so);
1447 }
1448 break;
1449
1450 /*
1451 * In LAST_ACK, we may still be waiting for data to drain
1452 * and/or to be acked, as well as for the ack of our FIN.
1453 * If our FIN is now acknowledged, delete the TCB,
1454 * enter the closed state and return.
1455 */
1456 case TCPS_LAST_ACK:
1457 if (ourfinisacked)
1458 {
1459 tp = tcp_close(pData, tp);
1460 LogFlowFunc(("%d -> drop\n", __LINE__));
1461 goto drop;
1462 }
1463 break;
1464
1465 /*
1466 * In TIME_WAIT state the only thing that should arrive
1467 * is a retransmission of the remote FIN. Acknowledge
1468 * it and restart the finack timer.
1469 */
1470 case TCPS_TIME_WAIT:
1471 tp->t_timer[TCPT_2MSL] = 2 * TCPTV_MSL;
1472 LogFlowFunc(("%d -> dropafterack\n", __LINE__));
1473 goto dropafterack;
1474 }
1475 } /* switch(tp->t_state) */
1476
1477step6:
1478 LogFlowFunc(("step6:\n"));
1479 /*
1480 * Update window information.
1481 * Don't look at window if no ACK: TAC's send garbage on first SYN.
1482 */
1483 if ( (tiflags & TH_ACK)
1484 && ( SEQ_LT(tp->snd_wl1, ti->ti_seq)
1485 || ( tp->snd_wl1 == ti->ti_seq
1486 && ( SEQ_LT(tp->snd_wl2, ti->ti_ack)
1487 || ( tp->snd_wl2 == ti->ti_ack
1488 && tiwin > tp->snd_wnd)))))
1489 {
1490 /* keep track of pure window updates */
1491 if ( ti->ti_len == 0
1492 && tp->snd_wl2 == ti->ti_ack
1493 && tiwin > tp->snd_wnd)
1494 tcpstat.tcps_rcvwinupd++;
1495 tp->snd_wnd = tiwin;
1496 tp->snd_wl1 = ti->ti_seq;
1497 tp->snd_wl2 = ti->ti_ack;
1498 if (tp->snd_wnd > tp->max_sndwnd)
1499 tp->max_sndwnd = tp->snd_wnd;
1500 needoutput = 1;
1501 }
1502
1503 /*
1504 * Process segments with URG.
1505 */
1506 if ((tiflags & TH_URG) && ti->ti_urp &&
1507 TCPS_HAVERCVDFIN(tp->t_state) == 0)
1508 {
1509 /*
1510 * This is a kludge, but if we receive and accept
1511 * random urgent pointers, we'll crash in
1512 * soreceive. It's hard to imagine someone
1513 * actually wanting to send this much urgent data.
1514 */
1515 if (ti->ti_urp + so->so_rcv.sb_cc > so->so_rcv.sb_datalen)
1516 {
1517 ti->ti_urp = 0;
1518 tiflags &= ~TH_URG;
1519 LogFlowFunc(("%d -> dodata\n", __LINE__));
1520 goto dodata;
1521 }
1522
1523 /*
1524 * If this segment advances the known urgent pointer,
1525 * then mark the data stream. This should not happen
1526 * in CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since
1527 * a FIN has been received from the remote side.
1528 * In these states we ignore the URG.
1529 *
1530 * According to RFC961 (Assigned Protocols),
1531 * the urgent pointer points to the last octet
1532 * of urgent data. We continue, however,
1533 * to consider it to indicate the first octet
1534 * of data past the urgent section as the original
1535 * spec states (in one of two places).
1536 */
1537 if (SEQ_GT(ti->ti_seq+ti->ti_urp, tp->rcv_up))
1538 {
1539 tp->rcv_up = ti->ti_seq + ti->ti_urp;
1540 so->so_urgc = SBUF_LEN(&so->so_rcv) +
1541 (tp->rcv_up - tp->rcv_nxt); /* -1; */
1542 tp->rcv_up = ti->ti_seq + ti->ti_urp;
1543 }
1544 }
1545 else
1546 /*
1547 * If no out of band data is expected,
1548 * pull receive urgent pointer along
1549 * with the receive window.
1550 */
1551 if (SEQ_GT(tp->rcv_nxt, tp->rcv_up))
1552 tp->rcv_up = tp->rcv_nxt;
1553dodata:
1554 LogFlowFunc(("dodata:\n"));
1555
1556 /*
1557 * If this is a small packet, then ACK now - with Nagel
1558 * congestion avoidance sender won't send more until
1559 * he gets an ACK.
1560 *
1561 * XXX: In case you wonder... The magic "27" below is ESC that
1562 * presumably starts a terminal escape-sequence and that we want
1563 * to ACK ASAP. [Original slirp code had three different
1564 * heuristics to chose from here and in the header prediction case
1565 * above, but the commented out alternatives were lost and the
1566 * header prediction case that had an expanded comment about this
1567 * has been modified to always send an ACK].
1568 */
1569 if ( ti->ti_len
1570 && (unsigned)ti->ti_len <= 5
1571 && ((struct tcpiphdr_2 *)ti)->first_char == (char)27)
1572 {
1573 tp->t_flags |= TF_ACKNOW;
1574 }
1575
1576 /*
1577 * Process the segment text, merging it into the TCP sequencing queue,
1578 * and arranging for acknowledgment of receipt if necessary.
1579 * This process logically involves adjusting tp->rcv_wnd as data
1580 * is presented to the user (this happens in tcp_usrreq.c,
1581 * case PRU_RCVD). If a FIN has already been received on this
1582 * connection then we just ignore the text.
1583 */
1584 if ( (ti->ti_len || (tiflags&TH_FIN))
1585 && TCPS_HAVERCVDFIN(tp->t_state) == 0)
1586 {
1587 if ( ti->ti_seq == tp->rcv_nxt
1588 && LIST_EMPTY(&tp->t_segq)
1589 && tp->t_state == TCPS_ESTABLISHED)
1590 {
1591 DELAY_ACK(tp, ti); /* little bit different from BSD declaration see netinet/tcp_input.c */
1592 tp->rcv_nxt += tlen;
1593 tiflags = ti->ti_t.th_flags & TH_FIN;
1594 tcpstat.tcps_rcvpack++;
1595 tcpstat.tcps_rcvbyte += tlen;
1596 if (so->so_state & SS_FCANTRCVMORE)
1597 m_freem(pData, m);
1598 else
1599 sbappend(pData, so, m);
1600 }
1601 else
1602 {
1603 tiflags = tcp_reass(pData, tp, &ti->ti_t, &tlen, m);
1604 tp->t_flags |= TF_ACKNOW;
1605 }
1606 /*
1607 * Note the amount of data that peer has sent into
1608 * our window, in order to estimate the sender's
1609 * buffer size.
1610 */
1611 len = SBUF_SIZE(&so->so_rcv) - (tp->rcv_adv - tp->rcv_nxt);
1612 }
1613 else
1614 {
1615 m_freem(pData, m);
1616 tiflags &= ~TH_FIN;
1617 }
1618
1619 /*
1620 * If FIN is received ACK the FIN and let the user know
1621 * that the connection is closing.
1622 */
1623 if (tiflags & TH_FIN)
1624 {
1625 if (TCPS_HAVERCVDFIN(tp->t_state) == 0)
1626 {
1627 /*
1628 * If we receive a FIN we can't send more data,
1629 * set it SS_FDRAIN
1630 * Shutdown the socket if there is no rx data in the
1631 * buffer.
1632 * soread() is called on completion of shutdown() and
1633 * will got to TCPS_LAST_ACK, and use tcp_output()
1634 * to send the FIN.
1635 */
1636/* sofcantrcvmore(so); */
1637 sofwdrain(so);
1638
1639 tp->t_flags |= TF_ACKNOW;
1640 tp->rcv_nxt++;
1641 }
1642 switch (tp->t_state)
1643 {
1644 /*
1645 * In SYN_RECEIVED and ESTABLISHED STATES
1646 * enter the CLOSE_WAIT state.
1647 */
1648 case TCPS_SYN_RECEIVED:
1649 case TCPS_ESTABLISHED:
1650 TCP_STATE_SWITCH_TO(tp, TCPS_CLOSE_WAIT);
1651 break;
1652
1653 /*
1654 * If still in FIN_WAIT_1 STATE FIN has not been acked so
1655 * enter the CLOSING state.
1656 */
1657 case TCPS_FIN_WAIT_1:
1658 TCP_STATE_SWITCH_TO(tp, TCPS_CLOSING);
1659 break;
1660
1661 /*
1662 * In FIN_WAIT_2 state enter the TIME_WAIT state,
1663 * starting the time-wait timer, turning off the other
1664 * standard timers.
1665 */
1666 case TCPS_FIN_WAIT_2:
1667 TCP_STATE_SWITCH_TO(tp, TCPS_TIME_WAIT);
1668 tcp_canceltimers(tp);
1669 tp->t_timer[TCPT_2MSL] = 2 * TCPTV_MSL;
1670 soisfdisconnected(so);
1671 break;
1672
1673 /*
1674 * In TIME_WAIT state restart the 2 MSL time_wait timer.
1675 */
1676 case TCPS_TIME_WAIT:
1677 tp->t_timer[TCPT_2MSL] = 2 * TCPTV_MSL;
1678 break;
1679 }
1680 }
1681
1682 /*
1683 * Return any desired output.
1684 */
1685 if (needoutput || (tp->t_flags & TF_ACKNOW))
1686 tcp_output(pData, tp);
1687
1688 SOCKET_UNLOCK(so);
1689 STAM_PROFILE_STOP(&pData->StatTCP_input, counter_input);
1690 LogFlowFuncLeave();
1691 return;
1692
1693dropafterack:
1694 LogFlowFunc(("dropafterack:\n"));
1695 /*
1696 * Generate an ACK dropping incoming segment if it occupies
1697 * sequence space, where the ACK reflects our state.
1698 */
1699 if (tiflags & TH_RST)
1700 {
1701 LogFlowFunc(("%d -> drop\n", __LINE__));
1702 goto drop;
1703 }
1704 m_freem(pData, m);
1705 tp->t_flags |= TF_ACKNOW;
1706 (void) tcp_output(pData, tp);
1707 SOCKET_UNLOCK(so);
1708 STAM_PROFILE_STOP(&pData->StatTCP_input, counter_input);
1709 LogFlowFuncLeave();
1710 return;
1711
1712dropwithreset:
1713 LogFlowFunc(("dropwithreset:\n"));
1714 /* reuses m if m!=NULL, m_free() unnecessary */
1715 if (tiflags & TH_ACK)
1716 tcp_respond(pData, tp, ti, m, (tcp_seq)0, ti->ti_ack, TH_RST);
1717 else
1718 {
1719 if (tiflags & TH_SYN)
1720 ti->ti_len++;
1721 tcp_respond(pData, tp, ti, m, ti->ti_seq+ti->ti_len, (tcp_seq)0,
1722 TH_RST|TH_ACK);
1723 }
1724
1725 if (so != &tcb)
1726 SOCKET_UNLOCK(so);
1727 STAM_PROFILE_STOP(&pData->StatTCP_input, counter_input);
1728 LogFlowFuncLeave();
1729 return;
1730
1731drop:
1732 LogFlowFunc(("drop:\n"));
1733 /*
1734 * Drop space held by incoming segment and return.
1735 */
1736 m_freem(pData, m);
1737
1738#ifdef VBOX_WITH_SLIRP_MT
1739 if (RTCritSectIsOwned(&so->so_mutex))
1740 {
1741 SOCKET_UNLOCK(so);
1742 }
1743#endif
1744
1745 STAM_PROFILE_STOP(&pData->StatTCP_input, counter_input);
1746 LogFlowFuncLeave();
1747 return;
1748}
1749
1750
1751void
1752tcp_fconnect_failed(PNATState pData, struct socket *so, int sockerr)
1753{
1754 struct tcpcb *tp;
1755 int code;
1756
1757 Log2(("NAT: connect error %d %R[natsock]\n", sockerr, so));
1758
1759 Assert(so->so_state & SS_ISFCONNECTING);
1760 so->so_state = SS_NOFDREF;
1761
1762 if (sockerr == ECONNREFUSED || sockerr == ECONNRESET)
1763 {
1764 /* hand off to tcp_input():cont_conn to send RST */
1765 TCP_INPUT(pData, NULL, 0, so);
1766 return;
1767 }
1768
1769 tp = sototcpcb(so);
1770 if (RT_UNLIKELY(tp == NULL)) /* should never happen */
1771 {
1772 LogRel(("NAT: tp == NULL %R[natsock]\n", so));
1773 sofree(pData, so);
1774 return;
1775 }
1776
1777 if (sockerr == ENETUNREACH || sockerr == ENETDOWN)
1778 code = ICMP_UNREACH_NET;
1779 else if (sockerr == EHOSTUNREACH || sockerr == EHOSTDOWN)
1780 code = ICMP_UNREACH_HOST;
1781 else
1782 code = -1;
1783
1784 if (code >= 0)
1785 {
1786 struct ip *oip;
1787 unsigned ohdrlen;
1788 struct mbuf *m;
1789
1790 if (RT_UNLIKELY(so->so_ohdr == NULL))
1791 goto out;
1792
1793 oip = (struct ip *)so->so_ohdr;
1794 ohdrlen = oip->ip_hl * 4 + 8;
1795
1796 m = m_gethdr(pData, M_NOWAIT, MT_HEADER);
1797 if (RT_UNLIKELY(m == NULL))
1798 goto out;
1799
1800 m_copyback(pData, m, 0, ohdrlen, (caddr_t)so->so_ohdr);
1801 m->m_pkthdr.header = mtod(m, void *);
1802
1803 icmp_error(pData, m, ICMP_UNREACH, code, 0, NULL);
1804 }
1805
1806 out:
1807 tcp_close(pData, tp);
1808}
1809
1810
1811void
1812tcp_dooptions(PNATState pData, struct tcpcb *tp, u_char *cp, int cnt, struct tcpiphdr *ti)
1813{
1814 u_int16_t mss;
1815 int opt, optlen;
1816
1817 LogFlowFunc(("tcp_dooptions: tp = %R[tcpcb793], cnt=%i\n", tp, cnt));
1818
1819 for (; cnt > 0; cnt -= optlen, cp += optlen)
1820 {
1821 opt = cp[0];
1822 if (opt == TCPOPT_EOL)
1823 break;
1824 if (opt == TCPOPT_NOP)
1825 optlen = 1;
1826 else
1827 {
1828 optlen = cp[1];
1829 if (optlen <= 0)
1830 break;
1831 }
1832 switch (opt)
1833 {
1834 default:
1835 continue;
1836
1837 case TCPOPT_MAXSEG:
1838 if (optlen != TCPOLEN_MAXSEG)
1839 continue;
1840 if (!(ti->ti_flags & TH_SYN))
1841 continue;
1842 memcpy((char *) &mss, (char *) cp + 2, sizeof(mss));
1843 NTOHS(mss);
1844 (void) tcp_mss(pData, tp, mss); /* sets t_maxseg */
1845 break;
1846
1847#if 0
1848 case TCPOPT_WINDOW:
1849 if (optlen != TCPOLEN_WINDOW)
1850 continue;
1851 if (!(ti->ti_flags & TH_SYN))
1852 continue;
1853 tp->t_flags |= TF_RCVD_SCALE;
1854 tp->requested_s_scale = min(cp[2], TCP_MAX_WINSHIFT);
1855 break;
1856
1857 case TCPOPT_TIMESTAMP:
1858 if (optlen != TCPOLEN_TIMESTAMP)
1859 continue;
1860 *ts_present = 1;
1861 memcpy((char *) ts_val, (char *)cp + 2, sizeof(*ts_val));
1862 NTOHL(*ts_val);
1863 memcpy((char *) ts_ecr, (char *)cp + 6, sizeof(*ts_ecr));
1864 NTOHL(*ts_ecr);
1865
1866 /*
1867 * A timestamp received in a SYN makes
1868 * it ok to send timestamp requests and replies.
1869 */
1870 if (ti->ti_flags & TH_SYN)
1871 {
1872 tp->t_flags |= TF_RCVD_TSTMP;
1873 tp->ts_recent = *ts_val;
1874 tp->ts_recent_age = tcp_now;
1875 }
1876 break;
1877#endif
1878 }
1879 }
1880}
1881
1882
1883/*
1884 * Pull out of band byte out of a segment so
1885 * it doesn't appear in the user's data queue.
1886 * It is still reflected in the segment length for
1887 * sequencing purposes.
1888 */
1889
1890#if 0
1891void
1892tcp_pulloutofband(struct socket *so, struct tcpiphdr *ti, struct mbuf *m)
1893{
1894 int cnt = ti->ti_urp - 1;
1895
1896 while (cnt >= 0)
1897 {
1898 if (m->m_len > cnt)
1899 {
1900 char *cp = mtod(m, caddr_t) + cnt;
1901 struct tcpcb *tp = sototcpcb(so);
1902
1903 tp->t_iobc = *cp;
1904 tp->t_oobflags |= TCPOOB_HAVEDATA;
1905 memcpy(sp, cp+1, (unsigned)(m->m_len - cnt - 1));
1906 m->m_len--;
1907 return;
1908 }
1909 cnt -= m->m_len;
1910 m = m->m_next; /* XXX WRONG! Fix it! */
1911 if (m == 0)
1912 break;
1913 }
1914 panic("tcp_pulloutofband");
1915}
1916#endif
1917
1918/*
1919 * Collect new round-trip time estimate
1920 * and update averages and current timeout.
1921 */
1922
1923void
1924tcp_xmit_timer(PNATState pData, register struct tcpcb *tp, int rtt)
1925{
1926 register short delta;
1927
1928 LogFlowFunc(("ENTER: tcp_xmit_timer: tp = %R[tcpcb793] rtt = %d\n", tp, rtt));
1929
1930 tcpstat.tcps_rttupdated++;
1931 if (tp->t_srtt != 0)
1932 {
1933 /*
1934 * srtt is stored as fixed point with 3 bits after the
1935 * binary point (i.e., scaled by 8). The following magic
1936 * is equivalent to the smoothing algorithm in rfc793 with
1937 * an alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed
1938 * point). Adjust rtt to origin 0.
1939 */
1940 delta = rtt - 1 - (tp->t_srtt >> TCP_RTT_SHIFT);
1941 if ((tp->t_srtt += delta) <= 0)
1942 tp->t_srtt = 1;
1943 /*
1944 * We accumulate a smoothed rtt variance (actually, a
1945 * smoothed mean difference), then set the retransmit
1946 * timer to smoothed rtt + 4 times the smoothed variance.
1947 * rttvar is stored as fixed point with 2 bits after the
1948 * binary point (scaled by 4). The following is
1949 * equivalent to rfc793 smoothing with an alpha of .75
1950 * (rttvar = rttvar*3/4 + |delta| / 4). This replaces
1951 * rfc793's wired-in beta.
1952 */
1953 if (delta < 0)
1954 delta = -delta;
1955 delta -= (tp->t_rttvar >> TCP_RTTVAR_SHIFT);
1956 if ((tp->t_rttvar += delta) <= 0)
1957 tp->t_rttvar = 1;
1958 }
1959 else
1960 {
1961 /*
1962 * No rtt measurement yet - use the unsmoothed rtt.
1963 * Set the variance to half the rtt (so our first
1964 * retransmit happens at 3*rtt).
1965 */
1966 tp->t_srtt = rtt << TCP_RTT_SHIFT;
1967 tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1);
1968 }
1969 tp->t_rtt = 0;
1970 tp->t_rxtshift = 0;
1971
1972 /*
1973 * the retransmit should happen at rtt + 4 * rttvar.
1974 * Because of the way we do the smoothing, srtt and rttvar
1975 * will each average +1/2 tick of bias. When we compute
1976 * the retransmit timer, we want 1/2 tick of rounding and
1977 * 1 extra tick because of +-1/2 tick uncertainty in the
1978 * firing of the timer. The bias will give us exactly the
1979 * 1.5 tick we need. But, because the bias is
1980 * statistical, we have to test that we don't drop below
1981 * the minimum feasible timer (which is 2 ticks).
1982 */
1983 TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp),
1984 (short)tp->t_rttmin, TCPTV_REXMTMAX); /* XXX */
1985
1986 /*
1987 * We received an ack for a packet that wasn't retransmitted;
1988 * it is probably safe to discard any error indications we've
1989 * received recently. This isn't quite right, but close enough
1990 * for now (a route might have failed after we sent a segment,
1991 * and the return path might not be symmetrical).
1992 */
1993 tp->t_softerror = 0;
1994}
1995
1996/*
1997 * Determine a reasonable value for maxseg size.
1998 * If the route is known, check route for mtu.
1999 * If none, use an mss that can be handled on the outgoing
2000 * interface without forcing IP to fragment; if bigger than
2001 * an mbuf cluster (MCLBYTES), round down to nearest multiple of MCLBYTES
2002 * to utilize large mbufs. If no route is found, route has no mtu,
2003 * or the destination isn't local, use a default, hopefully conservative
2004 * size (usually 512 or the default IP max size, but no more than the mtu
2005 * of the interface), as we can't discover anything about intervening
2006 * gateways or networks. We also initialize the congestion/slow start
2007 * window to be a single segment if the destination isn't local.
2008 * While looking at the routing entry, we also initialize other path-dependent
2009 * parameters from pre-set or cached values in the routing entry.
2010 */
2011
2012int
2013tcp_mss(PNATState pData, register struct tcpcb *tp, u_int offer)
2014{
2015 struct socket *so = tp->t_socket;
2016 int mss;
2017
2018 LogFlowFunc(("ENTER: tcp_mss: tp = %R[tcpcb793], offer = %d\n", tp, offer));
2019
2020 mss = min(if_mtu, if_mru) - sizeof(struct tcpiphdr);
2021 if (offer)
2022 mss = min(mss, offer);
2023 mss = max(mss, 32);
2024 if (mss < tp->t_maxseg || offer != 0)
2025 tp->t_maxseg = mss;
2026
2027 tp->snd_cwnd = mss;
2028
2029 sbreserve(pData, &so->so_snd, tcp_sndspace+((tcp_sndspace%mss)?(mss-(tcp_sndspace%mss)):0));
2030 sbreserve(pData, &so->so_rcv, tcp_rcvspace+((tcp_rcvspace%mss)?(mss-(tcp_rcvspace%mss)):0));
2031
2032 Log2((" returning mss = %d\n", mss));
2033
2034 return mss;
2035}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette