VirtualBox

source: vbox/trunk/src/VBox/Devices/Network/slirp/ip_input.c @ 57358

Last change on this file since 57358 was 56960, checked in by vboxsync, 9 years ago

NAT: adjust IP checksum after decrementing TTL so that if we use this
IP header in a future ICMP error the checksum is correct.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 17.3 KB
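The change log above relies on a standard property of the IPv4 header checksum: it is the one's complement of the one's-complement sum of the header's 16-bit words, so when the TTL is decremented the TTL/protocol word drops by 0x0100 and the stored checksum can simply be bumped by 0x0100, propagating the end-around carry if the 16-bit field overflows (incremental updating in the spirit of RFC 1141/RFC 1624). The standalone sketch below illustrates the idea in host byte order and verifies it by full recomputation; the helper names (ip_cksum, dec_ttl_incremental) and the sample header are illustrative only and are not part of the VirtualBox sources, which apply the same adjustment directly to the network-order ip_sum field in ip_input() below.

#include <stdint.h>
#include <stdio.h>

/* Full (re)computation of the IPv4 header checksum, used here to verify. */
static uint16_t ip_cksum(const uint8_t *hdr, size_t hlen)
{
    uint32_t sum = 0;
    size_t i;
    for (i = 0; i + 1 < hlen; i += 2)
        sum += (uint32_t)(hdr[i] << 8 | hdr[i + 1]);
    while (sum >> 16)
        sum = (sum & 0xffff) + (sum >> 16);    /* fold end-around carries */
    return (uint16_t)~sum;
}

/* Decrement the TTL (byte 8) and patch the stored checksum (bytes 10..11)
 * incrementally: the TTL/protocol word drops by 0x0100, so the stored
 * one's-complement checksum grows by 0x0100, plus the end-around carry
 * when the 16-bit field would overflow. */
static void dec_ttl_incremental(uint8_t *hdr)
{
    uint16_t sum = (uint16_t)(hdr[10] << 8 | hdr[11]);
    hdr[8]--;
    if (sum > (uint16_t)(0xffffu - 0x0100u))
        sum = (uint16_t)(sum + 0x0100u + 1);   /* overflow: wrap the carry */
    else
        sum = (uint16_t)(sum + 0x0100u);
    hdr[10] = (uint8_t)(sum >> 8);
    hdr[11] = (uint8_t)sum;
}

int main(void)
{
    /* A made-up 20-byte IPv4 header, checksum field initially zero. */
    uint8_t hdr[20] = {
        0x45, 0x00, 0x00, 0x54, 0x1c, 0x46, 0x40, 0x00,
        0x40, 0x06, 0x00, 0x00, 0xc0, 0xa8, 0x00, 0x68,
        0xc0, 0xa8, 0x00, 0x01
    };
    uint16_t c = ip_cksum(hdr, sizeof hdr);
    hdr[10] = (uint8_t)(c >> 8);               /* seed a valid checksum */
    hdr[11] = (uint8_t)c;

    dec_ttl_incremental(hdr);

    /* Verify the way ip_input() does: summing the whole header, checksum
     * field included, must come out as zero. */
    printf("TTL=%u verify=%#06x (0 means still consistent)\n",
           hdr[8], ip_cksum(hdr, sizeof hdr));
    return 0;
}

One's-complement sums are byte-order independent, which is why the identical +0x0100 bump works whether the field is handled in host or network byte order. The listing of ip_input.c at this revision follows.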
/* $Id: ip_input.c 56960 2015-07-16 23:30:18Z vboxsync $ */
/** @file
 * NAT - IP input.
 */

/*
 * Copyright (C) 2006-2015 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

/*
 * This code is based on:
 *
 * Copyright (c) 1982, 1986, 1988, 1993
 * The Regents of the University of California. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by the University of
 *    California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)ip_input.c 8.2 (Berkeley) 1/4/94
 * ip_input.c,v 1.11 1994/11/16 10:17:08 jkh Exp
 */

/*
 * Changes and additions relating to SLiRP are
 * Copyright (c) 1995 Danny Gasparovski.
 *
 * Please read the file COPYRIGHT for the
 * terms and conditions of the copyright.
 */

#include <slirp.h>
#include "ip_icmp.h"
#include "alias.h"

/*
 * IP initialization: fill in IP protocol switch table.
 * All protocols not implemented in kernel go to raw IP protocol handler.
 */
void
ip_init(PNATState pData)
{
    int i = 0;
    for (i = 0; i < IPREASS_NHASH; ++i)
        TAILQ_INIT(&ipq[i]);
    maxnipq = 100; /* ??? */
    maxfragsperpacket = 16;
    nipq = 0;
    ip_currid = tt.tv_sec & 0xffff;
    udp_init(pData);
    tcp_init(pData);
}

/*
 * Ip input routine. Checksum and byte swap header. If fragmented
 * try to reassemble. Process options. Pass to next level.
 */
void
ip_input(PNATState pData, struct mbuf *m)
{
    register struct ip *ip;
    int hlen = 0;
    int mlen = 0;

    STAM_PROFILE_START(&pData->StatIP_input, a);

    LogFlowFunc(("ENTER: m = %lx\n", (long)m));
    ip = mtod(m, struct ip *);
    Log2(("ip_dst=%RTnaipv4(len:%d) m_len = %d\n", ip->ip_dst, RT_N2H_U16(ip->ip_len), m->m_len));

    ipstat.ips_total++;
    {
        int rc;
        if (!(m->m_flags & M_SKIP_FIREWALL))
        {
            STAM_PROFILE_START(&pData->StatALIAS_input, b);
            rc = LibAliasIn(pData->proxy_alias, mtod(m, char *), m_length(m, NULL));
            STAM_PROFILE_STOP(&pData->StatALIAS_input, b);
            Log2(("NAT: LibAlias return %d\n", rc));
        }
        else
            m->m_flags &= ~M_SKIP_FIREWALL;
        if (m->m_len != RT_N2H_U16(ip->ip_len))
            m->m_len = RT_N2H_U16(ip->ip_len);
    }

    mlen = m->m_len;

    if (mlen < sizeof(struct ip))
    {
        ipstat.ips_toosmall++;
        goto bad_free_m;
    }

    ip = mtod(m, struct ip *);
    if (ip->ip_v != IPVERSION)
    {
        ipstat.ips_badvers++;
        goto bad_free_m;
    }

    hlen = ip->ip_hl << 2;
    if (   hlen < sizeof(struct ip)
        || hlen > m->m_len)
    {
        /* min header length */
        ipstat.ips_badhlen++; /* or packet too short */
        goto bad_free_m;
    }

    /* keep ip header intact for ICMP reply
     * ip->ip_sum = cksum(m, hlen);
     * if (ip->ip_sum) {
     */
    if (cksum(m, hlen))
    {
        ipstat.ips_badsum++;
        goto bad_free_m;
    }

    /*
     * Convert fields to host representation.
     */
    NTOHS(ip->ip_len);
    if (ip->ip_len < hlen)
    {
        ipstat.ips_badlen++;
        goto bad_free_m;
    }

    NTOHS(ip->ip_id);
    NTOHS(ip->ip_off);

    /*
     * Check that the amount of data in the buffers
     * is at least as much as the IP header would have us expect.
     * Trim mbufs if longer than we expect.
     * Drop packet if shorter than we expect.
     */
    if (mlen < ip->ip_len)
    {
        ipstat.ips_tooshort++;
        goto bad_free_m;
    }

    /* Should drop packet if mbuf too long? hmmm... */
    if (mlen > ip->ip_len)
        m_adj(m, ip->ip_len - m->m_len);

    /* source must be unicast */
    if ((ip->ip_src.s_addr & RT_N2H_U32_C(0xe0000000)) == RT_N2H_U32_C(0xe0000000))
        goto free_m;

    /* check ip_ttl for a correct ICMP reply */
    if (ip->ip_ttl == 0 || ip->ip_ttl == 1)
    {
        /* XXX: if we are the destination, perhaps we should send ICMP_TIMXCEED_REASS instead */
        icmp_error(pData, m, ICMP_TIMXCEED, ICMP_TIMXCEED_INTRANS, 0, "ttl");
        goto no_free_m;
    }

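    /* Decrement the TTL and patch the header checksum in place instead of
     * recomputing it (incremental update, cf. RFC 1141/1624): the TTL/protocol
     * word just dropped by 0x0100, so the stored one's-complement checksum
     * grows by 0x0100, adding the end-around carry back in when the 16-bit
     * field would overflow. */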
    ip->ip_ttl--;
    if (ip->ip_sum > RT_H2N_U16_C(0xffffU - (1 << 8)))
        ip->ip_sum += RT_H2N_U16_C(1 << 8) + 1;
    else
        ip->ip_sum += RT_H2N_U16_C(1 << 8);

    /*
     * Drop multicast (class d) and reserved (class e) here. The rest
     * of the code is not yet prepared to deal with it. IGMP is not
     * implemented either.
     */
    if (   (ip->ip_dst.s_addr & RT_N2H_U32_C(0xe0000000)) == RT_N2H_U32_C(0xe0000000)
        && ip->ip_dst.s_addr != 0xffffffff)
    {
        goto free_m;
    }

    /*
     * If offset or IP_MF are set, must reassemble.
     * Otherwise, nothing need be done.
     * (We could look in the reassembly queue to see
     * if the packet was previously fragmented,
     * but it's not worth the time; just let them time out.)
     */
    if (ip->ip_off & (IP_MF | IP_OFFMASK))
    {
        m = ip_reass(pData, m);
        if (m == NULL)
            goto no_free_m;
        ip = mtod(m, struct ip *);
        hlen = ip->ip_hl << 2;
    }
    else
        ip->ip_len -= hlen;

    /*
     * Switch out to protocol's input routine.
     */
    ipstat.ips_delivered++;
    switch (ip->ip_p)
    {
        case IPPROTO_TCP:
            tcp_input(pData, m, hlen, (struct socket *)NULL);
            break;
        case IPPROTO_UDP:
            udp_input(pData, m, hlen);
            break;
        case IPPROTO_ICMP:
            icmp_input(pData, m, hlen);
            break;
        default:
            ipstat.ips_noproto++;
            m_freem(pData, m);
    }
    goto no_free_m;

bad_free_m:
    Log2(("NAT: IP datagram to %RTnaipv4 with size(%d) claimed as bad\n",
          ip->ip_dst, ip->ip_len));
free_m:
    m_freem(pData, m);
no_free_m:
    STAM_PROFILE_STOP(&pData->StatIP_input, a);
    LogFlowFuncLeave();
    return;
}

struct mbuf *
ip_reass(PNATState pData, struct mbuf *m)
{
    struct ip *ip;
    struct mbuf *p, *q, *nq;
    struct ipq_t *fp = NULL;
    struct ipqhead *head;
    int i, hlen, next;
    u_short hash;

    /* If maxnipq or maxfragsperpacket are 0, never accept fragments. */
    LogFlowFunc(("ENTER: m:%p\n", m));
    if (   maxnipq == 0
        || maxfragsperpacket == 0)
    {
        ipstat.ips_fragments++;
        ipstat.ips_fragdropped++;
        m_freem(pData, m);
        LogFlowFunc(("LEAVE: NULL\n"));
        return (NULL);
    }

    ip = mtod(m, struct ip *);
    hlen = ip->ip_hl << 2;

    hash = IPREASS_HASH(ip->ip_src.s_addr, ip->ip_id);
    head = &ipq[hash];

    /*
     * Look for queue of fragments
     * of this datagram.
     */
    TAILQ_FOREACH(fp, head, ipq_list)
        if (ip->ip_id == fp->ipq_id &&
            ip->ip_src.s_addr == fp->ipq_src.s_addr &&
            ip->ip_dst.s_addr == fp->ipq_dst.s_addr &&
            ip->ip_p == fp->ipq_p)
            goto found;

    fp = NULL;

    /*
     * Attempt to trim the number of allocated fragment queues if it
     * exceeds the administrative limit.
     */
    if ((nipq > maxnipq) && (maxnipq > 0))
    {
        /*
         * drop something from the tail of the current queue
         * before proceeding further
         */
        struct ipq_t *pHead = TAILQ_LAST(head, ipqhead);
        if (pHead == NULL)
        {
            /* gak */
            for (i = 0; i < IPREASS_NHASH; i++)
            {
                struct ipq_t *pTail = TAILQ_LAST(&ipq[i], ipqhead);
                if (pTail)
                {
                    ipstat.ips_fragtimeout += pTail->ipq_nfrags;
                    ip_freef(pData, &ipq[i], pTail);
                    break;
                }
            }
        }
        else
        {
            ipstat.ips_fragtimeout += pHead->ipq_nfrags;
            ip_freef(pData, head, pHead);
        }
    }

found:
    /*
     * Adjust ip_len to not reflect header,
     * convert offset of this to bytes (the offset field counts 8-byte units).
     */
    ip->ip_len -= hlen;
    if (ip->ip_off & IP_MF)
    {
        /*
         * Make sure that fragments have a data length
         * that's a non-zero multiple of 8 bytes.
         */
        if (ip->ip_len == 0 || (ip->ip_len & 0x7) != 0)
        {
            ipstat.ips_toosmall++; /* XXX */
            goto dropfrag;
        }
        m->m_flags |= M_FRAG;
    }
    else
        m->m_flags &= ~M_FRAG;
    ip->ip_off <<= 3;


    /*
     * Attempt reassembly; if it succeeds, proceed.
     * ip_reass() will return a different mbuf.
     */
    ipstat.ips_fragments++;

    /* Previous ip_reass() started here. */
    /*
     * Presence of header sizes in mbufs
     * would confuse code below.
     */
    m->m_data += hlen;
    m->m_len -= hlen;

    /*
     * If first fragment to arrive, create a reassembly queue.
     */
    if (fp == NULL)
    {
        fp = RTMemAlloc(sizeof(struct ipq_t));
        if (fp == NULL)
            goto dropfrag;
        TAILQ_INSERT_HEAD(head, fp, ipq_list);
        nipq++;
        fp->ipq_nfrags = 1;
        fp->ipq_ttl = IPFRAGTTL;
        fp->ipq_p = ip->ip_p;
        fp->ipq_id = ip->ip_id;
        fp->ipq_src = ip->ip_src;
        fp->ipq_dst = ip->ip_dst;
        fp->ipq_frags = m;
        m->m_nextpkt = NULL;
        goto done;
    }
    else
    {
        fp->ipq_nfrags++;
    }

#define GETIP(m) ((struct ip*)((m)->m_pkthdr.header))

    /*
     * Find a segment which begins after this one does.
     */
    for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt)
        if (GETIP(q)->ip_off > ip->ip_off)
            break;

    /*
     * If there is a preceding segment, it may provide some of
     * our data already. If so, drop the data from the incoming
     * segment. If it provides all of our data, drop us, otherwise
     * stick new segment in the proper place.
     *
     * If some of the data is dropped from the preceding
     * segment, then its checksum is invalidated.
     */
    if (p)
    {
        i = GETIP(p)->ip_off + GETIP(p)->ip_len - ip->ip_off;
        if (i > 0)
        {
            if (i >= ip->ip_len)
                goto dropfrag;
            m_adj(m, i);
            ip->ip_off += i;
            ip->ip_len -= i;
        }
        m->m_nextpkt = p->m_nextpkt;
        p->m_nextpkt = m;
    }
    else
    {
        m->m_nextpkt = fp->ipq_frags;
        fp->ipq_frags = m;
    }

    /*
     * While we overlap succeeding segments trim them or,
     * if they are completely covered, dequeue them.
     */
    for (; q != NULL && ip->ip_off + ip->ip_len > GETIP(q)->ip_off;
         q = nq)
    {
        i = (ip->ip_off + ip->ip_len) - GETIP(q)->ip_off;
        if (i < GETIP(q)->ip_len)
        {
            GETIP(q)->ip_len -= i;
            GETIP(q)->ip_off += i;
            m_adj(q, i);
            break;
        }
        nq = q->m_nextpkt;
        m->m_nextpkt = nq;
        ipstat.ips_fragdropped++;
        fp->ipq_nfrags--;
        m_freem(pData, q);
    }

    /*
     * Check for complete reassembly and perform frag per packet
     * limiting.
     *
     * Frag limiting is performed here so that the nth frag has
     * a chance to complete the packet before we drop the packet.
     * As a result, n+1 frags are actually allowed per packet, but
     * only n will ever be stored. (n = maxfragsperpacket.)
     */
    next = 0;
    for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt)
    {
        if (GETIP(q)->ip_off != next)
        {
            if (fp->ipq_nfrags > maxfragsperpacket)
            {
                ipstat.ips_fragdropped += fp->ipq_nfrags;
                ip_freef(pData, head, fp);
            }
            goto done;
        }
        next += GETIP(q)->ip_len;
    }
    /* Make sure the last packet didn't have the IP_MF flag */
    if (p->m_flags & M_FRAG)
    {
        if (fp->ipq_nfrags > maxfragsperpacket)
        {
            ipstat.ips_fragdropped += fp->ipq_nfrags;
            ip_freef(pData, head, fp);
        }
        goto done;
    }

    /*
     * Reassembly is complete. Make sure the packet is a sane size.
     */
    q = fp->ipq_frags;
    ip = GETIP(q);
    hlen = ip->ip_hl << 2;
    if (next + hlen > IP_MAXPACKET)
    {
        ipstat.ips_fragdropped += fp->ipq_nfrags;
        ip_freef(pData, head, fp);
        goto done;
    }

    /*
     * Concatenate fragments.
     */
    m = q;
    nq = q->m_nextpkt;
    q->m_nextpkt = NULL;
    for (q = nq; q != NULL; q = nq)
    {
        nq = q->m_nextpkt;
        q->m_nextpkt = NULL;
        m_cat(pData, m, q);

        m->m_len += hlen;
        m->m_data -= hlen;
        ip = mtod(m, struct ip *); /* update ip pointer */
        hlen = ip->ip_hl << 2;
        m->m_len -= hlen;
        m->m_data += hlen;
    }
    m->m_len += hlen;
    m->m_data -= hlen;

    /*
     * Create header for new ip packet by modifying header of first
     * packet; dequeue and discard fragment reassembly header.
     * Make header visible.
     */

    ip->ip_len = next;
    ip->ip_src = fp->ipq_src;
    ip->ip_dst = fp->ipq_dst;
    TAILQ_REMOVE(head, fp, ipq_list);
    nipq--;
    RTMemFree(fp);

    Assert((ip->ip_len == next));
    /* some debugging cruft by sklower, below, will go away soon */
#if 0
    if (m->m_flags & M_PKTHDR) /* XXX this should be done elsewhere */
        m_fixhdr(m);
#endif
    ipstat.ips_reassembled++;
    LogFlowFunc(("LEAVE: %p\n", m));
    return (m);

dropfrag:
    ipstat.ips_fragdropped++;
    if (fp != NULL)
        fp->ipq_nfrags--;
    m_freem(pData, m);

done:
    LogFlowFunc(("LEAVE: NULL\n"));
    return NULL;

#undef GETIP
}

void
ip_freef(PNATState pData, struct ipqhead *fhp, struct ipq_t *fp)
{
    struct mbuf *q;

    while (fp->ipq_frags)
    {
        q = fp->ipq_frags;
        fp->ipq_frags = q->m_nextpkt;
        m_freem(pData, q);
    }
    TAILQ_REMOVE(fhp, fp, ipq_list);
    RTMemFree(fp);
    nipq--;
}

/*
 * IP timer processing;
 * if a timer expires on a reassembly
 * queue, discard it.
 */
void
ip_slowtimo(PNATState pData)
{
    register struct ipq_t *fp;

    /* XXX: the fragment expiration is the same but requires an
     * additional loop (see ip_input.c in the FreeBSD tree)
     */
    int i;
    LogFlow(("ip_slowtimo:\n"));
    for (i = 0; i < IPREASS_NHASH; i++)
    {
        for (fp = TAILQ_FIRST(&ipq[i]); fp;)
        {
            struct ipq_t *fpp;

            fpp = fp;
            fp = TAILQ_NEXT(fp, ipq_list);
            if (--fpp->ipq_ttl == 0)
            {
                ipstat.ips_fragtimeout += fpp->ipq_nfrags;
                ip_freef(pData, &ipq[i], fpp);
            }
        }
    }
    /*
     * If we are over the maximum number of fragments
     * (due to the limit being lowered), drain off
     * enough to get down to the new limit.
     */
    if (maxnipq >= 0 && nipq > maxnipq)
    {
        for (i = 0; i < IPREASS_NHASH; i++)
        {
            while (nipq > maxnipq && !TAILQ_EMPTY(&ipq[i]))
            {
                ipstat.ips_fragdropped += TAILQ_FIRST(&ipq[i])->ipq_nfrags;
                ip_freef(pData, &ipq[i], TAILQ_FIRST(&ipq[i]));
            }
        }
    }
}


/*
 * Strip out IP options, at higher
 * level protocol in the kernel.
 * Second argument is buffer to which options
 * will be moved, and return value is their length.
 * (XXX) should be deleted; last arg currently ignored.
 */
void
ip_stripoptions(struct mbuf *m, struct mbuf *mopt)
{
    register int i;
    struct ip *ip = mtod(m, struct ip *);
    register caddr_t opts;
    int olen;
    NOREF(mopt); /* @todo: do we really need this options buffer? */

    olen = (ip->ip_hl << 2) - sizeof(struct ip);
    opts = (caddr_t)(ip + 1);
    i = m->m_len - (sizeof(struct ip) + olen);
    memcpy(opts, opts + olen, (unsigned)i);
    m->m_len -= olen;

    ip->ip_hl = sizeof(struct ip) >> 2;
}
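
The overlap handling in ip_reass() above boils down to interval arithmetic on fragment byte ranges: if the preceding fragment ends past the start of the new one, the overlapping prefix is trimmed off the new fragment, and the fragment is dropped outright when it is completely covered. Below is a minimal, mbuf-free sketch of that rule under assumed, illustrative names (struct frag, trim_against_prev), using plain byte offsets just as ip_reass() does after shifting ip_off into bytes.

#include <stdio.h>

/* One queued fragment, described only by its byte offset and length. */
struct frag {
    int off;
    int len;
};

/* Trim the front of 'cur' where it overlaps the preceding fragment 'prev',
 * mirroring the "if (p) { i = ...; m_adj(m, i); ... }" step in ip_reass().
 * Returns 0 if 'cur' survives (possibly trimmed), -1 if it is fully covered
 * and would be dropped. */
static int trim_against_prev(const struct frag *prev, struct frag *cur)
{
    int i = prev->off + prev->len - cur->off;   /* size of the overlap */
    if (i > 0)
    {
        if (i >= cur->len)
            return -1;                          /* nothing new: drop it */
        cur->off += i;                          /* keep only the tail */
        cur->len -= i;
    }
    return 0;
}

int main(void)
{
    struct frag prev = { 0, 24 };       /* covers bytes [0, 24) */
    struct frag a = { 16, 16 };         /* overlaps prev by 8 bytes */
    struct frag b = { 8, 8 };           /* lies entirely inside prev */

    if (trim_against_prev(&prev, &a) == 0)
        printf("a kept as [%d, %d)\n", a.off, a.off + a.len);  /* [24, 32) */
    if (trim_against_prev(&prev, &b) != 0)
        printf("b dropped (fully covered)\n");
    return 0;
}

The succeeding-fragment loop in ip_reass() applies the symmetric rule, trimming or dequeuing later fragments that the newly inserted one overlaps.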