VirtualBox

source: vbox/trunk/src/VBox/Devices/Network/slirp/ip_input.c@ 34244

Last change on this file since 34244 was 34103, checked in by vboxsync, 14 years ago

NAT: (debug) logging fixes

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 16.6 KB
/* $Id: ip_input.c 34103 2010-11-16 11:18:55Z vboxsync $ */
/** @file
 * NAT - IP input.
 */

/*
 * Copyright (C) 2006-2010 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

/*
 * This code is based on:
 *
 * Copyright (c) 1982, 1986, 1988, 1993
 *      The Regents of the University of California. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)ip_input.c  8.2 (Berkeley) 1/4/94
 * ip_input.c,v 1.11 1994/11/16 10:17:08 jkh Exp
 */

/*
 * Changes and additions relating to SLiRP are
 * Copyright (c) 1995 Danny Gasparovski.
 *
 * Please read the file COPYRIGHT for the
 * terms and conditions of the copyright.
 */

#include <slirp.h>
#include "ip_icmp.h"
#include "alias.h"

/*
 * IP initialization: fill in IP protocol switch table.
 * All protocols not implemented in kernel go to raw IP protocol handler.
 */
void
ip_init(PNATState pData)
{
    int i = 0;
    for (i = 0; i < IPREASS_NHASH; ++i)
        TAILQ_INIT(&ipq[i]);
    maxnipq = 100; /* ??? */
    maxfragsperpacket = 16;
    nipq = 0;
    ip_currid = tt.tv_sec & 0xffff;
    udp_init(pData);
    tcp_init(pData);
}

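/*
 * Pick the libalias instance to use for this packet: if the mbuf carries a
 * PACKET_TAG_ALIAS tag, the libalias instance stored right after the tag is
 * used; otherwise fall back to the global proxy_alias instance.
 */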
static struct libalias *select_alias(PNATState pData, struct mbuf* m)
{
    struct libalias *la = pData->proxy_alias;
    struct udphdr *udp = NULL;
    struct ip *pip = NULL;

    struct m_tag *t;
    if ((t = m_tag_find(m, PACKET_TAG_ALIAS, NULL)) != 0)
        return (struct libalias *)&t[1];

    return la;
}

/*
 * Ip input routine. Checksum and byte swap header. If fragmented
 * try to reassemble. Process options. Pass to next level.
 */
void
ip_input(PNATState pData, struct mbuf *m)
{
    register struct ip *ip;
    int hlen = 0;
    int mlen = 0;

    STAM_PROFILE_START(&pData->StatIP_input, a);

    LogFlow(("ip_input: m = %lx\n", (long)m));
    ip = mtod(m, struct ip *);
    Log2(("ip_dst=%R[IP4](len:%d) m_len = %d\n", &ip->ip_dst, RT_N2H_U16(ip->ip_len), m->m_len));

    ipstat.ips_total++;
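    /*
     * Run the packet through libalias first so that inbound NAT translation
     * happens before any other processing; the translation may change the
     * payload, so m_len is re-synced with the (still network order) ip_len.
     */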
    {
        int rc;
        STAM_PROFILE_START(&pData->StatALIAS_input, b);
        rc = LibAliasIn(select_alias(pData, m), mtod(m, char *), m_length(m, NULL));
        STAM_PROFILE_STOP(&pData->StatALIAS_input, b);
        Log2(("NAT: LibAlias return %d\n", rc));
        if (m->m_len != RT_N2H_U16(ip->ip_len))
            m->m_len = RT_N2H_U16(ip->ip_len);
    }

    mlen = m->m_len;

    if (mlen < sizeof(struct ip))
    {
        ipstat.ips_toosmall++;
        STAM_PROFILE_STOP(&pData->StatIP_input, a);
        return;
    }

    ip = mtod(m, struct ip *);
    if (ip->ip_v != IPVERSION)
    {
        ipstat.ips_badvers++;
        goto bad_free_m;
    }

    hlen = ip->ip_hl << 2;
    if (   hlen < sizeof(struct ip)
        || hlen > m->m_len)
    {
        /* min header length */
        ipstat.ips_badhlen++; /* or packet too short */
        goto bad_free_m;
    }

    /* keep ip header intact for ICMP reply
     * ip->ip_sum = cksum(m, hlen);
     * if (ip->ip_sum) {
     */
    if (cksum(m, hlen))
    {
        ipstat.ips_badsum++;
        goto bad_free_m;
    }

    /*
     * Convert fields to host representation.
     */
    NTOHS(ip->ip_len);
    if (ip->ip_len < hlen)
    {
        ipstat.ips_badlen++;
        goto bad_free_m;
    }

    NTOHS(ip->ip_id);
    NTOHS(ip->ip_off);

    /*
     * Check that the amount of data in the buffers
     * is at least as much as the IP header would have us expect.
     * Trim mbufs if longer than we expect.
     * Drop packet if shorter than we expect.
     */
    if (mlen < ip->ip_len)
    {
        ipstat.ips_tooshort++;
        goto bad_free_m;
    }

    /* Should drop packet if mbuf too long? hmmm... */
    if (mlen > ip->ip_len)
        m_adj(m, ip->ip_len - m->m_len);

    /* check ip_ttl for a correct ICMP reply */
    if (ip->ip_ttl == 0 || ip->ip_ttl == 1)
    {
        icmp_error(pData, m, ICMP_TIMXCEED, ICMP_TIMXCEED_INTRANS, 0, "ttl");
        goto bad_free_m;
    }

    ip->ip_ttl--;
    /*
     * If offset or IP_MF are set, must reassemble.
     * Otherwise, nothing need be done.
     * (We could look in the reassembly queue to see
     * if the packet was previously fragmented,
     * but it's not worth the time; just let them time out.)
     *
     */
    if (ip->ip_off & (IP_MF | IP_OFFMASK))
    {
        m = ip_reass(pData, m);
        if (m == NULL)
        {
            STAM_PROFILE_STOP(&pData->StatIP_input, a);
            return;
        }
        ip = mtod(m, struct ip *);
        hlen = ip->ip_hl << 2;
    }
    else
        ip->ip_len -= hlen;

    /*
     * Switch out to protocol's input routine.
     */
    ipstat.ips_delivered++;
    switch (ip->ip_p)
    {
        case IPPROTO_TCP:
            tcp_input(pData, m, hlen, (struct socket *)NULL);
            break;
        case IPPROTO_UDP:
            udp_input(pData, m, hlen);
            break;
        case IPPROTO_ICMP:
            icmp_input(pData, m, hlen);
            break;
        default:
            ipstat.ips_noproto++;
            m_freem(pData, m);
    }
    STAM_PROFILE_STOP(&pData->StatIP_input, a);
    return;

bad_free_m:
    Log2(("NAT: IP datagram to %R[IP4] with size(%d) claimed as bad\n",
          &ip->ip_dst, ip->ip_len));
    m_freem(pData, m);
    STAM_PROFILE_STOP(&pData->StatIP_input, a);
    return;
}

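/*
 * Take an incoming fragment and try to reassemble the original datagram.
 * The fragment is linked into the matching reassembly queue (a new queue is
 * created for the first fragment of a datagram).  Returns the reassembled
 * packet once all fragments have arrived, or NULL while fragments are still
 * outstanding or the fragment had to be dropped.
 */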
struct mbuf *
ip_reass(PNATState pData, struct mbuf* m)
{
    struct ip *ip;
    struct mbuf *p, *q, *nq;
    struct ipq_t *fp = NULL;
    struct ipqhead *head;
    int i, hlen, next;
    u_short hash;

    /* If maxnipq or maxfragsperpacket are 0, never accept fragments. */
    if (   maxnipq == 0
        || maxfragsperpacket == 0)
    {
        ipstat.ips_fragments++;
        ipstat.ips_fragdropped++;
        m_freem(pData, m);
        return (NULL);
    }

    ip = mtod(m, struct ip *);
    hlen = ip->ip_hl << 2;

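    /* Reassembly queues are spread over IPREASS_NHASH buckets, hashed on
     * the datagram's source address and IP id. */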
    hash = IPREASS_HASH(ip->ip_src.s_addr, ip->ip_id);
    head = &ipq[hash];

    /*
     * Look for queue of fragments
     * of this datagram.
     */
    TAILQ_FOREACH(fp, head, ipq_list)
        if (ip->ip_id == fp->ipq_id &&
            ip->ip_src.s_addr == fp->ipq_src.s_addr &&
            ip->ip_dst.s_addr == fp->ipq_dst.s_addr &&
            ip->ip_p == fp->ipq_p)
            goto found;

    fp = NULL;

    /*
     * Attempt to trim the number of allocated fragment queues if it
     * exceeds the administrative limit.
     */
    if ((nipq > maxnipq) && (maxnipq > 0))
    {
        /*
         * drop something from the tail of the current queue
         * before proceeding further
         */
        struct ipq_t *pHead = TAILQ_LAST(head, ipqhead);
        if (pHead == NULL)
        {
            /* gak */
            for (i = 0; i < IPREASS_NHASH; i++)
            {
                struct ipq_t *pTail = TAILQ_LAST(&ipq[i], ipqhead);
                if (pTail)
                {
                    ipstat.ips_fragtimeout += pTail->ipq_nfrags;
                    ip_freef(pData, &ipq[i], pTail);
                    break;
                }
            }
        }
        else
        {
            ipstat.ips_fragtimeout += pHead->ipq_nfrags;
            ip_freef(pData, head, pHead);
        }
    }

found:
    /*
     * Adjust ip_len to not reflect header,
     * convert offset of this to bytes.
     */
    ip->ip_len -= hlen;
    if (ip->ip_off & IP_MF)
    {
        /*
         * Make sure that fragments have a data length
         * that's a non-zero multiple of 8 bytes.
         */
        if (ip->ip_len == 0 || (ip->ip_len & 0x7) != 0)
        {
            ipstat.ips_toosmall++; /* XXX */
            goto dropfrag;
        }
        m->m_flags |= M_FRAG;
    }
    else
        m->m_flags &= ~M_FRAG;
    ip->ip_off <<= 3;


    /*
     * Attempt reassembly; if it succeeds, proceed.
     * ip_reass() will return a different mbuf.
     */
    ipstat.ips_fragments++;

    /* Previous ip_reass() started here. */
    /*
     * Presence of header sizes in mbufs
     * would confuse code below.
     */
    m->m_data += hlen;
    m->m_len -= hlen;

    /*
     * If first fragment to arrive, create a reassembly queue.
     */
    if (fp == NULL)
    {
        fp = RTMemAlloc(sizeof(struct ipq_t));
        if (fp == NULL)
            goto dropfrag;
        TAILQ_INSERT_HEAD(head, fp, ipq_list);
        nipq++;
        fp->ipq_nfrags = 1;
        fp->ipq_ttl = IPFRAGTTL;
        fp->ipq_p = ip->ip_p;
        fp->ipq_id = ip->ip_id;
        fp->ipq_src = ip->ip_src;
        fp->ipq_dst = ip->ip_dst;
        fp->ipq_frags = m;
        m->m_nextpkt = NULL;
        goto done;
    }
    else
    {
        fp->ipq_nfrags++;
    }

#define GETIP(m) ((struct ip*)((m)->m_pkthdr.header))
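/* GETIP() recovers the IP header of a queued fragment from the pointer
 * stashed in its mbuf packet header. */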

    /*
     * Find a segment which begins after this one does.
     */
    for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt)
        if (GETIP(q)->ip_off > ip->ip_off)
            break;

    /*
     * If there is a preceding segment, it may provide some of
     * our data already. If so, drop the data from the incoming
     * segment. If it provides all of our data, drop us, otherwise
     * stick new segment in the proper place.
     *
     * If some of the data is dropped from the preceding
     * segment, then its checksum is invalidated.
     */
    if (p)
    {
        i = GETIP(p)->ip_off + GETIP(p)->ip_len - ip->ip_off;
        if (i > 0)
        {
            if (i >= ip->ip_len)
                goto dropfrag;
            m_adj(m, i);
            ip->ip_off += i;
            ip->ip_len -= i;
        }
        m->m_nextpkt = p->m_nextpkt;
        p->m_nextpkt = m;
    }
    else
    {
        m->m_nextpkt = fp->ipq_frags;
        fp->ipq_frags = m;
    }

    /*
     * While we overlap succeeding segments trim them or,
     * if they are completely covered, dequeue them.
     */
    for (; q != NULL && ip->ip_off + ip->ip_len > GETIP(q)->ip_off;
         q = nq)
    {
        i = (ip->ip_off + ip->ip_len) - GETIP(q)->ip_off;
        if (i < GETIP(q)->ip_len)
        {
            GETIP(q)->ip_len -= i;
            GETIP(q)->ip_off += i;
            m_adj(q, i);
            break;
        }
        nq = q->m_nextpkt;
        m->m_nextpkt = nq;
        ipstat.ips_fragdropped++;
        fp->ipq_nfrags--;
        m_freem(pData, q);
    }

    /*
     * Check for complete reassembly and perform frag per packet
     * limiting.
     *
     * Frag limiting is performed here so that the nth frag has
     * a chance to complete the packet before we drop the packet.
     * As a result, n+1 frags are actually allowed per packet, but
     * only n will ever be stored. (n = maxfragsperpacket.)
     *
     */
    next = 0;
    for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt)
    {
        if (GETIP(q)->ip_off != next)
        {
            if (fp->ipq_nfrags > maxfragsperpacket)
            {
                ipstat.ips_fragdropped += fp->ipq_nfrags;
                ip_freef(pData, head, fp);
            }
            goto done;
        }
        next += GETIP(q)->ip_len;
    }
    /* Make sure the last packet didn't have the IP_MF flag */
    if (p->m_flags & M_FRAG)
    {
        if (fp->ipq_nfrags > maxfragsperpacket)
        {
            ipstat.ips_fragdropped += fp->ipq_nfrags;
            ip_freef(pData, head, fp);
        }
        goto done;
    }

    /*
     * Reassembly is complete. Make sure the packet is a sane size.
     */
    q = fp->ipq_frags;
    ip = GETIP(q);
    hlen = ip->ip_hl << 2;
    if (next + hlen > IP_MAXPACKET)
    {
        ipstat.ips_fragdropped += fp->ipq_nfrags;
        ip_freef(pData, head, fp);
        goto done;
    }

    /*
     * Concatenate fragments.
     */
    m = q;
    nq = q->m_nextpkt;
    q->m_nextpkt = NULL;
    for (q = nq; q != NULL; q = nq)
    {
        nq = q->m_nextpkt;
        q->m_nextpkt = NULL;
        m_cat(pData, m, q);

        m->m_len += hlen;
        m->m_data -= hlen;
        ip = mtod(m, struct ip *); /* update ip pointer */
        hlen = ip->ip_hl << 2;
        m->m_len -= hlen;
        m->m_data += hlen;
    }
    m->m_len += hlen;
    m->m_data -= hlen;

    /*
     * Create header for new ip packet by modifying header of first
     * packet; dequeue and discard fragment reassembly header.
     * Make header visible.
     */

    ip->ip_len = next;
    ip->ip_src = fp->ipq_src;
    ip->ip_dst = fp->ipq_dst;
    TAILQ_REMOVE(head, fp, ipq_list);
    nipq--;
    RTMemFree(fp);

    Assert((ip->ip_len == next));
    /* some debugging cruft by sklower, below, will go away soon */
#if 0
    if (m->m_flags & M_PKTHDR) /* XXX this should be done elsewhere */
        m_fixhdr(m);
#endif
    ipstat.ips_reassembled++;
    return (m);

dropfrag:
    ipstat.ips_fragdropped++;
    if (fp != NULL)
        fp->ipq_nfrags--;
    m_freem(pData, m);

done:
    return NULL;

#undef GETIP
}

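/*
 * Free a fragment reassembly queue: release every mbuf still chained to it,
 * unlink it from its hash bucket and drop the queue count.
 */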
void
ip_freef(PNATState pData, struct ipqhead *fhp, struct ipq_t *fp)
{
    struct mbuf *q;

    while (fp->ipq_frags)
    {
        q = fp->ipq_frags;
        fp->ipq_frags = q->m_nextpkt;
        m_freem(pData, q);
    }
    TAILQ_REMOVE(fhp, fp, ipq_list);
    RTMemFree(fp);
    nipq--;
}

/*
 * IP timer processing;
 * if a timer expires on a reassembly
 * queue, discard it.
 */
void
ip_slowtimo(PNATState pData)
{
    register struct ipq_t *fp;

    /* XXX: the fragment expiration is the same, but requires an
     * additional loop (see ip_input.c in the FreeBSD tree)
     */
    int i;
    LogFlow(("ip_slowtimo:\n"));
    for (i = 0; i < IPREASS_NHASH; i++)
    {
        for (fp = TAILQ_FIRST(&ipq[i]); fp;)
        {
            struct ipq_t *fpp;

            fpp = fp;
            fp = TAILQ_NEXT(fp, ipq_list);
            if (--fpp->ipq_ttl == 0)
            {
                ipstat.ips_fragtimeout += fpp->ipq_nfrags;
                ip_freef(pData, &ipq[i], fpp);
            }
        }
    }
    /*
     * If we are over the maximum number of fragments
     * (due to the limit being lowered), drain off
     * enough to get down to the new limit.
     */
    if (maxnipq >= 0 && nipq > maxnipq)
    {
        for (i = 0; i < IPREASS_NHASH; i++)
        {
            while (nipq > maxnipq && !TAILQ_EMPTY(&ipq[i]))
            {
                ipstat.ips_fragdropped += TAILQ_FIRST(&ipq[i])->ipq_nfrags;
                ip_freef(pData, &ipq[i], TAILQ_FIRST(&ipq[i]));
            }
        }
    }
}


/*
 * Strip out IP options, at higher
 * level protocol in the kernel.
 * Second argument is buffer to which options
 * will be moved, and return value is their length.
 * (XXX) should be deleted; last arg currently ignored.
 */
void
ip_stripoptions(struct mbuf *m, struct mbuf *mopt)
{
    register int i;
    struct ip *ip = mtod(m, struct ip *);
    register caddr_t opts;
    int olen;

    olen = (ip->ip_hl<<2) - sizeof(struct ip);
    opts = (caddr_t)(ip + 1);
    i = m->m_len - (sizeof(struct ip) + olen);
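    /* Slide everything that follows the options down over them. */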
    memcpy(opts, opts + olen, (unsigned)i);
    m->m_len -= olen;

    ip->ip_hl = sizeof(struct ip) >> 2;
}