VirtualBox

source: vbox/trunk/src/VBox/Devices/Network/slirp/ip_input.c@ 30350

Last change on this file since 30350 was 30350, checked in by vboxsync, 14 years ago

NAT: Don't m_free the mbuf immediately after sending;
it is the responsibility of the recv threads to release the mbuf.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 16.7 KB
Line 
1/* $Id: ip_input.c 30350 2010-06-22 02:39:23Z vboxsync $ */
2/** @file
3 * NAT - IP input.
4 */
5
6/*
7 * Copyright (C) 2006-2010 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*
19 * This code is based on:
20 *
21 * Copyright (c) 1982, 1986, 1988, 1993
22 * The Regents of the University of California. All rights reserved.
23 *
24 * Redistribution and use in source and binary forms, with or without
25 * modification, are permitted provided that the following conditions
26 * are met:
27 * 1. Redistributions of source code must retain the above copyright
28 * notice, this list of conditions and the following disclaimer.
29 * 2. Redistributions in binary form must reproduce the above copyright
30 * notice, this list of conditions and the following disclaimer in the
31 * documentation and/or other materials provided with the distribution.
32 * 3. All advertising materials mentioning features or use of this software
33 * must display the following acknowledgement:
34 * This product includes software developed by the University of
35 * California, Berkeley and its contributors.
36 * 4. Neither the name of the University nor the names of its contributors
37 * may be used to endorse or promote products derived from this software
38 * without specific prior written permission.
39 *
40 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
41 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
42 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
43 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
44 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
45 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
46 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
47 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
48 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
49 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
50 * SUCH DAMAGE.
51 *
52 * @(#)ip_input.c 8.2 (Berkeley) 1/4/94
53 * ip_input.c,v 1.11 1994/11/16 10:17:08 jkh Exp
54 */
55
56/*
57 * Changes and additions relating to SLiRP are
58 * Copyright (c) 1995 Danny Gasparovski.
59 *
60 * Please read the file COPYRIGHT for the
61 * terms and conditions of the copyright.
62 */
63
64#include <slirp.h>
65#include "ip_icmp.h"
66#include "alias.h"
67
68
69/*
70 * IP initialization: fill in IP protocol switch table.
71 * All protocols not implemented in kernel go to raw IP protocol handler.
72 */
73void
74ip_init(PNATState pData)
75{
76 int i = 0;
77 for (i = 0; i < IPREASS_NHASH; ++i)
78 TAILQ_INIT(&ipq[i]);
79 maxnipq = 100; /* ??? */
80 maxfragsperpacket = 16;
81 nipq = 0;
82 ip_currid = tt.tv_sec & 0xffff;
83 udp_init(pData);
84 tcp_init(pData);
85}
86
87static struct libalias *select_alias(PNATState pData, struct mbuf* m)
88{
89 struct libalias *la = pData->proxy_alias;
90 struct udphdr *udp = NULL;
91 struct ip *pip = NULL;
92
93 struct m_tag *t;
94 if ((t = m_tag_find(m, PACKET_TAG_ALIAS, NULL)) != 0)
95 return (struct libalias *)&t[1];
96
97 return la;
98}
99
100/*
101 * Ip input routine. Checksum and byte swap header. If fragmented
102 * try to reassemble. Process options. Pass to next level.
103 */
104void
105ip_input(PNATState pData, struct mbuf *m)
106{
107 register struct ip *ip;
108 int hlen = 0;
109 int mlen = 0;
110
111 STAM_PROFILE_START(&pData->StatIP_input, a);
112
113 DEBUG_CALL("ip_input");
114 DEBUG_ARG("m = %lx", (long)m);
115 ip = mtod(m, struct ip *);
116 Log2(("ip_dst=%R[IP4](len:%d) m_len = %d", &ip->ip_dst, RT_N2H_U16(ip->ip_len), m->m_len));
117 Log2(("ip_dst=%R[IP4](len:%d) m_len = %d\n", &ip->ip_dst, RT_N2H_U16(ip->ip_len), m->m_len));
118
119 ipstat.ips_total++;
120 {
121 int rc;
122 STAM_PROFILE_START(&pData->StatALIAS_input, b);
123 rc = LibAliasIn(select_alias(pData, m), mtod(m, char *), m_length(m, NULL));
124 STAM_PROFILE_STOP(&pData->StatALIAS_input, b);
125 Log2(("NAT: LibAlias return %d\n", rc));
126 if (m->m_len != RT_N2H_U16(ip->ip_len))
127 m->m_len = RT_N2H_U16(ip->ip_len);
128 }
129
130 mlen = m->m_len;
131
132 if (mlen < sizeof(struct ip))
133 {
134 ipstat.ips_toosmall++;
135 STAM_PROFILE_STOP(&pData->StatIP_input, a);
136 return;
137 }
138
139 ip = mtod(m, struct ip *);
140 if (ip->ip_v != IPVERSION)
141 {
142 ipstat.ips_badvers++;
143 goto bad;
144 }
145
146 hlen = ip->ip_hl << 2;
147 if ( hlen < sizeof(struct ip)
148 || hlen > m->m_len)
149 {
150 /* min header length */
151 ipstat.ips_badhlen++; /* or packet too short */
152 goto bad;
153 }
154
155 /* keep ip header intact for ICMP reply
156 * ip->ip_sum = cksum(m, hlen);
157 * if (ip->ip_sum) {
158 */
159 if (cksum(m, hlen))
160 {
161 ipstat.ips_badsum++;
162 goto bad;
163 }
164
165 /*
166 * Convert fields to host representation.
167 */
168 NTOHS(ip->ip_len);
169 if (ip->ip_len < hlen)
170 {
171 ipstat.ips_badlen++;
172 goto bad;
173 }
174
175 NTOHS(ip->ip_id);
176 NTOHS(ip->ip_off);
177
178 /*
179 * Check that the amount of data in the buffers
180 * is as at least much as the IP header would have us expect.
181 * Trim mbufs if longer than we expect.
182 * Drop packet if shorter than we expect.
183 */
184 if (mlen < ip->ip_len)
185 {
186 ipstat.ips_tooshort++;
187 goto bad;
188 }
189
190 /* Should drop packet if mbuf too long? hmmm... */
191 if (mlen > ip->ip_len)
192 m_adj(m, ip->ip_len - m->m_len);
193
194 /* check ip_ttl for a correct ICMP reply */
195 if (ip->ip_ttl==0 || ip->ip_ttl == 1)
196 {
197 icmp_error(pData, m, ICMP_TIMXCEED, ICMP_TIMXCEED_INTRANS, 0, "ttl");
198 /* don't let this mbuf to be freed */
199 return;
200 }
201
202 ip->ip_ttl--;
203 /*
204 * If offset or IP_MF are set, must reassemble.
205 * Otherwise, nothing need be done.
206 * (We could look in the reassembly queue to see
207 * if the packet was previously fragmented,
208 * but it's not worth the time; just let them time out.)
209 *
210 */
211 if (ip->ip_off & (IP_MF | IP_OFFMASK))
212 {
213 m = ip_reass(pData, m);
214 if (m == NULL)
215 {
216 STAM_PROFILE_STOP(&pData->StatIP_input, a);
217 return;
218 }
219 ip = mtod(m, struct ip *);
220 hlen = ip->ip_hl << 2;
221 }
222 else
223 ip->ip_len -= hlen;
224
225 /*
226 * Switch out to protocol's input routine.
227 */
228 ipstat.ips_delivered++;
229 switch (ip->ip_p)
230 {
231 case IPPROTO_TCP:
232 tcp_input(pData, m, hlen, (struct socket *)NULL);
233 break;
234 case IPPROTO_UDP:
235 udp_input(pData, m, hlen);
236 break;
237 case IPPROTO_ICMP:
238 icmp_input(pData, m, hlen);
239 break;
240 default:
241 ipstat.ips_noproto++;
242 m_freem(pData, m);
243 }
244 STAM_PROFILE_STOP(&pData->StatIP_input, a);
245 return;
246
247bad:
248 Log2(("NAT: IP datagram to %R[IP4] with size(%d) claimed as bad\n",
249 &ip->ip_dst, ip->ip_len));
250 m_freem(pData, m);
251 STAM_PROFILE_STOP(&pData->StatIP_input, a);
252 return;
253}
254
/**
 * Reassemble an IP fragment.
 *
 * Takes ownership of @a m. Returns the fully reassembled datagram when
 * this fragment completes it, or NULL when the fragment was queued for
 * later (or dropped). On success the returned mbuf starts with the IP
 * header of the first fragment, with ip_len/src/dst rewritten.
 */
struct mbuf *
ip_reass(PNATState pData, struct mbuf* m)
{
    struct ip *ip;
    struct mbuf *p, *q, *nq;
    struct ipq_t *fp = NULL;
    struct ipqhead *head;
    int i, hlen, next;
    u_short hash;

    /* If maxnipq or maxfragsperpacket are 0, never accept fragments. */
    if (   maxnipq == 0
        || maxfragsperpacket == 0)
    {
        ipstat.ips_fragments++;
        ipstat.ips_fragdropped++;
        m_freem(pData, m);
        return (NULL);
    }

    ip = mtod(m, struct ip *);
    hlen = ip->ip_hl << 2;

    /* Hash on (source address, IP id) to pick the reassembly bucket. */
    hash = IPREASS_HASH(ip->ip_src.s_addr, ip->ip_id);
    head = &ipq[hash];

    /*
     * Look for queue of fragments
     * of this datagram.
     */
    TAILQ_FOREACH(fp, head, ipq_list)
        if (ip->ip_id == fp->ipq_id &&
            ip->ip_src.s_addr == fp->ipq_src.s_addr &&
            ip->ip_dst.s_addr == fp->ipq_dst.s_addr &&
            ip->ip_p == fp->ipq_p)
            goto found;

    fp = NULL;

    /*
     * Attempt to trim the number of allocated fragment queues if it
     * exceeds the administrative limit.
     */
    if ((nipq > maxnipq) && (maxnipq > 0))
    {
        /*
         * drop something from the tail of the current queue
         * before proceeding further
         */
        struct ipq_t *pHead = TAILQ_LAST(head, ipqhead);
        if (pHead == NULL)
        {
            /* gak -- this bucket is empty; evict from any other bucket */
            for (i = 0; i < IPREASS_NHASH; i++)
            {
                struct ipq_t *pTail = TAILQ_LAST(&ipq[i], ipqhead);
                if (pTail)
                {
                    ipstat.ips_fragtimeout += pTail->ipq_nfrags;
                    ip_freef(pData, &ipq[i], pTail);
                    break;
                }
            }
        }
        else
        {
            ipstat.ips_fragtimeout += pHead->ipq_nfrags;
            ip_freef(pData, head, pHead);
        }
    }

found:
    /*
     * Adjust ip_len to not reflect header,
     * convert offset of this to bytes.
     */
    ip->ip_len -= hlen;
    if (ip->ip_off & IP_MF)
    {
        /*
         * Make sure that fragments have a data length
         * that's a non-zero multiple of 8 bytes.
         */
        if (ip->ip_len == 0 || (ip->ip_len & 0x7) != 0)
        {
            ipstat.ips_toosmall++; /* XXX */
            goto dropfrag;
        }
        m->m_flags |= M_FRAG;
    }
    else
        m->m_flags &= ~M_FRAG;
    ip->ip_off <<= 3; /* fragment offset field is in 8-byte units */


    /*
     * Attempt reassembly; if it succeeds, proceed.
     * ip_reass() will return a different mbuf.
     */
    ipstat.ips_fragments++;

    /* Previous ip_reass() started here. */
    /*
     * Presence of header sizes in mbufs
     * would confuse code below.
     */
    m->m_data += hlen;
    m->m_len -= hlen;

    /*
     * If first fragment to arrive, create a reassembly queue.
     */
    if (fp == NULL)
    {
        fp = RTMemAlloc(sizeof(struct ipq_t));
        if (fp == NULL)
            goto dropfrag;
        TAILQ_INSERT_HEAD(head, fp, ipq_list);
        nipq++;
        fp->ipq_nfrags = 1;
        fp->ipq_ttl = IPFRAGTTL;
        fp->ipq_p = ip->ip_p;
        fp->ipq_id = ip->ip_id;
        fp->ipq_src = ip->ip_src;
        fp->ipq_dst = ip->ip_dst;
        fp->ipq_frags = m;
        m->m_nextpkt = NULL;
        goto done;
    }
    else
    {
        fp->ipq_nfrags++;
    }

/* each queued fragment keeps its IP header reachable via m_pkthdr.header */
#define GETIP(m) ((struct ip*)((m)->m_pkthdr.header))

    /*
     * Find a segment which begins after this one does.
     */
    for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt)
        if (GETIP(q)->ip_off > ip->ip_off)
            break;

    /*
     * If there is a preceding segment, it may provide some of
     * our data already. If so, drop the data from the incoming
     * segment. If it provides all of our data, drop us, otherwise
     * stick new segment in the proper place.
     *
     * If some of the data is dropped from the preceding
     * segment, then its checksum is invalidated.
     */
    if (p)
    {
        i = GETIP(p)->ip_off + GETIP(p)->ip_len - ip->ip_off;
        if (i > 0)
        {
            if (i >= ip->ip_len)
                goto dropfrag; /* fully covered by the preceding fragment */
            m_adj(m, i);
            ip->ip_off += i;
            ip->ip_len -= i;
        }
        m->m_nextpkt = p->m_nextpkt;
        p->m_nextpkt = m;
    }
    else
    {
        m->m_nextpkt = fp->ipq_frags;
        fp->ipq_frags = m;
    }

    /*
     * While we overlap succeeding segments trim them or,
     * if they are completely covered, dequeue them.
     */
    for (; q != NULL && ip->ip_off + ip->ip_len > GETIP(q)->ip_off;
         q = nq)
    {
        i = (ip->ip_off + ip->ip_len) - GETIP(q)->ip_off;
        if (i < GETIP(q)->ip_len)
        {
            /* partial overlap: trim the front of the succeeding fragment */
            GETIP(q)->ip_len -= i;
            GETIP(q)->ip_off += i;
            m_adj(q, i);
            break;
        }
        /* fully covered: unlink and free the succeeding fragment */
        nq = q->m_nextpkt;
        m->m_nextpkt = nq;
        ipstat.ips_fragdropped++;
        fp->ipq_nfrags--;
        m_freem(pData, q);
    }

    /*
     * Check for complete reassembly and perform frag per packet
     * limiting.
     *
     * Frag limiting is performed here so that the nth frag has
     * a chance to complete the packet before we drop the packet.
     * As a result, n+1 frags are actually allowed per packet, but
     * only n will ever be stored. (n = maxfragsperpacket.)
     *
     */
    next = 0;
    for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt)
    {
        if (GETIP(q)->ip_off != next)
        {
            /* hole in the sequence: the datagram is still incomplete */
            if (fp->ipq_nfrags > maxfragsperpacket)
            {
                ipstat.ips_fragdropped += fp->ipq_nfrags;
                ip_freef(pData, head, fp);
            }
            goto done;
        }
        next += GETIP(q)->ip_len;
    }
    /* Make sure the last packet didn't have the IP_MF flag */
    if (p->m_flags & M_FRAG)
    {
        if (fp->ipq_nfrags > maxfragsperpacket)
        {
            ipstat.ips_fragdropped += fp->ipq_nfrags;
            ip_freef(pData, head, fp);
        }
        goto done;
    }

    /*
     * Reassembly is complete. Make sure the packet is a sane size.
     */
    q = fp->ipq_frags;
    ip = GETIP(q);
    hlen = ip->ip_hl << 2;
    if (next + hlen > IP_MAXPACKET)
    {
        ipstat.ips_fragdropped += fp->ipq_nfrags;
        ip_freef(pData, head, fp);
        goto done;
    }

    /*
     * Concatenate fragments.
     */
    m = q;
    nq = q->m_nextpkt;
    q->m_nextpkt = NULL;
    for (q = nq; q != NULL; q = nq)
    {
        nq = q->m_nextpkt;
        q->m_nextpkt = NULL;
        m_cat(pData, m, q);

        /* m_cat may have reallocated; expose the header, refetch the ip
         * pointer, then hide the header again for the next iteration */
        m->m_len += hlen;
        m->m_data -= hlen;
        ip = mtod(m, struct ip *); /*update ip pointer */
        hlen = ip->ip_hl << 2;
        m->m_len -= hlen;
        m->m_data += hlen;
    }
    /* make the IP header visible again in the reassembled mbuf */
    m->m_len += hlen;
    m->m_data -= hlen;

    /*
     * Create header for new ip packet by modifying header of first
     * packet; dequeue and discard fragment reassembly header.
     * Make header visible.
     */

    ip->ip_len = next;
    ip->ip_src = fp->ipq_src;
    ip->ip_dst = fp->ipq_dst;
    TAILQ_REMOVE(head, fp, ipq_list);
    nipq--;
    RTMemFree(fp);

    Assert((ip->ip_len == next));
    /* some debugging cruft by sklower, below, will go away soon */
#if 0
    if (m->m_flags & M_PKTHDR) /* XXX this should be done elsewhere */
        m_fixhdr(m);
#endif
    ipstat.ips_reassembled++;
    return (m);

dropfrag:
    ipstat.ips_fragdropped++;
    if (fp != NULL)
        fp->ipq_nfrags--;
    m_freem(pData, m);

done:
    return NULL;

#undef GETIP
}
552
553void
554ip_freef(PNATState pData, struct ipqhead *fhp, struct ipq_t *fp)
555{
556 struct mbuf *q;
557
558 while (fp->ipq_frags)
559 {
560 q = fp->ipq_frags;
561 fp->ipq_frags = q->m_nextpkt;
562 m_freem(pData, q);
563 }
564 TAILQ_REMOVE(fhp, fp, ipq_list);
565 RTMemFree(fp);
566 nipq--;
567}
568
569/*
570 * IP timer processing;
571 * if a timer expires on a reassembly
572 * queue, discard it.
573 */
574void
575ip_slowtimo(PNATState pData)
576{
577 register struct ipq_t *fp;
578
579 /* XXX: the fragment expiration is the same but requier
580 * additional loop see (see ip_input.c in FreeBSD tree)
581 */
582 int i;
583 DEBUG_CALL("ip_slowtimo");
584 for (i = 0; i < IPREASS_NHASH; i++)
585 {
586 for(fp = TAILQ_FIRST(&ipq[i]); fp;)
587 {
588 struct ipq_t *fpp;
589
590 fpp = fp;
591 fp = TAILQ_NEXT(fp, ipq_list);
592 if(--fpp->ipq_ttl == 0)
593 {
594 ipstat.ips_fragtimeout += fpp->ipq_nfrags;
595 ip_freef(pData, &ipq[i], fpp);
596 }
597 }
598 }
599 /*
600 * If we are over the maximum number of fragments
601 * (due to the limit being lowered), drain off
602 * enough to get down to the new limit.
603 */
604 if (maxnipq >= 0 && nipq > maxnipq)
605 {
606 for (i = 0; i < IPREASS_NHASH; i++)
607 {
608 while (nipq > maxnipq && !TAILQ_EMPTY(&ipq[i]))
609 {
610 ipstat.ips_fragdropped += TAILQ_FIRST(&ipq[i])->ipq_nfrags;
611 ip_freef(pData, &ipq[i], TAILQ_FIRST(&ipq[i]));
612 }
613 }
614 }
615}
616
617
618/*
619 * Strip out IP options, at higher
620 * level protocol in the kernel.
621 * Second argument is buffer to which options
622 * will be moved, and return value is their length.
623 * (XXX) should be deleted; last arg currently ignored.
624 */
625void
626ip_stripoptions(struct mbuf *m, struct mbuf *mopt)
627{
628 register int i;
629 struct ip *ip = mtod(m, struct ip *);
630 register caddr_t opts;
631 int olen;
632
633 olen = (ip->ip_hl<<2) - sizeof(struct ip);
634 opts = (caddr_t)(ip + 1);
635 i = m->m_len - (sizeof(struct ip) + olen);
636 memcpy(opts, opts + olen, (unsigned)i);
637 m->m_len -= olen;
638
639 ip->ip_hl = sizeof(struct ip) >> 2;
640}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette