VirtualBox

source: vbox/trunk/src/VBox/Devices/Network/slirp/ip_input.c@ 25799

Last change on this file since 25799 was 25799, checked in by vboxsync, 15 years ago

NAT: fixed recent regression (xtracker 4590) on non-Linux/non-Windows hosts; some -Wshadow warnings

  • Property svn:eol-style set to native
File size: 16.2 KB
Line 
1/*
2 * Copyright (c) 1982, 1986, 1988, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by the University of
16 * California, Berkeley and its contributors.
17 * 4. Neither the name of the University nor the names of its contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
32 *
33 * @(#)ip_input.c 8.2 (Berkeley) 1/4/94
34 * ip_input.c,v 1.11 1994/11/16 10:17:08 jkh Exp
35 */
36
37/*
38 * Changes and additions relating to SLiRP are
39 * Copyright (c) 1995 Danny Gasparovski.
40 *
41 * Please read the file COPYRIGHT for the
42 * terms and conditions of the copyright.
43 */
44
#include <slirp.h>
#include <string.h>
#include "ip_icmp.h"
#include "alias.h"
48
49
50/*
51 * IP initialization: fill in IP protocol switch table.
52 * All protocols not implemented in kernel go to raw IP protocol handler.
53 */
54void
55ip_init(PNATState pData)
56{
57 int i = 0;
58 for (i = 0; i < IPREASS_NHASH; ++i)
59 TAILQ_INIT(&ipq[i]);
60 maxnipq = 100; /* ??? */
61 maxfragsperpacket = 16;
62 nipq = 0;
63 ip_currid = tt.tv_sec & 0xffff;
64 udp_init(pData);
65 tcp_init(pData);
66}
67
68static struct libalias *select_alias(PNATState pData, struct mbuf* m)
69{
70 struct libalias *la = pData->proxy_alias;
71 struct udphdr *udp = NULL;
72 struct ip *pip = NULL;
73
74#ifndef VBOX_WITH_SLIRP_BSD_MBUF
75 if (m->m_la)
76 return m->m_la;
77#else
78 struct m_tag *t;
79 if (t = m_tag_find(m, PACKET_TAG_ALIAS, NULL) != 0)
80 {
81 return (struct libalias *)&t[1];
82 }
83#endif
84
85 return la;
86}
87
/*
 * Ip input routine. Checksum and byte swap header. If fragmented
 * try to reassemble. Process options. Pass to next level.
 *
 * Takes ownership of the mbuf: it is either handed on to the
 * protocol input routine, queued for reassembly, or freed here.
 */
void
ip_input(PNATState pData, struct mbuf *m)
{
    register struct ip *ip;
    int hlen = 0;
    int mlen = 0;

    STAM_PROFILE_START(&pData->StatIP_input, a);

    DEBUG_CALL("ip_input");
    DEBUG_ARG("m = %lx", (long)m);
    ip = mtod(m, struct ip *);
    /* NOTE(review): the next two Log2 statements are duplicates (the first
     * lacks a trailing newline); presumably one of them should be removed. */
    Log2(("ip_dst=%R[IP4](len:%d) m_len = %d", &ip->ip_dst, ntohs(ip->ip_len), m->m_len));
    Log2(("ip_dst=%R[IP4](len:%d) m_len = %d\n", &ip->ip_dst, ntohs(ip->ip_len), m->m_len));

    ipstat.ips_total++;
    {
        int rc;
        /* Run the packet through libalias (NAT translation) before any
         * other processing; the IP header may be rewritten in place. */
        STAM_PROFILE_START(&pData->StatALIAS_input, b);
        rc = LibAliasIn(select_alias(pData, m), mtod(m, char *), m->m_len);
        STAM_PROFILE_STOP(&pData->StatALIAS_input, b);
        Log2(("NAT: LibAlias return %d\n", rc));
        /* libalias may have changed the datagram length; resync the mbuf. */
        if (m->m_len != ntohs(ip->ip_len))
        {
            m->m_len = ntohs(ip->ip_len);
        }
    }

    mlen = m->m_len;

    /* Too short to even contain a fixed IP header? */
    if (mlen < sizeof(struct ip))
    {
        ipstat.ips_toosmall++;
        STAM_PROFILE_STOP(&pData->StatIP_input, a);
        return;
    }

    ip = mtod(m, struct ip *);
    if (ip->ip_v != IPVERSION)
    {
        ipstat.ips_badvers++;
        goto bad;
    }

    /* Header length is expressed in 32-bit words. */
    hlen = ip->ip_hl << 2;
    if (   hlen < sizeof(struct ip)
        || hlen > m->m_len)
    {
        /* min header length */
        ipstat.ips_badhlen++; /* or packet too short */
        goto bad;
    }

    /* keep ip header intact for ICMP reply
     * ip->ip_sum = cksum(m, hlen);
     * if (ip->ip_sum) {
     */
    if (cksum(m, hlen))
    {
        ipstat.ips_badsum++;
        goto bad;
    }

    /*
     * Convert fields to host representation.
     */
    NTOHS(ip->ip_len);
    if (ip->ip_len < hlen)
    {
        ipstat.ips_badlen++;
        goto bad;
    }

    NTOHS(ip->ip_id);
    NTOHS(ip->ip_off);

    /*
     * Check that the amount of data in the buffers
     * is as at least much as the IP header would have us expect.
     * Trim mbufs if longer than we expect.
     * Drop packet if shorter than we expect.
     */
    if (mlen < ip->ip_len)
    {
        ipstat.ips_tooshort++;
        goto bad;
    }

    /* Should drop packet if mbuf too long? hmmm... */
    if (mlen > ip->ip_len)
        m_adj(m, ip->ip_len - m->m_len);

    /* check ip_ttl for a correct ICMP reply */
    if (ip->ip_ttl==0 || ip->ip_ttl == 1)
    {
        icmp_error(pData, m, ICMP_TIMXCEED, ICMP_TIMXCEED_INTRANS, 0, "ttl");
        goto bad;
    }

    ip->ip_ttl--;
    /*
     * If offset or IP_MF are set, must reassemble.
     * Otherwise, nothing need be done.
     * (We could look in the reassembly queue to see
     * if the packet was previously fragmented,
     * but it's not worth the time; just let them time out.)
     *
     */
    if (ip->ip_off & (IP_MF | IP_OFFMASK))
    {
        m = ip_reass(pData, m);
        if (m == NULL)
        {
            /* Datagram not yet complete; fragments are queued inside ip_reass. */
            STAM_PROFILE_STOP(&pData->StatIP_input, a);
            return;
        }
        /* Reassembly handed back a different mbuf; refresh header view. */
        ip = mtod(m, struct ip *);
        hlen = ip->ip_hl << 2;
    }
    else
        ip->ip_len -= hlen; /* payload length only, matching the reassembly path */

    /*
     * Switch out to protocol's input routine.
     */
    ipstat.ips_delivered++;
    switch (ip->ip_p)
    {
        case IPPROTO_TCP:
            tcp_input(pData, m, hlen, (struct socket *)NULL);
            break;
        case IPPROTO_UDP:
            udp_input(pData, m, hlen);
            break;
        case IPPROTO_ICMP:
            icmp_input(pData, m, hlen);
            break;
        default:
            ipstat.ips_noproto++;
            m_free(pData, m);
    }
    STAM_PROFILE_STOP(&pData->StatIP_input, a);
    return;

bad:
    Log2(("NAT: IP datagram to %R[IP4] with size(%d) claimed as bad\n",
          &ip->ip_dst, ip->ip_len));
    m_freem(pData, m);
    STAM_PROFILE_STOP(&pData->StatIP_input, a);
    return;
}
243
/*
 * Fragment reassembly (classic BSD algorithm).  Insert the fragment
 * 'm' into the reassembly queue matching its (src, dst, id, proto)
 * tuple, trimming any overlap with already-queued fragments.
 *
 * Returns the fully reassembled datagram once the last hole is
 * filled, or NULL while the datagram is still incomplete (in which
 * case the mbuf has been queued, or freed on error).
 */
struct mbuf *
ip_reass(PNATState pData, struct mbuf* m)
{
    struct ip *ip;
    struct mbuf *p, *q, *nq;
    struct ipq_t *fp = NULL;
    struct ipqhead *head;
    int i, hlen, next;
    u_short hash;

    /* If maxnipq or maxfragsperpacket are 0, never accept fragments. */
    if (   maxnipq == 0
        || maxfragsperpacket == 0)
    {
        ipstat.ips_fragments++;
        ipstat.ips_fragdropped++;
        m_freem(pData, m);
        return (NULL);
    }

    ip = mtod(m, struct ip *);
    hlen = ip->ip_hl << 2;

    hash = IPREASS_HASH(ip->ip_src.s_addr, ip->ip_id);
    head = &ipq[hash];

    /*
     * Look for queue of fragments
     * of this datagram.
     */
    TAILQ_FOREACH(fp, head, ipq_list)
        if (ip->ip_id == fp->ipq_id &&
            ip->ip_src.s_addr == fp->ipq_src.s_addr &&
            ip->ip_dst.s_addr == fp->ipq_dst.s_addr &&
            ip->ip_p == fp->ipq_p)
            goto found;

    fp = NULL;

    /*
     * Attempt to trim the number of allocated fragment queues if it
     * exceeds the administrative limit.
     */
    if ((nipq > maxnipq) && (maxnipq > 0))
    {
        /*
         * drop something from the tail of the current queue
         * before proceeding further
         */
        struct ipq_t *q = TAILQ_LAST(head, ipqhead);
        if (q == NULL)
        {
            /* gak */
            /* This bucket is empty; evict from the first non-empty bucket. */
            for (i = 0; i < IPREASS_NHASH; i++)
            {
                struct ipq_t *r = TAILQ_LAST(&ipq[i], ipqhead);
                if (r)
                {
                    ipstat.ips_fragtimeout += r->ipq_nfrags;
                    ip_freef(pData, &ipq[i], r);
                    break;
                }
            }
        }
        else
        {
            ipstat.ips_fragtimeout += q->ipq_nfrags;
            ip_freef(pData, head, q);
        }
    }

found:
    /*
     * Adjust ip_len to not reflect header,
     * convert offset of this to bytes.
     */
    ip->ip_len -= hlen;
    if (ip->ip_off & IP_MF)
    {
        /*
         * Make sure that fragments have a data length
         * that's a non-zero multiple of 8 bytes.
         */
        if (ip->ip_len == 0 || (ip->ip_len & 0x7) != 0)
        {
            ipstat.ips_toosmall++; /* XXX */
            goto dropfrag;
        }
        m->m_flags |= M_FRAG;
    }
    else
        m->m_flags &= ~M_FRAG;
    /* ip_off field holds the offset in 8-byte units; convert to bytes. */
    ip->ip_off <<= 3;


    /*
     * Attempt reassembly; if it succeeds, proceed.
     * ip_reass() will return a different mbuf.
     */
    ipstat.ips_fragments++;

    /* Previous ip_reass() started here. */
    /*
     * Presence of header sizes in mbufs
     * would confuse code below.
     */
    /* From here on the mbuf covers the fragment payload only; the IP
     * header is still reachable through GETIP() below. */
    m->m_data += hlen;
    m->m_len -= hlen;

    /*
     * If first fragment to arrive, create a reassembly queue.
     */
    if (fp == NULL)
    {
        fp = RTMemAlloc(sizeof(struct ipq_t));
        if (fp == NULL)
            goto dropfrag;
        TAILQ_INSERT_HEAD(head, fp, ipq_list);
        nipq++;
        fp->ipq_nfrags = 1;
        fp->ipq_ttl = IPFRAGTTL;
        fp->ipq_p = ip->ip_p;
        fp->ipq_id = ip->ip_id;
        fp->ipq_src = ip->ip_src;
        fp->ipq_dst = ip->ip_dst;
        fp->ipq_frags = m;
        m->m_nextpkt = NULL;
        goto done;
    }
    else
    {
        fp->ipq_nfrags++;
    }

/* Recover the IP header of a queued fragment (stashed before m_data
 * was advanced past it above). */
#ifndef VBOX_WITH_SLIRP_BSD_MBUF
#define GETIP(m) ((struct ip*)(MBUF_IP_HEADER(m)))
#else
#define GETIP(m) ((struct ip*)((m)->m_pkthdr.header))
#endif


    /*
     * Find a segment which begins after this one does.
     */
    for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt)
        if (GETIP(q)->ip_off > ip->ip_off)
            break;

    /*
     * If there is a preceding segment, it may provide some of
     * our data already. If so, drop the data from the incoming
     * segment. If it provides all of our data, drop us, otherwise
     * stick new segment in the proper place.
     *
     * If some of the data is dropped from the the preceding
     * segment, then it's checksum is invalidated.
     */
    if (p)
    {
        /* i = number of bytes of 'm' already covered by 'p'. */
        i = GETIP(p)->ip_off + GETIP(p)->ip_len - ip->ip_off;
        if (i > 0)
        {
            if (i >= ip->ip_len)
                goto dropfrag;
            m_adj(m, i);
            ip->ip_off += i;
            ip->ip_len -= i;
        }
        m->m_nextpkt = p->m_nextpkt;
        p->m_nextpkt = m;
    }
    else
    {
        m->m_nextpkt = fp->ipq_frags;
        fp->ipq_frags = m;
    }

    /*
     * While we overlap succeeding segments trim them or,
     * if they are completely covered, dequeue them.
     */
    for (; q != NULL && ip->ip_off + ip->ip_len > GETIP(q)->ip_off;
         q = nq)
    {
        /* i = number of leading bytes of 'q' duplicated by 'm'. */
        i = (ip->ip_off + ip->ip_len) - GETIP(q)->ip_off;
        if (i < GETIP(q)->ip_len)
        {
            GETIP(q)->ip_len -= i;
            GETIP(q)->ip_off += i;
            m_adj(q, i);
            break;
        }
        /* 'q' is fully covered by 'm': unlink and discard it. */
        nq = q->m_nextpkt;
        m->m_nextpkt = nq;
        ipstat.ips_fragdropped++;
        fp->ipq_nfrags--;
        m_freem(pData, q);
    }

    /*
     * Check for complete reassembly and perform frag per packet
     * limiting.
     *
     * Frag limiting is performed here so that the nth frag has
     * a chance to complete the packet before we drop the packet.
     * As a result, n+1 frags are actually allowed per packet, but
     * only n will ever be stored. (n = maxfragsperpacket.)
     *
     */
    next = 0;
    for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt)
    {
        /* A gap before 'q' means the datagram is still incomplete. */
        if (GETIP(q)->ip_off != next)
        {
            if (fp->ipq_nfrags > maxfragsperpacket)
            {
                ipstat.ips_fragdropped += fp->ipq_nfrags;
                ip_freef(pData, head, fp);
            }
            goto done;
        }
        next += GETIP(q)->ip_len;
    }
    /* Make sure the last packet didn't have the IP_MF flag */
    if (p->m_flags & M_FRAG)
    {
        if (fp->ipq_nfrags > maxfragsperpacket)
        {
            ipstat.ips_fragdropped += fp->ipq_nfrags;
            ip_freef(pData, head, fp);
        }
        goto done;
    }

    /*
     * Reassembly is complete. Make sure the packet is a sane size.
     */
    q = fp->ipq_frags;
    ip = GETIP(q);
    hlen = ip->ip_hl << 2;
    if (next + hlen > IP_MAXPACKET)
    {
        ipstat.ips_fragdropped += fp->ipq_nfrags;
        ip_freef(pData, head, fp);
        goto done;
    }

    /*
     * Concatenate fragments.
     */
    m = q;
    nq = q->m_nextpkt;
    q->m_nextpkt = NULL;
    for (q = nq; q != NULL; q = nq)
    {
        nq = q->m_nextpkt;
        q->m_nextpkt = NULL;
        m_cat(pData, m, q);

        /* Temporarily expose this fragment's IP header so m_cat sees
         * only payload bytes, then hide it again. */
        m->m_len += hlen;
        m->m_data -= hlen;
        ip = mtod(m, struct ip *); /*update ip pointer */
        hlen = ip->ip_hl << 2;
        m->m_len -= hlen;
        m->m_data += hlen;
    }
    /* Make the (first fragment's) IP header visible again. */
    m->m_len += hlen;
    m->m_data -= hlen;

    /*
     * Create header for new ip packet by modifying header of first
     * packet; dequeue and discard fragment reassembly header.
     * Make header visible.
     */

    ip->ip_len = next;
    ip->ip_src = fp->ipq_src;
    ip->ip_dst = fp->ipq_dst;
    TAILQ_REMOVE(head, fp, ipq_list);
    nipq--;
    RTMemFree(fp);

    Assert((ip->ip_len == next));
    /* some debugging cruft by sklower, below, will go away soon */
#if 0
    if (m->m_flags & M_PKTHDR) /* XXX this should be done elsewhere */
        m_fixhdr(m);
#endif
    ipstat.ips_reassembled++;
    return (m);

dropfrag:
    ipstat.ips_fragdropped++;
    if (fp != NULL)
        fp->ipq_nfrags--;
    m_freem(pData, m);

done:
    return NULL;

#undef GETIP
}
546
547void
548ip_freef(PNATState pData, struct ipqhead *fhp, struct ipq_t *fp)
549{
550 struct mbuf *q;
551
552 while (fp->ipq_frags)
553 {
554 q = fp->ipq_frags;
555 fp->ipq_frags = q->m_nextpkt;
556 m_freem(pData, q);
557 }
558 TAILQ_REMOVE(fhp, fp, ipq_list);
559 RTMemFree(fp);
560 nipq--;
561}
562
563/*
564 * IP timer processing;
565 * if a timer expires on a reassembly
566 * queue, discard it.
567 */
568void
569ip_slowtimo(PNATState pData)
570{
571 register struct ipq_t *fp;
572
573 /* XXX: the fragment expiration is the same but requier
574 * additional loop see (see ip_input.c in FreeBSD tree)
575 */
576 int i;
577 DEBUG_CALL("ip_slowtimo");
578 for (i = 0; i < IPREASS_NHASH; i++)
579 {
580 for(fp = TAILQ_FIRST(&ipq[i]); fp;)
581 {
582 struct ipq_t *fpp;
583
584 fpp = fp;
585 fp = TAILQ_NEXT(fp, ipq_list);
586 if(--fpp->ipq_ttl == 0)
587 {
588 ipstat.ips_fragtimeout += fpp->ipq_nfrags;
589 ip_freef(pData, &ipq[i], fpp);
590 }
591 }
592 }
593 /*
594 * If we are over the maximum number of fragments
595 * (due to the limit being lowered), drain off
596 * enough to get down to the new limit.
597 */
598 if (maxnipq >= 0 && nipq > maxnipq)
599 {
600 for (i = 0; i < IPREASS_NHASH; i++)
601 {
602 while (nipq > maxnipq && !TAILQ_EMPTY(&ipq[i]))
603 {
604 ipstat.ips_fragdropped += TAILQ_FIRST(&ipq[i])->ipq_nfrags;
605 ip_freef(pData, &ipq[i], TAILQ_FIRST(&ipq[i]));
606 }
607 }
608 }
609}
610
611
612/*
613 * Strip out IP options, at higher
614 * level protocol in the kernel.
615 * Second argument is buffer to which options
616 * will be moved, and return value is their length.
617 * (XXX) should be deleted; last arg currently ignored.
618 */
619void
620ip_stripoptions(struct mbuf *m, struct mbuf *mopt)
621{
622 register int i;
623 struct ip *ip = mtod(m, struct ip *);
624 register caddr_t opts;
625 int olen;
626
627 olen = (ip->ip_hl<<2) - sizeof(struct ip);
628 opts = (caddr_t)(ip + 1);
629 i = m->m_len - (sizeof(struct ip) + olen);
630 memcpy(opts, opts + olen, (unsigned)i);
631 m->m_len -= olen;
632
633 ip->ip_hl = sizeof(struct ip) >> 2;
634}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette