VirtualBox

source: vbox/trunk/src/VBox/Devices/Network/slirp/ip_input.c@ 13835

Last change on this file since 13835 was 13783, checked in by vboxsync, 16 years ago

per-socket and per-mbuf mutexes are removed;
only the global locks remain in place

  • Property svn:eol-style set to native
File size: 18.0 KB
Line 
1/*
2 * Copyright (c) 1982, 1986, 1988, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by the University of
16 * California, Berkeley and its contributors.
17 * 4. Neither the name of the University nor the names of its contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
32 *
33 * @(#)ip_input.c 8.2 (Berkeley) 1/4/94
34 * ip_input.c,v 1.11 1994/11/16 10:17:08 jkh Exp
35 */
36
37/*
38 * Changes and additions relating to SLiRP are
39 * Copyright (c) 1995 Danny Gasparovski.
40 *
41 * Please read the file COPYRIGHT for the
42 * terms and conditions of the copyright.
43 */
44
45#include <slirp.h>
46#include "ip_icmp.h"
47
48
49/*
50 * IP initialization: fill in IP protocol switch table.
51 * All protocols not implemented in kernel go to raw IP protocol handler.
52 */
53void
54ip_init(PNATState pData)
55{
56 ipq.next = ipq.prev = ptr_to_u32(pData, &ipq);
57 ip_currid = tt.tv_sec & 0xffff;
58 udp_init(pData);
59 tcp_init(pData);
60}
61
62/*
63 * Ip input routine. Checksum and byte swap header. If fragmented
64 * try to reassemble. Process options. Pass to next level.
65 */
66void
67ip_input(PNATState pData, struct mbuf *m)
68{
69 register struct ip *ip;
70 int hlen;
71#ifdef VBOX_WITH_SYNC_SLIRP
72 int rc;
73#endif
74
75 DEBUG_CALL("ip_input");
76 DEBUG_ARG("m = %lx", (long)m);
77 DEBUG_ARG("m_len = %d", m->m_len);
78
79
80 ipstat.ips_total++;
81
82 if (m->m_len < sizeof (struct ip)) {
83 ipstat.ips_toosmall++;
84 return;
85 }
86
87 ip = mtod(m, struct ip *);
88
89 if (ip->ip_v != IPVERSION) {
90 ipstat.ips_badvers++;
91 goto bad;
92 }
93
94 hlen = ip->ip_hl << 2;
95 if (hlen<sizeof(struct ip ) || hlen>m->m_len) {/* min header length */
96 ipstat.ips_badhlen++; /* or packet too short */
97 goto bad;
98 }
99
100 /* keep ip header intact for ICMP reply
101 * ip->ip_sum = cksum(m, hlen);
102 * if (ip->ip_sum) {
103 */
104 if(cksum(m,hlen)) {
105 ipstat.ips_badsum++;
106 goto bad;
107 }
108
109 /*
110 * Convert fields to host representation.
111 */
112 NTOHS(ip->ip_len);
113 if (ip->ip_len < hlen) {
114 ipstat.ips_badlen++;
115 goto bad;
116 }
117 NTOHS(ip->ip_id);
118 NTOHS(ip->ip_off);
119
120 /*
121 * Check that the amount of data in the buffers
122 * is as at least much as the IP header would have us expect.
123 * Trim mbufs if longer than we expect.
124 * Drop packet if shorter than we expect.
125 */
126 if (m->m_len < ip->ip_len) {
127 ipstat.ips_tooshort++;
128 goto bad;
129 }
130 /* Should drop packet if mbuf too long? hmmm... */
131 if (m->m_len > ip->ip_len)
132 m_adj(m, ip->ip_len - m->m_len);
133
134 /* check ip_ttl for a correct ICMP reply */
135 if(ip->ip_ttl==0 || ip->ip_ttl==1) {
136 icmp_error(pData, m, ICMP_TIMXCEED,ICMP_TIMXCEED_INTRANS, 0,"ttl");
137 goto bad;
138 }
139
140 /*
141 * Process options and, if not destined for us,
142 * ship it on. ip_dooptions returns 1 when an
143 * error was detected (causing an icmp message
144 * to be sent and the original packet to be freed).
145 */
146/* We do no IP options */
147/* if (hlen > sizeof (struct ip) && ip_dooptions(m))
148 * goto next;
149 */
150 /*
151 * If offset or IP_MF are set, must reassemble.
152 * Otherwise, nothing need be done.
153 * (We could look in the reassembly queue to see
154 * if the packet was previously fragmented,
155 * but it's not worth the time; just let them time out.)
156 *
157 * XXX This should fail, don't fragment yet
158 */
159 if (ip->ip_off &~ IP_DF) {
160 register struct ipq_t *fp;
161 /*
162 * Look for queue of fragments
163 * of this datagram.
164 */
165 for (fp = u32_to_ptr(pData, ipq.next, struct ipq_t *); fp != &ipq;
166 fp = u32_to_ptr(pData, fp->next, struct ipq_t *))
167 if (ip->ip_id == fp->ipq_id &&
168 ip->ip_src.s_addr == fp->ipq_src.s_addr &&
169 ip->ip_dst.s_addr == fp->ipq_dst.s_addr &&
170 ip->ip_p == fp->ipq_p)
171 goto found;
172 fp = 0;
173 found:
174
175 /*
176 * Adjust ip_len to not reflect header,
177 * set ip_mff if more fragments are expected,
178 * convert offset of this to bytes.
179 */
180 ip->ip_len -= hlen;
181 if (ip->ip_off & IP_MF)
182 ((struct ipasfrag *)ip)->ipf_mff |= 1;
183 else
184 ((struct ipasfrag *)ip)->ipf_mff &= ~1;
185
186 ip->ip_off <<= 3;
187
188 /*
189 * If datagram marked as having more fragments
190 * or if this is not the first fragment,
191 * attempt reassembly; if it succeeds, proceed.
192 */
193 if (((struct ipasfrag *)ip)->ipf_mff & 1 || ip->ip_off) {
194 ipstat.ips_fragments++;
195 ip = ip_reass(pData, (struct ipasfrag *)ip, fp);
196 if (ip == 0) {
197 return;
198 }
199 ipstat.ips_reassembled++;
200 m = dtom(pData, ip);
201 } else
202 if (fp)
203 ip_freef(pData, fp);
204
205 } else
206 ip->ip_len -= hlen;
207
208 /*
209 * Switch out to protocol's input routine.
210 */
211 ipstat.ips_delivered++;
212 switch (ip->ip_p) {
213 case IPPROTO_TCP:
214 tcp_input(pData, m, hlen, (struct socket *)NULL);
215 break;
216 case IPPROTO_UDP:
217 udp_input(pData, m, hlen);
218 break;
219 case IPPROTO_ICMP:
220 icmp_input(pData, m, hlen);
221 break;
222 default:
223 ipstat.ips_noproto++;
224 m_free(pData, m);
225 }
226 return;
227bad:
228 m_freem(pData, m);
229 return;
230}
231
/*
 * Take incoming datagram fragment and try to
 * reassemble it into whole datagram.  If a chain for
 * reassembly of this datagram already exists, then it
 * is given as fp; otherwise have to make a chain.
 *
 * On entry the caller (ip_input) has already converted ip->ip_off to a
 * byte offset and subtracted the header length from ip->ip_len.
 * Returns the reassembled datagram (header made visible again), or
 * NULL when more fragments are still outstanding or this fragment was
 * dropped.
 */
struct ip *
ip_reass(PNATState pData, register struct ipasfrag *ip, register struct ipq_t *fp)
{
    register struct mbuf *m = dtom(pData, ip);
    register struct ipasfrag *q;
    int hlen = ip->ip_hl << 2;
    int i, next;

    DEBUG_CALL("ip_reass");
    DEBUG_ARG("ip = %lx", (long)ip);
    DEBUG_ARG("fp = %lx", (long)fp);
    DEBUG_ARG("m = %lx", (long)m);

    /*
     * Presence of header sizes in mbufs
     * would confuse code below.
     * Fragment m_data is concatenated.
     */
    m->m_data += hlen;
    m->m_len -= hlen;

    /*
     * If first fragment to arrive, create a reassembly queue.
     * The queue header lives in an mbuf of its own and is linked
     * into the global 'ipq' list; its fragment list starts circular
     * pointing at itself.
     */
    if (fp == 0) {
        struct mbuf *t;
        if ((t = m_get(pData)) == NULL) goto dropfrag;
        fp = mtod(t, struct ipq_t *);
        insque_32(pData, fp, &ipq);
        fp->ipq_ttl = IPFRAGTTL;
        fp->ipq_p = ip->ip_p;
        fp->ipq_id = ip->ip_id;
        fp->ipq_next = fp->ipq_prev = ptr_to_u32(pData, (struct ipasfrag *)fp);
        fp->ipq_src = ((struct ip *)ip)->ip_src;
        fp->ipq_dst = ((struct ip *)ip)->ip_dst;
        q = (struct ipasfrag *)fp;
        goto insert;
    }

    /*
     * Find a segment which begins after this one does.
     * (Fragment list is kept sorted by ip_off.)
     */
    for (q = u32_to_ptr(pData, fp->ipq_next, struct ipasfrag *); q != (struct ipasfrag *)fp;
         q = u32_to_ptr(pData, q->ipf_next, struct ipasfrag *))
        if (q->ip_off > ip->ip_off)
            break;

    /*
     * If there is a preceding segment, it may provide some of
     * our data already.  If so, drop the data from the incoming
     * segment.  If it provides all of our data, drop us.
     */
    if (u32_to_ptr(pData, q->ipf_prev, struct ipq_t *) != fp) {
        /* i = overlap between the predecessor's end and our start */
        i = (u32_to_ptr(pData, q->ipf_prev, struct ipasfrag *))->ip_off +
            (u32_to_ptr(pData, q->ipf_prev, struct ipasfrag *))->ip_len - ip->ip_off;
        if (i > 0) {
            if (i >= ip->ip_len)
                goto dropfrag;
            m_adj(dtom(pData, ip), i);
            ip->ip_off += i;
            ip->ip_len -= i;
        }
    }

    /*
     * While we overlap succeeding segments trim them or,
     * if they are completely covered, dequeue them.
     * Note: q is advanced FIRST, then the fully-covered node is
     * freed/unlinked through q->ipf_prev.
     */
    while (q != (struct ipasfrag *)fp && ip->ip_off + ip->ip_len > q->ip_off) {
        i = (ip->ip_off + ip->ip_len) - q->ip_off;
        if (i < q->ip_len) {
            /* Partial overlap: trim the front of the following segment. */
            q->ip_len -= i;
            q->ip_off += i;
            m_adj(dtom(pData, q), i);
            break;
        }
        q = u32_to_ptr(pData, q->ipf_next, struct ipasfrag *);
        m_freem(pData, dtom(pData, u32_to_ptr(pData, q->ipf_prev, struct ipasfrag *)));
        ip_deq(pData, u32_to_ptr(pData, q->ipf_prev, struct ipasfrag *));
    }

insert:
    /*
     * Stick new segment in its place;
     * check for complete reassembly.
     */
    ip_enq(pData, ip, u32_to_ptr(pData, q->ipf_prev, struct ipasfrag *));
    next = 0;
    /* Walk the chain; any gap (ip_off != running total) means we are
     * still waiting for fragments. */
    for (q = u32_to_ptr(pData, fp->ipq_next, struct ipasfrag *); q != (struct ipasfrag *)fp;
         q = u32_to_ptr(pData, q->ipf_next, struct ipasfrag *)) {
        if (q->ip_off != next)
            return (0);
        next += q->ip_len;
    }
    /* Last fragment (q->ipf_prev of the head) must not expect more. */
    if (u32_to_ptr(pData, q->ipf_prev, struct ipasfrag *)->ipf_mff & 1)
        return (0);

    /*
     * Reassembly is complete; concatenate fragments.
     */
    q = u32_to_ptr(pData, fp->ipq_next, struct ipasfrag *);
    m = dtom(pData, q);

    q = u32_to_ptr(pData, q->ipf_next, struct ipasfrag *);
    while (q != (struct ipasfrag *)fp) {
        struct mbuf *t;
        t = dtom(pData, q);
        q = u32_to_ptr(pData, q->ipf_next, struct ipasfrag *);
        m_cat(pData, m, t);
    }

    /*
     * Create header for new ip packet by
     * modifying header of first packet;
     * dequeue and discard fragment reassembly header.
     * Make header visible.
     */
    ip = u32_to_ptr(pData, fp->ipq_next, struct ipasfrag *);

    /*
     * If the fragments concatenated to an mbuf that's
     * bigger than the total size of the fragment, then and
     * m_ext buffer was alloced.  But fp->ipq_next points to
     * the old buffer (in the mbuf), so we must point ip
     * into the new buffer.
     */
    if (m->m_flags & M_EXT) {
        int delta;
        delta = (char *)ip - m->m_dat;
        ip = (struct ipasfrag *)(m->m_ext + delta);
    }

    /* DEBUG_ARG("ip = %lx", (long)ip);
     * ip=(struct ipasfrag *)m->m_data; */

    /* 'next' now holds the total payload length of the datagram. */
    ip->ip_len = next;
    ip->ipf_mff &= ~1;
    ((struct ip *)ip)->ip_src = fp->ipq_src;
    ((struct ip *)ip)->ip_dst = fp->ipq_dst;
    remque_32(pData, fp);
    (void) m_free(pData, dtom(pData, fp));
    /* Re-expose the IP header that was hidden at function entry. */
    m = dtom(pData, ip);
    m->m_len += (ip->ip_hl << 2);
    m->m_data -= (ip->ip_hl << 2);

    return ((struct ip *)ip);

dropfrag:
    ipstat.ips_fragdropped++;
    m_freem(pData, m);
    return (0);
}
390
391/*
392 * Free a fragment reassembly header and all
393 * associated datagrams.
394 */
395void
396ip_freef(PNATState pData, struct ipq_t *fp)
397{
398 register struct ipasfrag *q, *p;
399
400 for (q = u32_to_ptr(pData, fp->ipq_next, struct ipasfrag *); q != (struct ipasfrag *)fp;
401 q = p) {
402 p = u32_to_ptr(pData, q->ipf_next, struct ipasfrag *);
403 ip_deq(pData, q);
404 m_freem(pData, dtom(pData, q));
405 }
406 remque_32(pData, fp);
407 (void) m_free(pData, dtom(pData, fp));
408}
409
410/*
411 * Put an ip fragment on a reassembly chain.
412 * Like insque, but pointers in middle of structure.
413 */
414void
415ip_enq(PNATState pData, register struct ipasfrag *p, register struct ipasfrag *prev)
416{
417 DEBUG_CALL("ip_enq");
418 DEBUG_ARG("prev = %lx", (long)prev);
419 p->ipf_prev = ptr_to_u32(pData, prev);
420 p->ipf_next = prev->ipf_next;
421 u32_to_ptr(pData, prev->ipf_next, struct ipasfrag *)->ipf_prev = ptr_to_u32(pData, p);
422 prev->ipf_next = ptr_to_u32(pData, p);
423}
424
425/*
426 * To ip_enq as remque is to insque.
427 */
428void
429ip_deq(PNATState pData, register struct ipasfrag *p)
430{
431 struct ipasfrag *prev = u32_to_ptr(pData, p->ipf_prev, struct ipasfrag *);
432 struct ipasfrag *next = u32_to_ptr(pData, p->ipf_next, struct ipasfrag *);
433 u32ptr_done(pData, prev->ipf_next, p);
434 prev->ipf_next = p->ipf_next;
435 next->ipf_prev = p->ipf_prev;
436}
437
/*
 * IP timer processing;
 * if a timer expires on a reassembly
 * queue, discard it.
 */
void
ip_slowtimo(PNATState pData)
{
    register struct ipq_t *fp;

    DEBUG_CALL("ip_slowtimo");

    fp = u32_to_ptr(pData, ipq.next, struct ipq_t *);
    if (fp == 0)
       return;

    while (fp != &ipq) {
       --fp->ipq_ttl;
       /* Advance BEFORE testing: ip_freef() destroys the node, so the
        * expired entry is reached back through fp->prev. */
       fp = u32_to_ptr(pData, fp->next, struct ipq_t *);
       if (u32_to_ptr(pData, fp->prev, struct ipq_t *)->ipq_ttl == 0) {
          ipstat.ips_fragtimeout++;
          ip_freef(pData, u32_to_ptr(pData, fp->prev, struct ipq_t *));
       }
    }
}
463
/*
 * Do option processing on a datagram,
 * possibly discarding it if bad options are encountered,
 * or forwarding it if source-routed.
 * Returns 1 if packet has been forwarded/freed,
 * 0 if the packet should be processed further.
 */

#ifdef notdef

/*
 * NOTE(review): this whole function is compiled out ('#ifdef notdef')
 * and is NOT valid C as written -- it contains deliberately broken
 * fragments ('/ * 0 origin * /', 'ifaof_ i f p foraddr', unbalanced
 * braces near the end) inherited from the original SLiRP sources.
 * It is kept verbatim as historical reference only; do not enable it.
 */
int
ip_dooptions(m)
	struct mbuf *m;
{
	register struct ip *ip = mtod(m, struct ip *);
	register u_char *cp;
	register struct ip_timestamp *ipt;
	register struct in_ifaddr *ia;
/*	int opt, optlen, cnt, off, code, type = ICMP_PARAMPROB, forward = 0; */
	int opt, optlen, cnt, off, code, type, forward = 0;
	struct in_addr *sin, dst;
typedef u_int32_t n_time;
	n_time ntime;

	dst = ip->ip_dst;
	cp = (u_char *)(ip + 1);
	/* cnt = number of option bytes following the fixed header */
	cnt = (ip->ip_hl << 2) - sizeof (struct ip);
	for (; cnt > 0; cnt -= optlen, cp += optlen) {
		opt = cp[IPOPT_OPTVAL];
		if (opt == IPOPT_EOL)
			break;
		if (opt == IPOPT_NOP)
			optlen = 1;
		else {
			optlen = cp[IPOPT_OLEN];
			if (optlen <= 0 || optlen > cnt) {
				code = &cp[IPOPT_OLEN] - (u_char *)ip;
				goto bad;
			}
		}
		switch (opt) {

		default:
			break;

		/*
		 * Source routing with record.
		 * Find interface with current destination address.
		 * If none on this machine then drop if strictly routed,
		 * or do nothing if loosely routed.
		 * Record interface address and bring up next address
		 * component.  If strictly routed make sure next
		 * address is on directly accessible net.
		 */
		case IPOPT_LSRR:
		case IPOPT_SSRR:
			if ((off = cp[IPOPT_OFFSET]) < IPOPT_MINOFF) {
				code = &cp[IPOPT_OFFSET] - (u_char *)ip;
				goto bad;
			}
			ipaddr.sin_addr = ip->ip_dst;
			ia = (struct in_ifaddr *)
				ifa_ifwithaddr((struct sockaddr *)&ipaddr);
			if (ia == 0) {
				if (opt == IPOPT_SSRR) {
					type = ICMP_UNREACH;
					code = ICMP_UNREACH_SRCFAIL;
					goto bad;
				}
				/*
				 * Loose routing, and not at next destination
				 * yet; nothing to do except forward.
				 */
				break;
			}
			off--;			/ * 0 origin * /
			if (off > optlen - sizeof(struct in_addr)) {
				/*
				 * End of source route.  Should be for us.
				 */
				save_rte(cp, ip->ip_src);
				break;
			}
			/*
			 * locate outgoing interface
			 */
			bcopy((caddr_t)(cp + off), (caddr_t)&ipaddr.sin_addr,
			    sizeof(ipaddr.sin_addr));
			if (opt == IPOPT_SSRR) {
#define INA struct in_ifaddr *
#define SA struct sockaddr *
			    if ((ia = (INA)ifa_ifwithdstaddr((SA)&ipaddr)) == 0)
				ia = (INA)ifa_ifwithnet((SA)&ipaddr);
			} else
				ia = ip_rtaddr(ipaddr.sin_addr);
			if (ia == 0) {
				type = ICMP_UNREACH;
				code = ICMP_UNREACH_SRCFAIL;
				goto bad;
			}
			ip->ip_dst = ipaddr.sin_addr;
			bcopy((caddr_t)&(IA_SIN(ia)->sin_addr),
			    (caddr_t)(cp + off), sizeof(struct in_addr));
			cp[IPOPT_OFFSET] += sizeof(struct in_addr);
			/*
			 * Let ip_intr's mcast routing check handle mcast pkts
			 */
			forward = !IN_MULTICAST(ntohl(ip->ip_dst.s_addr));
			break;

		case IPOPT_RR:
			if ((off = cp[IPOPT_OFFSET]) < IPOPT_MINOFF) {
				code = &cp[IPOPT_OFFSET] - (u_char *)ip;
				goto bad;
			}
			/*
			 * If no space remains, ignore.
			 */
			off--;			 * 0 origin *
			if (off > optlen - sizeof(struct in_addr))
				break;
			bcopy((caddr_t)(&ip->ip_dst), (caddr_t)&ipaddr.sin_addr,
			    sizeof(ipaddr.sin_addr));
			/*
			 * locate outgoing interface; if we're the destination,
			 * use the incoming interface (should be same).
			 */
			if ((ia = (INA)ifa_ifwithaddr((SA)&ipaddr)) == 0 &&
			    (ia = ip_rtaddr(ipaddr.sin_addr)) == 0) {
				type = ICMP_UNREACH;
				code = ICMP_UNREACH_HOST;
				goto bad;
			}
			bcopy((caddr_t)&(IA_SIN(ia)->sin_addr),
			    (caddr_t)(cp + off), sizeof(struct in_addr));
			cp[IPOPT_OFFSET] += sizeof(struct in_addr);
			break;

		case IPOPT_TS:
			code = cp - (u_char *)ip;
			ipt = (struct ip_timestamp *)cp;
			if (ipt->ipt_len < 5)
				goto bad;
			if (ipt->ipt_ptr > ipt->ipt_len - sizeof (int32_t)) {
				if (++ipt->ipt_oflw == 0)
					goto bad;
				break;
			}
			sin = (struct in_addr *)(cp + ipt->ipt_ptr - 1);
			switch (ipt->ipt_flg) {

			case IPOPT_TS_TSONLY:
				break;

			case IPOPT_TS_TSANDADDR:
				if (ipt->ipt_ptr + sizeof(n_time) +
				    sizeof(struct in_addr) > ipt->ipt_len)
					goto bad;
				ipaddr.sin_addr = dst;
				ia = (INA)ifaof_ i f p foraddr((SA)&ipaddr,
							    m->m_pkthdr.rcvif);
				if (ia == 0)
					continue;
				bcopy((caddr_t)&IA_SIN(ia)->sin_addr,
				    (caddr_t)sin, sizeof(struct in_addr));
				ipt->ipt_ptr += sizeof(struct in_addr);
				break;

			case IPOPT_TS_PRESPEC:
				if (ipt->ipt_ptr + sizeof(n_time) +
				    sizeof(struct in_addr) > ipt->ipt_len)
					goto bad;
				bcopy((caddr_t)sin, (caddr_t)&ipaddr.sin_addr,
				    sizeof(struct in_addr));
				if (ifa_ifwithaddr((SA)&ipaddr) == 0)
					continue;
				ipt->ipt_ptr += sizeof(struct in_addr);
				break;

			default:
				goto bad;
			}
			ntime = iptime();
			bcopy((caddr_t)&ntime, (caddr_t)cp + ipt->ipt_ptr - 1,
			    sizeof(n_time));
			ipt->ipt_ptr += sizeof(n_time);
		}
	}
	if (forward) {
		ip_forward(m, 1);
		return (1);
	}
	/* NOTE(review): the two stray closing braces below are part of the
	 * original (broken, never-compiled) text and are preserved as-is. */
	}
	}
	return (0);
bad:
	/* ip->ip_len -= ip->ip_hl << 2;   XXX icmp_error adds in hdr length */

/* Not yet */
 	icmp_error(m, type, code, 0, 0);

	ipstat.ips_badoptions++;
	return (1);
}

#endif /* notdef */
670
671/*
672 * Strip out IP options, at higher
673 * level protocol in the kernel.
674 * Second argument is buffer to which options
675 * will be moved, and return value is their length.
676 * (XXX) should be deleted; last arg currently ignored.
677 */
678void
679ip_stripoptions(m, mopt)
680 register struct mbuf *m;
681 struct mbuf *mopt;
682{
683 register int i;
684 struct ip *ip = mtod(m, struct ip *);
685 register caddr_t opts;
686 int olen;
687
688 olen = (ip->ip_hl<<2) - sizeof (struct ip);
689 opts = (caddr_t)(ip + 1);
690 i = m->m_len - (sizeof (struct ip) + olen);
691 memcpy(opts, opts + olen, (unsigned)i);
692 m->m_len -= olen;
693
694 ip->ip_hl = sizeof(struct ip) >> 2;
695}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette