VirtualBox

source: vbox/trunk/src/VBox/Devices/Network/slirp/ip_input.c@ 13384

Last change on this file since 13384 was 1076, checked in by vboxsync, 18 years ago

Removed tons of ifdef VBOX conditionals to make slirp readable again

  • Property svn:eol-style set to native
File size: 17.9 KB
/*
 * Copyright (c) 1982, 1986, 1988, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)ip_input.c  8.2 (Berkeley) 1/4/94
 * ip_input.c,v 1.11 1994/11/16 10:17:08 jkh Exp
 */

/*
 * Changes and additions relating to SLiRP are
 * Copyright (c) 1995 Danny Gasparovski.
 *
 * Please read the file COPYRIGHT for the
 * terms and conditions of the copyright.
 */

#include <slirp.h>
#include "ip_icmp.h"

/*
 * IP initialization: fill in IP protocol switch table.
 * All protocols not implemented in kernel go to raw IP protocol handler.
 */
void
ip_init(PNATState pData)
{
    ipq.next = ipq.prev = ptr_to_u32(pData, &ipq);
    ip_currid = tt.tv_sec & 0xffff;
    udp_init(pData);
    tcp_init(pData);
}

/*
 * Ip input routine.  Checksum and byte swap header.  If fragmented
 * try to reassemble.  Process options.  Pass to next level.
 */
void
ip_input(PNATState pData, struct mbuf *m)
{
    register struct ip *ip;
    int hlen;

    DEBUG_CALL("ip_input");
    DEBUG_ARG("m = %lx", (long)m);
    DEBUG_ARG("m_len = %d", m->m_len);

    ipstat.ips_total++;

    if (m->m_len < sizeof (struct ip)) {
        ipstat.ips_toosmall++;
        return;
    }

    ip = mtod(m, struct ip *);

    if (ip->ip_v != IPVERSION) {
        ipstat.ips_badvers++;
        goto bad;
    }

    hlen = ip->ip_hl << 2;
    if (hlen < sizeof(struct ip) || hlen > m->m_len) { /* min header length */
        ipstat.ips_badhlen++;                          /* or packet too short */
        goto bad;
    }

    /* keep ip header intact for ICMP reply
     * ip->ip_sum = cksum(m, hlen);
     * if (ip->ip_sum) {
     */
    if (cksum(m, hlen)) {
        ipstat.ips_badsum++;
        goto bad;
    }

    /*
     * Convert fields to host representation.
     */
    NTOHS(ip->ip_len);
    if (ip->ip_len < hlen) {
        ipstat.ips_badlen++;
        goto bad;
    }
    NTOHS(ip->ip_id);
    NTOHS(ip->ip_off);

    /*
     * Check that the amount of data in the buffers
     * is at least as much as the IP header would have us expect.
     * Trim mbufs if longer than we expect.
     * Drop packet if shorter than we expect.
     */
    if (m->m_len < ip->ip_len) {
        ipstat.ips_tooshort++;
        goto bad;
    }
    /* Should drop packet if mbuf too long? hmmm... */
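    /* Note: m_adj() with a negative count trims the excess bytes from the tail of the mbuf. */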
    if (m->m_len > ip->ip_len)
        m_adj(m, ip->ip_len - m->m_len);

    /* check ip_ttl for a correct ICMP reply */
    if (ip->ip_ttl == 0 || ip->ip_ttl == 1) {
        icmp_error(pData, m, ICMP_TIMXCEED, ICMP_TIMXCEED_INTRANS, 0, "ttl");
        goto bad;
    }

    /*
     * Process options and, if not destined for us,
     * ship it on.  ip_dooptions returns 1 when an
     * error was detected (causing an icmp message
     * to be sent and the original packet to be freed).
     */
    /* We do no IP options */
    /* if (hlen > sizeof (struct ip) && ip_dooptions(m))
     *     goto next;
     */
    /*
     * If offset or IP_MF are set, must reassemble.
     * Otherwise, nothing need be done.
     * (We could look in the reassembly queue to see
     * if the packet was previously fragmented,
     * but it's not worth the time; just let them time out.)
     *
     * XXX This should fail, don't fragment yet
     */
    if (ip->ip_off &~ IP_DF) {
        register struct ipq_t *fp;
        /*
         * Look for queue of fragments
         * of this datagram.
         */
        for (fp = u32_to_ptr(pData, ipq.next, struct ipq_t *); fp != &ipq;
             fp = u32_to_ptr(pData, fp->next, struct ipq_t *))
            if (ip->ip_id == fp->ipq_id &&
                ip->ip_src.s_addr == fp->ipq_src.s_addr &&
                ip->ip_dst.s_addr == fp->ipq_dst.s_addr &&
                ip->ip_p == fp->ipq_p)
                goto found;
        fp = 0;
    found:

        /*
         * Adjust ip_len to not reflect header,
         * set ip_mff if more fragments are expected,
         * convert offset of this to bytes.
         */
        ip->ip_len -= hlen;
        if (ip->ip_off & IP_MF)
            ((struct ipasfrag *)ip)->ipf_mff |= 1;
        else
            ((struct ipasfrag *)ip)->ipf_mff &= ~1;

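        /* The fragment offset field counts units of 8 bytes; convert it to a byte offset. */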
        ip->ip_off <<= 3;

        /*
         * If datagram marked as having more fragments
         * or if this is not the first fragment,
         * attempt reassembly; if it succeeds, proceed.
         */
        if (((struct ipasfrag *)ip)->ipf_mff & 1 || ip->ip_off) {
            ipstat.ips_fragments++;
            ip = ip_reass(pData, (struct ipasfrag *)ip, fp);
            if (ip == 0)
                return;
            ipstat.ips_reassembled++;
            m = dtom(pData, ip);
        } else
            if (fp)
                ip_freef(pData, fp);

    } else
        ip->ip_len -= hlen;

    /*
     * Switch out to protocol's input routine.
     */
    ipstat.ips_delivered++;
    switch (ip->ip_p) {
    case IPPROTO_TCP:
        tcp_input(pData, m, hlen, (struct socket *)NULL);
        break;
    case IPPROTO_UDP:
        udp_input(pData, m, hlen);
        break;
    case IPPROTO_ICMP:
        icmp_input(pData, m, hlen);
        break;
    default:
        ipstat.ips_noproto++;
        m_free(pData, m);
    }
    return;
bad:
    m_freem(pData, m);
    return;
}

/*
 * Take incoming datagram fragment and try to
 * reassemble it into whole datagram.  If a chain for
 * reassembly of this datagram already exists, then it
 * is given as fp; otherwise have to make a chain.
 */
struct ip *
ip_reass(PNATState pData, register struct ipasfrag *ip, register struct ipq_t *fp)
{
    register struct mbuf *m = dtom(pData, ip);
    register struct ipasfrag *q;
    int hlen = ip->ip_hl << 2;
    int i, next;

    DEBUG_CALL("ip_reass");
    DEBUG_ARG("ip = %lx", (long)ip);
    DEBUG_ARG("fp = %lx", (long)fp);
    DEBUG_ARG("m = %lx", (long)m);

    /*
     * Presence of header sizes in mbufs
     * would confuse code below.
     * Fragment m_data is concatenated.
     */
    m->m_data += hlen;
    m->m_len -= hlen;

    /*
     * If first fragment to arrive, create a reassembly queue.
     */
    if (fp == 0) {
        struct mbuf *t;
        if ((t = m_get(pData)) == NULL) goto dropfrag;
        fp = mtod(t, struct ipq_t *);
        insque_32(pData, fp, &ipq);
        fp->ipq_ttl = IPFRAGTTL;
        fp->ipq_p = ip->ip_p;
        fp->ipq_id = ip->ip_id;
        fp->ipq_next = fp->ipq_prev = ptr_to_u32(pData, (struct ipasfrag *)fp);
        fp->ipq_src = ((struct ip *)ip)->ip_src;
        fp->ipq_dst = ((struct ip *)ip)->ip_dst;
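        /* The queue head itself is linked in as a sentinel "fragment", so the
         * insertion code below needs no special case for an empty chain. */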
        q = (struct ipasfrag *)fp;
        goto insert;
    }

    /*
     * Find a segment which begins after this one does.
     */
    for (q = u32_to_ptr(pData, fp->ipq_next, struct ipasfrag *); q != (struct ipasfrag *)fp;
         q = u32_to_ptr(pData, q->ipf_next, struct ipasfrag *))
        if (q->ip_off > ip->ip_off)
            break;

    /*
     * If there is a preceding segment, it may provide some of
     * our data already.  If so, drop the data from the incoming
     * segment.  If it provides all of our data, drop us.
     */
    if (u32_to_ptr(pData, q->ipf_prev, struct ipq_t *) != fp) {
        i = (u32_to_ptr(pData, q->ipf_prev, struct ipasfrag *))->ip_off +
            (u32_to_ptr(pData, q->ipf_prev, struct ipasfrag *))->ip_len - ip->ip_off;
        if (i > 0) {
            if (i >= ip->ip_len)
                goto dropfrag;
            m_adj(dtom(pData, ip), i);
            ip->ip_off += i;
            ip->ip_len -= i;
        }
    }

    /*
     * While we overlap succeeding segments trim them or,
     * if they are completely covered, dequeue them.
     */
    while (q != (struct ipasfrag *)fp && ip->ip_off + ip->ip_len > q->ip_off) {
        i = (ip->ip_off + ip->ip_len) - q->ip_off;
        if (i < q->ip_len) {
            q->ip_len -= i;
            q->ip_off += i;
            m_adj(dtom(pData, q), i);
            break;
        }
        q = u32_to_ptr(pData, q->ipf_next, struct ipasfrag *);
        m_freem(pData, dtom(pData, u32_to_ptr(pData, q->ipf_prev, struct ipasfrag *)));
        ip_deq(pData, u32_to_ptr(pData, q->ipf_prev, struct ipasfrag *));
    }

insert:
    /*
     * Stick new segment in its place;
     * check for complete reassembly.
     */
    ip_enq(pData, ip, u32_to_ptr(pData, q->ipf_prev, struct ipasfrag *));
    next = 0;
    for (q = u32_to_ptr(pData, fp->ipq_next, struct ipasfrag *); q != (struct ipasfrag *)fp;
         q = u32_to_ptr(pData, q->ipf_next, struct ipasfrag *)) {
        if (q->ip_off != next)
            return (0);
        next += q->ip_len;
    }
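    /* All fragments are contiguous; the chain is complete only if the last
     * fragment (q->ipf_prev, since q is back at the queue head here) does not
     * have its more-fragments bit set. */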
    if (u32_to_ptr(pData, q->ipf_prev, struct ipasfrag *)->ipf_mff & 1)
        return (0);

    /*
     * Reassembly is complete; concatenate fragments.
     */
    q = u32_to_ptr(pData, fp->ipq_next, struct ipasfrag *);
    m = dtom(pData, q);

    q = u32_to_ptr(pData, q->ipf_next, struct ipasfrag *);
    while (q != (struct ipasfrag *)fp) {
        struct mbuf *t;
        t = dtom(pData, q);
        q = u32_to_ptr(pData, q->ipf_next, struct ipasfrag *);
        m_cat(pData, m, t);
    }

    /*
     * Create header for new ip packet by
     * modifying header of first packet;
     * dequeue and discard fragment reassembly header.
     * Make header visible.
     */
    ip = u32_to_ptr(pData, fp->ipq_next, struct ipasfrag *);

    /*
     * If concatenating the fragments made the data bigger than the
     * mbuf's built-in buffer, an m_ext buffer was allocated.  But
     * fp->ipq_next points to the old buffer (in the mbuf), so we
     * must point ip into the new buffer.
     */
    if (m->m_flags & M_EXT) {
        int delta;
        delta = (char *)ip - m->m_dat;
        ip = (struct ipasfrag *)(m->m_ext + delta);
    }

    /* DEBUG_ARG("ip = %lx", (long)ip);
     * ip = (struct ipasfrag *)m->m_data; */

    ip->ip_len = next;
    ip->ipf_mff &= ~1;
    ((struct ip *)ip)->ip_src = fp->ipq_src;
    ((struct ip *)ip)->ip_dst = fp->ipq_dst;
    remque_32(pData, fp);
    (void) m_free(pData, dtom(pData, fp));
    m = dtom(pData, ip);
    m->m_len += (ip->ip_hl << 2);
    m->m_data -= (ip->ip_hl << 2);

    return ((struct ip *)ip);

dropfrag:
    ipstat.ips_fragdropped++;
    m_freem(pData, m);
    return (0);
}

/*
 * Free a fragment reassembly header and all
 * associated datagrams.
 */
void
ip_freef(PNATState pData, struct ipq_t *fp)
{
    register struct ipasfrag *q, *p;

    for (q = u32_to_ptr(pData, fp->ipq_next, struct ipasfrag *); q != (struct ipasfrag *)fp;
         q = p) {
        p = u32_to_ptr(pData, q->ipf_next, struct ipasfrag *);
        ip_deq(pData, q);
        m_freem(pData, dtom(pData, q));
    }
    remque_32(pData, fp);
    (void) m_free(pData, dtom(pData, fp));
}

/*
 * Put an ip fragment on a reassembly chain.
 * Like insque, but pointers in middle of structure.
 */
void
ip_enq(PNATState pData, register struct ipasfrag *p, register struct ipasfrag *prev)
{
    DEBUG_CALL("ip_enq");
    DEBUG_ARG("prev = %lx", (long)prev);
    p->ipf_prev = ptr_to_u32(pData, prev);
    p->ipf_next = prev->ipf_next;
    u32_to_ptr(pData, prev->ipf_next, struct ipasfrag *)->ipf_prev = ptr_to_u32(pData, p);
    prev->ipf_next = ptr_to_u32(pData, p);
}

/*
 * To ip_enq as remque is to insque.
 */
void
ip_deq(PNATState pData, register struct ipasfrag *p)
{
    struct ipasfrag *prev = u32_to_ptr(pData, p->ipf_prev, struct ipasfrag *);
    struct ipasfrag *next = u32_to_ptr(pData, p->ipf_next, struct ipasfrag *);
    u32ptr_done(pData, prev->ipf_next, p);
    prev->ipf_next = p->ipf_next;
    next->ipf_prev = p->ipf_prev;
}

/*
 * IP timer processing;
 * if a timer expires on a reassembly
 * queue, discard it.
 */
void
ip_slowtimo(PNATState pData)
{
    register struct ipq_t *fp;

    DEBUG_CALL("ip_slowtimo");

    fp = u32_to_ptr(pData, ipq.next, struct ipq_t *);
    if (fp == 0)
        return;

    while (fp != &ipq) {
        --fp->ipq_ttl;
        fp = u32_to_ptr(pData, fp->next, struct ipq_t *);
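        /* fp has already been advanced, so the entry just aged (fp->prev) can be
         * freed below without losing our place in the list. */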
        if (u32_to_ptr(pData, fp->prev, struct ipq_t *)->ipq_ttl == 0) {
            ipstat.ips_fragtimeout++;
            ip_freef(pData, u32_to_ptr(pData, fp->prev, struct ipq_t *));
        }
    }
}

/*
 * Do option processing on a datagram,
 * possibly discarding it if bad options are encountered,
 * or forwarding it if source-routed.
 * Returns 1 if packet has been forwarded/freed,
 * 0 if the packet should be processed further.
 */

#ifdef notdef

int
ip_dooptions(m)
    struct mbuf *m;
{
    register struct ip *ip = mtod(m, struct ip *);
    register u_char *cp;
    register struct ip_timestamp *ipt;
    register struct in_ifaddr *ia;
    /* int opt, optlen, cnt, off, code, type = ICMP_PARAMPROB, forward = 0; */
    int opt, optlen, cnt, off, code, type, forward = 0;
    struct in_addr *sin, dst;
    typedef u_int32_t n_time;
    n_time ntime;

    dst = ip->ip_dst;
    cp = (u_char *)(ip + 1);
    cnt = (ip->ip_hl << 2) - sizeof (struct ip);
    for (; cnt > 0; cnt -= optlen, cp += optlen) {
        opt = cp[IPOPT_OPTVAL];
        if (opt == IPOPT_EOL)
            break;
        if (opt == IPOPT_NOP)
            optlen = 1;
        else {
            optlen = cp[IPOPT_OLEN];
            if (optlen <= 0 || optlen > cnt) {
                code = &cp[IPOPT_OLEN] - (u_char *)ip;
                goto bad;
            }
        }
        switch (opt) {

        default:
            break;

        /*
         * Source routing with record.
         * Find interface with current destination address.
         * If none on this machine then drop if strictly routed,
         * or do nothing if loosely routed.
         * Record interface address and bring up next address
         * component.  If strictly routed make sure next
         * address is on directly accessible net.
         */
        case IPOPT_LSRR:
        case IPOPT_SSRR:
            if ((off = cp[IPOPT_OFFSET]) < IPOPT_MINOFF) {
                code = &cp[IPOPT_OFFSET] - (u_char *)ip;
                goto bad;
            }
            ipaddr.sin_addr = ip->ip_dst;
            ia = (struct in_ifaddr *)
                ifa_ifwithaddr((struct sockaddr *)&ipaddr);
            if (ia == 0) {
                if (opt == IPOPT_SSRR) {
                    type = ICMP_UNREACH;
                    code = ICMP_UNREACH_SRCFAIL;
                    goto bad;
                }
                /*
                 * Loose routing, and not at next destination
                 * yet; nothing to do except forward.
                 */
                break;
            }
            off--;                      /* 0 origin */
            if (off > optlen - sizeof(struct in_addr)) {
                /*
                 * End of source route.  Should be for us.
                 */
                save_rte(cp, ip->ip_src);
                break;
            }
            /*
             * locate outgoing interface
             */
            bcopy((caddr_t)(cp + off), (caddr_t)&ipaddr.sin_addr,
                  sizeof(ipaddr.sin_addr));
            if (opt == IPOPT_SSRR) {
#define INA struct in_ifaddr *
#define SA struct sockaddr *
                if ((ia = (INA)ifa_ifwithdstaddr((SA)&ipaddr)) == 0)
                    ia = (INA)ifa_ifwithnet((SA)&ipaddr);
            } else
                ia = ip_rtaddr(ipaddr.sin_addr);
            if (ia == 0) {
                type = ICMP_UNREACH;
                code = ICMP_UNREACH_SRCFAIL;
                goto bad;
            }
            ip->ip_dst = ipaddr.sin_addr;
            bcopy((caddr_t)&(IA_SIN(ia)->sin_addr),
                  (caddr_t)(cp + off), sizeof(struct in_addr));
            cp[IPOPT_OFFSET] += sizeof(struct in_addr);
            /*
             * Let ip_intr's mcast routing check handle mcast pkts
             */
            forward = !IN_MULTICAST(ntohl(ip->ip_dst.s_addr));
            break;

        case IPOPT_RR:
            if ((off = cp[IPOPT_OFFSET]) < IPOPT_MINOFF) {
                code = &cp[IPOPT_OFFSET] - (u_char *)ip;
                goto bad;
            }
            /*
             * If no space remains, ignore.
             */
            off--;                      /* 0 origin */
            if (off > optlen - sizeof(struct in_addr))
                break;
            bcopy((caddr_t)(&ip->ip_dst), (caddr_t)&ipaddr.sin_addr,
                  sizeof(ipaddr.sin_addr));
            /*
             * locate outgoing interface; if we're the destination,
             * use the incoming interface (should be same).
             */
            if ((ia = (INA)ifa_ifwithaddr((SA)&ipaddr)) == 0 &&
                (ia = ip_rtaddr(ipaddr.sin_addr)) == 0) {
                type = ICMP_UNREACH;
                code = ICMP_UNREACH_HOST;
                goto bad;
            }
            bcopy((caddr_t)&(IA_SIN(ia)->sin_addr),
                  (caddr_t)(cp + off), sizeof(struct in_addr));
            cp[IPOPT_OFFSET] += sizeof(struct in_addr);
            break;

        case IPOPT_TS:
            code = cp - (u_char *)ip;
            ipt = (struct ip_timestamp *)cp;
            if (ipt->ipt_len < 5)
                goto bad;
            if (ipt->ipt_ptr > ipt->ipt_len - sizeof (int32_t)) {
                if (++ipt->ipt_oflw == 0)
                    goto bad;
                break;
            }
            sin = (struct in_addr *)(cp + ipt->ipt_ptr - 1);
            switch (ipt->ipt_flg) {

            case IPOPT_TS_TSONLY:
                break;

            case IPOPT_TS_TSANDADDR:
                if (ipt->ipt_ptr + sizeof(n_time) +
                    sizeof(struct in_addr) > ipt->ipt_len)
                    goto bad;
                ipaddr.sin_addr = dst;
                ia = (INA)ifaof_ifpforaddr((SA)&ipaddr,
                                           m->m_pkthdr.rcvif);
                if (ia == 0)
                    continue;
                bcopy((caddr_t)&IA_SIN(ia)->sin_addr,
                      (caddr_t)sin, sizeof(struct in_addr));
                ipt->ipt_ptr += sizeof(struct in_addr);
                break;

            case IPOPT_TS_PRESPEC:
                if (ipt->ipt_ptr + sizeof(n_time) +
                    sizeof(struct in_addr) > ipt->ipt_len)
                    goto bad;
                bcopy((caddr_t)sin, (caddr_t)&ipaddr.sin_addr,
                      sizeof(struct in_addr));
                if (ifa_ifwithaddr((SA)&ipaddr) == 0)
                    continue;
                ipt->ipt_ptr += sizeof(struct in_addr);
                break;

            default:
                goto bad;
            }
            ntime = iptime();
            bcopy((caddr_t)&ntime, (caddr_t)cp + ipt->ipt_ptr - 1,
                  sizeof(n_time));
            ipt->ipt_ptr += sizeof(n_time);
        }
    }
    if (forward) {
        ip_forward(m, 1);
        return (1);
    }
    return (0);
bad:
    /* ip->ip_len -= ip->ip_hl << 2; XXX icmp_error adds in hdr length */

    /* Not yet */
    icmp_error(m, type, code, 0, 0);

    ipstat.ips_badoptions++;
    return (1);
}

#endif /* notdef */

/*
 * Strip out IP options, at higher
 * level protocol in the kernel.
 * Second argument is buffer to which options
 * will be moved, and return value is their length.
 * (XXX) should be deleted; last arg currently ignored.
 */
void
ip_stripoptions(m, mopt)
    register struct mbuf *m;
    struct mbuf *mopt;
{
    register int i;
    struct ip *ip = mtod(m, struct ip *);
    register caddr_t opts;
    int olen;

    olen = (ip->ip_hl << 2) - sizeof (struct ip);
    opts = (caddr_t)(ip + 1);
    i = m->m_len - (sizeof (struct ip) + olen);
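    /* Slide the rest of the packet (i bytes of payload) down over the options. */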
    memcpy(opts, opts + olen, (unsigned)i);
    m->m_len -= olen;

    ip->ip_hl = sizeof(struct ip) >> 2;
}