File:  [DragonFly] / src / sys / netinet / ip_demux.c
Revision 1.9
Mon Mar 22 06:38:17 2004 UTC by hsu
Branches: MAIN
CVS tags: HEAD
Consolidate length checks in ip_demux().

/*
 * Copyright (c) 2003 Jeffrey Hsu
 * All rights reserved.
 *
 * $DragonFly: src/sys/netinet/ip_demux.c,v 1.9 2004/03/22 06:38:17 hsu Exp $
 */

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/thread.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/netisr.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcpip.h>
#include <netinet/tcp_var.h>
#include <netinet/udp.h>
#include <netinet/udp_var.h>

extern struct thread netisr_cpu[];

static struct thread tcp_thread[MAXCPU];
static struct thread udp_thread[MAXCPU];

/*
 * XXX when we remove the MP lock, changes to this must be master-synchronized.
 */
static int	ip_mthread_enable = 0;
SYSCTL_INT(_net_inet_ip, OID_AUTO, mthread_enable, CTLFLAG_RW,
    &ip_mthread_enable, 0, "Dispatch IP packets to per-cpu protocol threads");
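
/*
 * Example usage of the knob above (illustrative; not from the original
 * file): the multithreaded demux is off by default, so ip_mport() below
 * sends everything to the cpu 0 netisr thread.  An administrator would
 * turn it on at runtime through the standard sysctl(8) interface:
 *
 *	sysctl net.inet.ip.mthread_enable=1
 */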

static __inline int
INP_MPORT_HASH(in_addr_t src, in_addr_t dst, in_port_t sport, in_port_t dport)
{
	/*
	 * Hash on the low-order (most variable) bytes of the addresses and
	 * ports.  The fields are in network byte order, so on a little-endian
	 * machine those bytes end up in the high-order bits of the integer
	 * and must be shifted down first.
	 */
#if (BYTE_ORDER == LITTLE_ENDIAN)
	KASSERT(ncpus2 < 256, ("need different hash function"));  /* XXX JH */
	return (((src >> 24) ^ (sport >> 8) ^ (dst >> 24) ^ (dport >> 8)) &
		ncpus2_mask);
#else
	return ((src ^ sport ^ dst ^ dport) & ncpus2_mask);
#endif
}
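
/*
 * Worked example (illustrative; not part of the original code): with
 * ncpus2 == 4 (ncpus2_mask == 3) on a little-endian machine, the flow
 *
 *	src = 192.168.1.10, sport = 12345	(low-order bytes 0x0a, 0x39)
 *	dst = 192.168.1.20, dport = 80		(low-order bytes 0x14, 0x50)
 *
 * hashes to (0x0a ^ 0x39 ^ 0x14 ^ 0x50) & 3 = 0x77 & 3 = 3, i.e. cpu 3.
 * Because xor is commutative, the reverse direction of the flow and the
 * (laddr, faddr, lport, fport) tuple used by tcp_soport()/udp_soport()
 * below hash to the same value, which keeps a connection's packets and
 * its socket on the same protocol thread.
 */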

/*
 * Map a packet to a protocol processing thread.
 */
lwkt_port_t
ip_mport(struct mbuf *m)
{
	struct ip *ip;
	int iphlen;
	struct tcphdr *th;
	struct udphdr *uh;
	int thoff;				/* TCP data offset */
	lwkt_port_t port;
	int cpu;

	if (ip_mthread_enable == 0)
		return (&netisr_cpu[0].td_msgport);

	if (m->m_pkthdr.len < sizeof(struct ip)) {
		ipstat.ips_tooshort++;
		return (NULL);
	}

	if (m->m_len < sizeof(struct ip) &&
	    (m = m_pullup(m, sizeof(struct ip))) == NULL) {
		ipstat.ips_toosmall++;
		return (NULL);
	}

	ip = mtod(m, struct ip *);

	/*
	 * XXX generic packet handling: fragments are reassembled on cpu 0
	 * for now.
	 */
	if (ntohs(ip->ip_off) & (IP_MF | IP_OFFMASK))
		return (&netisr_cpu[0].td_msgport);

	iphlen = ip->ip_hl << 2;

	switch (ip->ip_p) {
	case IPPROTO_TCP:
		/* Make sure the TCP header is contiguous before using it. */
		if (m->m_len < iphlen + sizeof(struct tcphdr)) {
			m = m_pullup(m, iphlen + sizeof(struct tcphdr));
			if (m == NULL) {
				tcpstat.tcps_rcvshort++;
				return (NULL);
			}
			/* m_pullup() may have moved the data; reload ip. */
			ip = mtod(m, struct ip *);
		}
		th = (struct tcphdr *)((caddr_t)ip + iphlen);
		thoff = th->th_off << 2;
		if (thoff < sizeof(struct tcphdr) || thoff > ip->ip_len) {
			tcpstat.tcps_rcvbadoff++;
			return (NULL);
		}
		if (m->m_len < iphlen + thoff) {
			m = m_pullup(m, iphlen + thoff);
			if (m == NULL) {
				tcpstat.tcps_rcvshort++;
				return (NULL);
			}
			ip = mtod(m, struct ip *);
			th = (struct tcphdr *)((caddr_t)ip + iphlen);
		}

		/* Hash the connection 4-tuple to a per-cpu TCP thread. */
		cpu = INP_MPORT_HASH(ip->ip_src.s_addr, ip->ip_dst.s_addr,
		    th->th_sport, th->th_dport);
		port = &tcp_thread[cpu].td_msgport;
		break;
	case IPPROTO_UDP:
		/* Make sure the UDP header is contiguous before using it. */
		if (m->m_len < iphlen + sizeof(struct udphdr)) {
			m = m_pullup(m, iphlen + sizeof(struct udphdr));
			if (m == NULL) {
				udpstat.udps_hdrops++;
				return (NULL);
			}
			ip = mtod(m, struct ip *);
		}
		uh = (struct udphdr *)((caddr_t)ip + iphlen);

		/* Broadcast and multicast traffic is handled on cpu 0. */
		if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
		    in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) {
			cpu = 0;
		} else {
			cpu = INP_MPORT_HASH(ip->ip_src.s_addr,
			    ip->ip_dst.s_addr, uh->uh_sport, uh->uh_dport);
		}
		port = &udp_thread[cpu].td_msgport;
		break;
	default:
		/* Other protocols go to the generic netisr thread on cpu 0. */
		port = &netisr_cpu[0].td_msgport;
		break;
	}
	KKASSERT(port->mp_putport != NULL);

	return (port);
}
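
/*
 * Usage sketch (an assumption for illustration; not code from this file):
 * a caller on the inbound path asks for the target port and then hands
 * the packet to that thread as an LWKT message, roughly
 *
 *	lwkt_port_t port = ip_mport(m);
 *	if (port != NULL)
 *		lwkt_sendmsg(port, &nmsg->nm_lmsg);
 *
 * where nmsg is a hypothetical struct netmsg built by the caller to wrap
 * the mbuf.  A NULL return means the packet was judged malformed and must
 * not be queued for protocol processing.
 */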

/*
 * Map a TCP socket to a protocol processing thread.
 */
lwkt_port_t
tcp_soport(struct socket *so, struct sockaddr *nam)
{
	struct inpcb *inp;

	/*
	 * The following cases are all handled on protocol thread 0:
	 *   only bind() and connect() have a non-null nam parameter
	 *   attach() has a null socket parameter
	 *   fast and slow timeouts pass in two NULLs
	 */
	if (nam != NULL || so == NULL)
		return (&tcp_thread[0].td_msgport);

	/*
	 * Already bound and connected.  For TCP connections, the
	 * (faddr, fport, laddr, lport) association cannot change now.
	 *
	 * Note: the T/TCP code needs some reorganization to fit into
	 * this model.  XXX JH
	 */
	inp = sotoinpcb(so);
	if (!inp)		/* connection reset by peer */
		return (&tcp_thread[0].td_msgport);

	/*
	 * Rely on type-stable memory and on a check in the protocol handler
	 * to fix the race with deallocation of the inp here.  XXX JH
	 */

	return (&tcp_thread[INP_MPORT_HASH(inp->inp_laddr.s_addr,
	    inp->inp_faddr.s_addr, inp->inp_lport,
	    inp->inp_fport)].td_msgport);
}

/*
 * Map a UDP socket to a protocol processing thread.
 */
lwkt_port_t
udp_soport(struct socket *so, struct sockaddr *nam)
{
	struct inpcb *inp;

	/*
	 * The following cases are all handled on protocol thread 0:
	 *   only bind() and connect() have a non-null nam parameter
	 *   attach() has a null socket parameter
	 *   fast and slow timeouts pass in two NULLs
	 */
	if (nam != NULL || so == NULL)
		return (&udp_thread[0].td_msgport);

	inp = sotoinpcb(so);

	if (IN_MULTICAST(ntohl(inp->inp_laddr.s_addr)))
		return (&udp_thread[0].td_msgport);

	/*
	 * Rely on type-stable memory and on a check in the protocol handler
	 * to fix the race with deallocation of the inp here.  XXX JH
	 */

	return (&udp_thread[INP_MPORT_HASH(inp->inp_laddr.s_addr,
	    inp->inp_faddr.s_addr, inp->inp_lport,
	    inp->inp_fport)].td_msgport);
}

/*
 * Map a network address to a processor.
 */
int
tcp_addrcpu(in_addr_t src, in_port_t sport, in_addr_t dst, in_port_t dport)
{
	return (INP_MPORT_HASH(src, dst, sport, dport));
}

int
udp_addrcpu(in_addr_t src, in_port_t sport, in_addr_t dst, in_port_t dport)
{
	if (IN_MULTICAST(ntohl(dst)))
		return (0);
	else
		return (INP_MPORT_HASH(src, dst, sport, dport));
}

/*
 * Return the LWKT message port for the given cpu.
 */
lwkt_port_t
tcp_cport(int cpu)
{
	return (&tcp_thread[cpu].td_msgport);
}

void
tcp_thread_init(void)
{
	int cpu;

	for (cpu = 0; cpu < ncpus2; cpu++) {
		lwkt_create(netmsg_service_loop, NULL, NULL,
			&tcp_thread[cpu], 0, cpu, "tcp_thread %d", cpu);
	}
}

void
udp_thread_init(void)
{
	int cpu;

	for (cpu = 0; cpu < ncpus2; cpu++) {
		lwkt_create(netmsg_service_loop, NULL, NULL,
			&udp_thread[cpu], 0, cpu, "udp_thread %d", cpu);
	}
}