#define MPF_PACKET AF_ROSE /* Assumptions: - if a device has no dev->hard_header routine, it adds and removes the ll header inside itself. In this case the ll header is invisible outside of the device, but higher levels still should reserve dev->hard_header_len. Some devices are clever enough to reallocate the skb when the header will not fit into the reserved space (tunnel); others are silly (PPP). - a packet socket receives packets with the ll header pulled, so SOCK_RAW should push it back.
On receive: -----------
Incoming, dev->hard_header!=NULL mac_header -> ll header data -> data
Outgoing, dev->hard_header!=NULL mac_header -> ll header data -> ll header
Incoming, dev->hard_header==NULL mac_header -> UNKNOWN position. It is very likely that it points to the ll header. PPP does this, which is wrong, because it introduces asymmetry between the rx and tx paths. data -> data
Outgoing, dev->hard_header==NULL mac_header -> data. ll header is still not built! data -> data
In summary: if dev->hard_header==NULL we are unlikely to restore a sensible ll header.
dev->hard_header == NULL (ll header is added by device, we cannot control it) mac_header -> data data -> data
We should set nh.raw on output to the correct position; the packet classifier depends on it. */
/* Private packet socket structures. */
/* One entry in a packet socket's multicast/promiscuous membership list.
 * NOTE(review): presumably managed via the PACKET_ADD_MEMBERSHIP /
 * PACKET_DROP_MEMBERSHIP socket options — the management code is not
 * visible in this chunk; confirm against the rest of the file. */
struct packet_mclist {
	struct packet_mclist *next;		/* next membership on this socket */
	int ifindex;				/* interface this membership applies to */
	int count;				/* NOTE(review): looks like a join refcount — confirm */
	unsigned short type;			/* membership type */
	unsigned short alen;			/* number of valid bytes in addr[] */
	unsigned char addr[MAX_ADDR_LEN];	/* hardware address for the membership */
};

/* identical to struct packet_mreq except it has
 * a longer address field. */
struct packet_mreq_max {
	int mr_ifindex;
	unsigned short mr_type;
	unsigned short mr_alen;
	unsigned char mr_address[MAX_ADDR_LEN];
};
/* NOTE(review): tail fragment of the packet-socket destructor — the
 * function header lies outside this view. Refuses to run teardown on a
 * socket that has not been marked dead yet. */
if (!sock_flag (sk, SOCK_DEAD)) {
	printk ("Attempt to release alive packet socket: %p\n", sk);
	return;
}

/* Balance the debug refcount taken when the socket was created. */
sk_refcnt_debug_dec (sk);
}
/* Forward declaration; the ops table itself is defined later in the file. */
static const struct proto_ops packet_ops;
/* SO_ATTACH_FILTER support.
 *
 * Run the socket's attached BPF filter, if any, over @skb under an RCU
 * read-side critical section (BH-disabled flavour). Returns the filter's
 * verdict: 0 means drop, otherwise the snapshot length to keep. When no
 * filter is attached, @res is returned unchanged.
 */
static inline unsigned int run_filter (struct sk_buff *skb, struct sock *sk, unsigned int res)
{
	struct sk_filter *f;

	rcu_read_lock_bh ();
	f = rcu_dereference (sk->sk_filter);
	if (f)
		res = sk_run_filter (skb, f->insns, f->len);
	rcu_read_unlock_bh ();

	return res;
}
/* This function makes lazy skb cloning in hope that most of packets
 * are discarded by BPF.
 *
 * Note tricky part: we DO mangle shared skb! skb->data, skb->len and
 * skb->cb are mangled. It works because (and until) packets falling
 * here are owned by current CPU. Output packets are cloned by
 * dev_queue_xmit_nit(), input packets are processed by net_bh
 * sequentially, so that if we return skb to original state on exit, we
 * will not harm anyone.
 *
 * NOTE(review): this view of the function is truncated — the delivery
 * path and the drop/drop_n_restore/drop_n_acct labels lie outside this
 * chunk.
 */
static int packet_rcv (struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct sockaddr_ll *sll;
	struct packet_sock *po;
	u8 *skb_head = skb->data;	/* saved so a shared skb can be restored on drop */
	int skb_len = skb->len;		/* saved original length, for the same reason */
	unsigned int snaplen, res;

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto drop;

	sk = pt->af_packet_priv;
	po = pkt_sk (sk);

	/* Only deliver within the socket's own network namespace. */
	if (dev_net (dev) != sock_net (sk))
		goto drop;

	skb->dev = dev;

	if (dev->header_ops) {
		/* The device has an explicit notion of ll header,
		 * exported to higher levels.
		 *
		 * Otherwise, the device hides details of its frame
		 * structure, so that the corresponding packet head is
		 * never delivered to the user.
		 */
		if (sk->sk_type != SOCK_DGRAM)
			skb_push (skb, skb->data - skb_mac_header (skb));
		else if (skb->pkt_type == PACKET_OUTGOING) {
			/* Special case: outgoing packets have ll header at head */
			skb_pull (skb, skb_network_offset (skb));
		}
	}

	snaplen = skb->len;

	/* SO_ATTACH_FILTER */
	res = run_filter (skb, sk, snaplen);
	if (!res)
		goto drop_n_restore;
	if (snaplen > res)
		snaplen = res;

	/* NOTE(review): in the flattened original the '//' markers on this
	 * statement and on the SO_ATTACH_FILTER note above had swallowed
	 * the code following them on the same line; the receive-buffer
	 * check below is restored as live code — confirm against upstream. */
	if (atomic_read (&sk->sk_rmem_alloc) + skb->truesize >= (unsigned) sk->sk_rcvbuf)
		goto drop_n_acct;

	/* Lazy cloning: only clone when the skb is shared with someone else. */
	if (skb_shared (skb)) {
		struct sk_buff *nskb = skb_clone (skb, GFP_ATOMIC);
		if (nskb == NULL)
			goto drop_n_acct;
/* NOTE(review): fragment from the receive path, deliberately disabled —
 * the right error code for an unbound socket was never settled. */
#if 0
/* What error should we return now? EUNATTACH? */
if (pkt_sk (sk)->ifindex < 0)
	return -ENODEV;
#endif
/*
 * Call the generic datagram receiver. This handles all sorts
 * of horrible races and re-entrancy so we can forget about it
 * in the protocol layers.
 *
 * Now it will return ENETDOWN, if the device has just gone down,
 * but then it will block.
 *
 * NOTE(review): the skb_recv_datagram() call this comment describes is
 * not visible in this chunk.
 */
/*
 * Free or return the buffer as appropriate. Again this
 * hides all the races and re-entrancy issues from us.
 */
/* MSG_TRUNC reports the full packet length even if fewer bytes were copied. */
err = (flags & MSG_TRUNC) ? skb->len : copied;
/* Routing, ARP and interface-address ioctls are delegated to the inet
 * dgram implementation; only permitted in the initial net namespace. */
case SIOCADDRT:
case SIOCDELRT:
case SIOCDARP:
case SIOCGARP:
case SIOCSARP:
case SIOCGIFADDR:
case SIOCSIFADDR:
case SIOCGIFBRDADDR:
case SIOCSIFBRDADDR:
case SIOCGIFNETMASK:
case SIOCSIFNETMASK:
case SIOCGIFDSTADDR:
case SIOCSIFDSTADDR:
case SIOCSIFFLAGS:
	if (!net_eq (sock_net (sk), &init_net))
		return -ENOIOCTLCMD;
	return inet_dgram_ops.ioctl (sock, cmd, arg);
/* NOTE(review): user-space raw-socket sniffer fragment; the enclosing
 * function and the loop's closing brace are outside this view. */
while (1) {
	n_read = recvfrom (sock, buffer, 2048, 0, NULL, NULL);
	/* Minimum sane frame: 14-byte Ethernet header
	 * (6 dest + 6 source + 2 type/length) + 20-byte IP header
	 * + 8-byte ICMP/TCP/UDP header = 42 bytes. */
	if (n_read < 42) {
		fprintf (stdout, "Incomplete header, packet corrupt\n");
		continue;
	}

	ethhead = buffer;
	p = ethhead;
	int n = 0xFF;	/* mask each byte before printing */
	/* Bytes 6..11 are the source MAC, bytes 0..5 the destination;
	 * printed here as source ==> destination. Note the format string
	 * mixes "%.2X" (precision) and "%02X" (zero flag) — equivalent
	 * output for these non-negative masked values. */
	printf ("MAC: %.2X:%02X:%02X:%02X:%02X:%02X==>"
		"%.2X:%.2X:%.2X:%.2X:%.2X:%.2X\n",
		p[6] & n, p[7] & n, p[8] & n, p[9] & n, p[10] & n, p[11] & n,
		p[0] & n, p[1] & n, p[2] & n, p[3] & n, p[4] & n, p[5] & n);