include/linux/skbuff.h

/*
 *	Definitions for the 'struct sk_buff' memory handlers.
 *
 *	Authors:
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Florian La Roche, <rzsfl@rz.uni-sb.de>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#ifndef _LINUX_SKBUFF_H
#define _LINUX_SKBUFF_H

#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/time.h>
#include <linux/cache.h>

#include <asm/atomic.h>
#include <asm/types.h>
#include <linux/spinlock.h>
#include <linux/net.h>
#include <linux/textsearch.h>
#include <net/checksum.h>
#include <linux/rcupdate.h>
#include <linux/dmaengine.h>
#include <linux/hrtimer.h>

#define HAVE_ALLOC_SKB		/* For the drivers to know */
#define HAVE_ALIGNABLE_SKB	/* Ditto 8)		   */

/* Don't change this without changing skb_csum_unnecessary! */
#define CHECKSUM_NONE 0
#define CHECKSUM_UNNECESSARY 1
#define CHECKSUM_COMPLETE 2
#define CHECKSUM_PARTIAL 3

#define SKB_DATA_ALIGN(X)	(((X) + (SMP_CACHE_BYTES - 1)) & \
				 ~(SMP_CACHE_BYTES - 1))
#define SKB_WITH_OVERHEAD(X)	\
	((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define SKB_MAX_ORDER(X, ORDER) \
	SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
#define SKB_MAX_HEAD(X)		(SKB_MAX_ORDER((X), 0))
#define SKB_MAX_ALLOC		(SKB_MAX_ORDER(0, 2))
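
/*
 * Worked example (illustrative, assuming SMP_CACHE_BYTES == 64 and
 * PAGE_SIZE == 4096): SKB_DATA_ALIGN(100) rounds a 100-byte request up
 * to the next cache line, i.e. (100 + 63) & ~63 == 128.  Likewise
 * SKB_MAX_HEAD(0) is one page minus the cache-aligned size of
 * struct skb_shared_info: the largest linear data area that still fits
 * in an order-0 page allocation.
 */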

/* A. Checksumming of received packets by device.
 *
 *	NONE: device failed to checksum this packet.
 *		skb->csum is undefined.
 *
 *	UNNECESSARY: device parsed the packet and verified the checksum.
 *		skb->csum is undefined.
 *	      It is a bad option, but, unfortunately, many vendors do this.
 *	      Apparently with the secret goal of selling you a new device
 *	      when you add a new protocol to your host, e.g. IPv6. 8)
 *
 *	COMPLETE: the most generic way. Device supplied checksum of _all_
 *	    the packet as seen by netif_rx in skb->csum.
 *	    NOTE: Even if device supports only some protocols, but
 *	    is able to produce some skb->csum, it MUST use COMPLETE,
 *	    not UNNECESSARY.
 *
 *	PARTIAL: identical to the case for output below.  This may occur
 *	    on a packet received directly from another Linux OS, e.g.,
 *	    a virtualised Linux kernel on the same host.  The packet can
 *	    be treated in the same way as UNNECESSARY except that on
 *	    output (i.e., forwarding) the checksum must be filled in
 *	    by the OS or the hardware.
 *
 * B. Checksumming on output.
 *
 *	NONE: skb is checksummed by protocol or csum is not required.
 *
 *	PARTIAL: device is required to csum packet as seen by hard_start_xmit
 *	from skb->csum_start to the end and to record the checksum
 *	at skb->csum_start + skb->csum_offset.
 *
 *	Device must show its capabilities in dev->features, set
 *	at device setup time.
 *	NETIF_F_HW_CSUM	- it is a clever device, able to checksum
 *			  everything.
 *	NETIF_F_NO_CSUM - loopback or reliable single hop media.
 *	NETIF_F_IP_CSUM - device is dumb. It is able to csum only
 *			  TCP/UDP over IPv4. Sigh. Vendors like this
 *			  way for some unknown reason. Though, see the
 *			  comment above about CHECKSUM_UNNECESSARY. 8)
 *	NETIF_F_IPV6_CSUM - about as dumb as the last one, but does
 *			  IPv6 instead.
 *
 *	Any questions? No questions, good.		--ANK
 */
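
/*
 * Illustrative sketch (not part of this header): a protocol that wants
 * the device to finish the checksum on transmit sets CHECKSUM_PARTIAL
 * together with the start/offset pair, roughly as TCP does:
 *
 *	skb->ip_summed	 = CHECKSUM_PARTIAL;
 *	skb->csum_start	 = skb_transport_header(skb) - skb->head;
 *	skb->csum_offset = offsetof(struct tcphdr, check);
 *
 * The driver (or hardware) then checksums from skb->head +
 * skb->csum_start to the end of the packet and stores the result at
 * skb->head + skb->csum_start + skb->csum_offset.
 */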

struct net_device;
struct scatterlist;
struct pipe_inode_info;

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
struct nf_conntrack {
	atomic_t use;
};
#endif

#ifdef CONFIG_BRIDGE_NETFILTER
struct nf_bridge_info {
	atomic_t use;
	struct net_device *physindev;
	struct net_device *physoutdev;
	unsigned int mask;
	unsigned long data[32 / sizeof(unsigned long)];
};
#endif

struct sk_buff_head {
	/* These two members must be first. */
	struct sk_buff	*next;
	struct sk_buff	*prev;

	__u32		qlen;
	spinlock_t	lock;
};

struct sk_buff;

/* To allow 64K frame to be packed as single skb without frag_list */
#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 2)
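
/*
 * Worked example (assuming PAGE_SIZE == 4096): 65536/4096 == 16 pages
 * cover the payload itself, plus 2 extra slots because a 64K span may
 * start and end at unaligned offsets, giving MAX_SKB_FRAGS == 18.
 */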

typedef struct skb_frag_struct skb_frag_t;

struct skb_frag_struct {
	struct page *page;
	__u32 page_offset;
	__u32 size;
};

/* This data is invariant across clones and lives at
 * the end of the header data, i.e. at skb->end.
 */
struct skb_shared_info {
	atomic_t	dataref;
	unsigned short	nr_frags;
	unsigned short	gso_size;
	/* Warning: this field is not always filled in (UFO)! */
	unsigned short	gso_segs;
	unsigned short  gso_type;
	__be32          ip6_frag_id;
#ifdef CONFIG_HAS_DMA
	unsigned int	num_dma_maps;
#endif
	struct sk_buff	*frag_list;
	skb_frag_t	frags[MAX_SKB_FRAGS];
#ifdef CONFIG_HAS_DMA
	dma_addr_t	dma_maps[MAX_SKB_FRAGS + 1];
#endif
};

/* We divide dataref into two halves.  The higher 16 bits hold references
 * to the payload part of skb->data.  The lower 16 bits hold references to
 * the entire skb->data.  A clone of a headerless skb holds the length of
 * the header in skb->hdr_len.
 *
 * All users must obey the rule that the skb->data reference count must be
 * greater than or equal to the payload reference count.
 *
 * Holding a reference to the payload part means that the user does not
 * care about modifications to the header part of skb->data.
 */
#define SKB_DATAREF_SHIFT 16
#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)
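
/*
 * Illustrative sketch: given a dataref snapshot, the two halves are
 * recovered as
 *
 *	int dataref = atomic_read(&skb_shinfo(skb)->dataref);
 *	int payload = dataref >> SKB_DATAREF_SHIFT;
 *	int whole   = dataref & SKB_DATAREF_MASK;
 *
 * skb_header_cloned() below uses exactly this split: the header is
 * still private iff (whole - payload) == 1.
 */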

enum {
	SKB_FCLONE_UNAVAILABLE,
	SKB_FCLONE_ORIG,
	SKB_FCLONE_CLONE,
};

enum {
	SKB_GSO_TCPV4 = 1 << 0,
	SKB_GSO_UDP = 1 << 1,

	/* This indicates the skb is from an untrusted source. */
	SKB_GSO_DODGY = 1 << 2,

	/* This indicates the tcp segment has CWR set. */
	SKB_GSO_TCP_ECN = 1 << 3,

	SKB_GSO_TCPV6 = 1 << 4,
};

#if BITS_PER_LONG > 32
#define NET_SKBUFF_DATA_USES_OFFSET 1
#endif

#ifdef NET_SKBUFF_DATA_USES_OFFSET
typedef unsigned int sk_buff_data_t;
#else
typedef unsigned char *sk_buff_data_t;
#endif

/**
 *	struct sk_buff - socket buffer
 *	@next: Next buffer in list
 *	@prev: Previous buffer in list
 *	@sk: Socket we are owned by
 *	@tstamp: Time we arrived
 *	@dev: Device we arrived on/are leaving by
 *	@transport_header: Transport layer header
 *	@network_header: Network layer header
 *	@mac_header: Link layer header
 *	@dst: destination entry
 *	@sp: the security path, used for xfrm
 *	@cb: Control buffer. Free for use by every layer. Put private vars here
 *	@len: Length of actual data
 *	@data_len: Data length
 *	@mac_len: Length of link layer header
 *	@hdr_len: writable header length of cloned skb
 *	@csum: Checksum (must include start/offset pair)
 *	@csum_start: Offset from skb->head where checksumming should start
 *	@csum_offset: Offset from csum_start where checksum should be stored
 *	@local_df: allow local fragmentation
 *	@cloned: Head may be cloned (check refcnt to be sure)
 *	@nohdr: Payload reference only, must not modify header
 *	@pkt_type: Packet class
 *	@fclone: skbuff clone status
 *	@ip_summed: Driver fed us an IP checksum
 *	@priority: Packet queueing priority
 *	@users: User count - see {datagram,tcp}.c
 *	@protocol: Packet protocol from driver
 *	@truesize: Buffer size
 *	@head: Head of buffer
 *	@data: Data head pointer
 *	@tail: Tail pointer
 *	@end: End pointer
 *	@destructor: Destruct function
 *	@mark: Generic packet mark
 *	@nfct: Associated connection, if any
 *	@ipvs_property: skbuff is owned by ipvs
 *	@peeked: this packet has been seen already, so stats have been
 *		done for it, don't do them again
 *	@nf_trace: netfilter packet trace flag
 *	@nfctinfo: Relationship of this skb to the connection
 *	@nfct_reasm: netfilter conntrack re-assembly pointer
 *	@nf_bridge: Saved data about a bridged frame - see br_netfilter.c
 *	@iif: ifindex of device we arrived on
 *	@queue_mapping: Queue mapping for multiqueue devices
 *	@tc_index: Traffic control index
 *	@tc_verd: traffic control verdict
 *	@ndisc_nodetype: router type (from link layer)
 *	@do_not_encrypt: set to prevent encryption of this frame
 *	@dma_cookie: a cookie to one of several possible DMA operations
 *		done by skb DMA functions
 *	@secmark: security marking
 *	@vlan_tci: vlan tag control information
 */

struct sk_buff {
	/* These two members must be first. */
	struct sk_buff		*next;
	struct sk_buff		*prev;

	struct sock		*sk;
	ktime_t			tstamp;
	struct net_device	*dev;

	union {
		struct  dst_entry	*dst;
		struct  rtable		*rtable;
	};
	struct	sec_path	*sp;

	/*
	 * This is the control buffer. It is free to use for every
	 * layer. Please put your private variables there. If you
	 * want to keep them across layers you have to do a skb_clone()
	 * first. This is owned by whoever has the skb queued ATM.
	 */
	char			cb[48];

	unsigned int		len,
				data_len;
	__u16			mac_len,
				hdr_len;
	union {
		__wsum		csum;
		struct {
			__u16	csum_start;
			__u16	csum_offset;
		};
	};
	__u32			priority;
	__u8			local_df:1,
				cloned:1,
				ip_summed:2,
				nohdr:1,
				nfctinfo:3;
	__u8			pkt_type:3,
				fclone:2,
				ipvs_property:1,
				peeked:1,
				nf_trace:1;
	__be16			protocol;

	void			(*destructor)(struct sk_buff *skb);
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	struct nf_conntrack	*nfct;
	struct sk_buff		*nfct_reasm;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	struct nf_bridge_info	*nf_bridge;
#endif

	int			iif;
	__u16			queue_mapping;
#ifdef CONFIG_NET_SCHED
	__u16			tc_index;	/* traffic control index */
#ifdef CONFIG_NET_CLS_ACT
	__u16			tc_verd;	/* traffic control verdict */
#endif
#endif
#ifdef CONFIG_IPV6_NDISC_NODETYPE
	__u8			ndisc_nodetype:2;
#endif
#if defined(CONFIG_MAC80211) || defined(CONFIG_MAC80211_MODULE)
	__u8			do_not_encrypt:1;
#endif
	/* 0/13/14 bit hole */

#ifdef CONFIG_NET_DMA
	dma_cookie_t		dma_cookie;
#endif
#ifdef CONFIG_NETWORK_SECMARK
	__u32			secmark;
#endif

	__u32			mark;

	__u16			vlan_tci;

	sk_buff_data_t		transport_header;
	sk_buff_data_t		network_header;
	sk_buff_data_t		mac_header;
	/* These elements must be at the end, see alloc_skb() for details.  */
	sk_buff_data_t		tail;
	sk_buff_data_t		end;
	unsigned char		*head,
				*data;
	unsigned int		truesize;
	atomic_t		users;
};
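
/*
 * Illustrative sketch of the usual skb->cb pattern (the struct and
 * macro names here are hypothetical, mirroring e.g. TCP_SKB_CB): a
 * layer overlays a private structure on the control buffer and guards
 * its size at compile time.
 *
 *	struct my_skb_cb {
 *		__u32	seq;
 *		__u8	flags;
 *	};
 *	#define MY_SKB_CB(skb) ((struct my_skb_cb *)&((skb)->cb[0]))
 *
 *	BUILD_BUG_ON(sizeof(struct my_skb_cb) >
 *		     sizeof(((struct sk_buff *)0)->cb));
 */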

#ifdef __KERNEL__
/*
 *	Handling routines are only of interest to the kernel
 */
#include <linux/slab.h>

#include <asm/system.h>

#ifdef CONFIG_HAS_DMA
#include <linux/dma-mapping.h>
extern int skb_dma_map(struct device *dev, struct sk_buff *skb,
		       enum dma_data_direction dir);
extern void skb_dma_unmap(struct device *dev, struct sk_buff *skb,
			  enum dma_data_direction dir);
#endif

extern void kfree_skb(struct sk_buff *skb);
extern void	       __kfree_skb(struct sk_buff *skb);
extern struct sk_buff *__alloc_skb(unsigned int size,
				   gfp_t priority, int fclone, int node);
static inline struct sk_buff *alloc_skb(unsigned int size,
					gfp_t priority)
{
	return __alloc_skb(size, priority, 0, -1);
}

static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
					       gfp_t priority)
{
	return __alloc_skb(size, priority, 1, -1);
}
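
/*
 * Illustrative sketch: a typical allocation in process context, with
 * headroom reserved before any data is written ("hlen" and "dlen" are
 * hypothetical header/payload sizes):
 *
 *	struct sk_buff *skb = alloc_skb(hlen + dlen, GFP_KERNEL);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, hlen);
 *	memcpy(skb_put(skb, dlen), payload, dlen);
 *
 * In atomic context (softirq, or with spinlocks held) GFP_ATOMIC must
 * be used instead of GFP_KERNEL.
 */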

extern int skb_recycle_check(struct sk_buff *skb, int skb_size);

extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
extern struct sk_buff *skb_clone(struct sk_buff *skb,
				 gfp_t priority);
extern struct sk_buff *skb_copy(const struct sk_buff *skb,
				gfp_t priority);
extern struct sk_buff *pskb_copy(struct sk_buff *skb,
				 gfp_t gfp_mask);
extern int	       pskb_expand_head(struct sk_buff *skb,
					int nhead, int ntail,
					gfp_t gfp_mask);
extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
					    unsigned int headroom);
extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
				       int newheadroom, int newtailroom,
				       gfp_t priority);
extern int	       skb_to_sgvec(struct sk_buff *skb,
				    struct scatterlist *sg, int offset,
				    int len);
extern int	       skb_cow_data(struct sk_buff *skb, int tailbits,
				    struct sk_buff **trailer);
extern int	       skb_pad(struct sk_buff *skb, int pad);
#define dev_kfree_skb(a)	kfree_skb(a)
extern void	      skb_over_panic(struct sk_buff *skb, int len,
				     void *here);
extern void	      skb_under_panic(struct sk_buff *skb, int len,
				      void *here);
extern void	      skb_truesize_bug(struct sk_buff *skb);

static inline void skb_truesize_check(struct sk_buff *skb)
{
	int len = sizeof(struct sk_buff) + skb->len;

	if (unlikely((int)skb->truesize < len))
		skb_truesize_bug(skb);
}

extern int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
			int getfrag(void *from, char *to, int offset,
			int len, int odd, struct sk_buff *skb),
			void *from, int length);

struct skb_seq_state {
	__u32		lower_offset;
	__u32		upper_offset;
	__u32		frag_idx;
	__u32		stepped_offset;
	struct sk_buff	*root_skb;
	struct sk_buff	*cur_skb;
	__u8		*frag_data;
};

extern void	      skb_prepare_seq_read(struct sk_buff *skb,
					   unsigned int from, unsigned int to,
					   struct skb_seq_state *st);
extern unsigned int   skb_seq_read(unsigned int consumed, const u8 **data,
				   struct skb_seq_state *st);
extern void	      skb_abort_seq_read(struct skb_seq_state *st);

extern unsigned int   skb_find_text(struct sk_buff *skb, unsigned int from,
				    unsigned int to, struct ts_config *config,
				    struct ts_state *state);

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->end;
}
#else
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->end;
}
#endif

/* Internal */
#define skb_shinfo(SKB)	((struct skb_shared_info *)(skb_end_pointer(SKB)))

/**
 *	skb_queue_empty - check if a queue is empty
 *	@list: queue head
 *
 *	Returns true if the queue is empty, false otherwise.
 */
static inline int skb_queue_empty(const struct sk_buff_head *list)
{
	return list->next == (struct sk_buff *)list;
}

/**
 *	skb_queue_is_last - check if skb is the last entry in the queue
 *	@list: queue head
 *	@skb: buffer
 *
 *	Returns true if @skb is the last buffer on the list.
 */
static inline bool skb_queue_is_last(const struct sk_buff_head *list,
				     const struct sk_buff *skb)
{
	return (skb->next == (struct sk_buff *) list);
}

/**
 *	skb_queue_next - return the next packet in the queue
 *	@list: queue head
 *	@skb: current buffer
 *
 *	Return the next packet in @list after @skb.  It is only valid to
 *	call this if skb_queue_is_last() evaluates to false.
 */
static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
					     const struct sk_buff *skb)
{
	/* This BUG_ON may seem severe, but if we just return then we
	 * are going to dereference garbage.
	 */
	BUG_ON(skb_queue_is_last(list, skb));
	return skb->next;
}

/**
 *	skb_get - reference buffer
 *	@skb: buffer to reference
 *
 *	Makes another reference to a socket buffer and returns a pointer
 *	to the buffer.
 */
static inline struct sk_buff *skb_get(struct sk_buff *skb)
{
	atomic_inc(&skb->users);
	return skb;
}

/*
 * If users == 1, we are the only owner and can avoid redundant
 * atomic changes.
 */

/**
 *	skb_cloned - is the buffer a clone
 *	@skb: buffer to check
 *
 *	Returns true if the buffer was generated with skb_clone() and is
 *	one of multiple shared copies of the buffer. Cloned buffers are
 *	shared data so must not be written to under normal circumstances.
 */
static inline int skb_cloned(const struct sk_buff *skb)
{
	return skb->cloned &&
	       (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
}

/**
 *	skb_header_cloned - is the header a clone
 *	@skb: buffer to check
 *
 *	Returns true if modifying the header part of the buffer requires
 *	the data to be copied.
 */
static inline int skb_header_cloned(const struct sk_buff *skb)
{
	int dataref;

	if (!skb->cloned)
		return 0;

	dataref = atomic_read(&skb_shinfo(skb)->dataref);
	dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
	return dataref != 1;
}

/**
 *	skb_header_release - release reference to header
 *	@skb: buffer to operate on
 *
 *	Drop a reference to the header part of the buffer.  This is done
 *	by acquiring a payload reference.  You must not read from the header
 *	part of skb->data after this.
 */
static inline void skb_header_release(struct sk_buff *skb)
{
	BUG_ON(skb->nohdr);
	skb->nohdr = 1;
	atomic_add(1 << SKB_DATAREF_SHIFT, &skb_shinfo(skb)->dataref);
}

/**
 *	skb_shared - is the buffer shared
 *	@skb: buffer to check
 *
 *	Returns true if more than one person has a reference to this
 *	buffer.
 */
static inline int skb_shared(const struct sk_buff *skb)
{
	return atomic_read(&skb->users) != 1;
}

/**
 *	skb_share_check - check if buffer is shared and if so clone it
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the buffer is shared the buffer is cloned and the old copy
 *	drops a reference. A new clone with a single reference is returned.
 *	If the buffer is not shared the original buffer is returned. When
 *	being called from interrupt status or with spinlocks held pri must
 *	be GFP_ATOMIC.
 *
 *	NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_share_check(struct sk_buff *skb,
					      gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);
	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, pri);
		kfree_skb(skb);
		skb = nskb;
	}
	return skb;
}
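
/*
 * Illustrative sketch: a receive handler that may modify the skb
 * unshares it first; in softirq context the allocation must be
 * GFP_ATOMIC:
 *
 *	skb = skb_share_check(skb, GFP_ATOMIC);
 *	if (!skb)
 *		return NET_RX_DROP;
 *	// skb now holds a single reference and may be modified
 */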

/*
 *	Copy shared buffers into a new sk_buff. We effectively do COW on
 *	packets to handle cases where we have a local reader and forward
 *	and a couple of other messy ones. The normal one is tcpdumping
 *	a packet that's being forwarded.
 */

/**
 *	skb_unshare - make a copy of a shared buffer
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the socket buffer is a clone then this function creates a new
 *	copy of the data, drops a reference count on the old copy and returns
 *	the new copy with the reference count at 1. If the buffer is not a clone
 *	the original buffer is returned. When called with a spinlock held or
 *	from interrupt state @pri must be %GFP_ATOMIC.
 *
 *	%NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
					  gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);
	if (skb_cloned(skb)) {
		struct sk_buff *nskb = skb_copy(skb, pri);
		kfree_skb(skb);	/* Free our shared copy */
		skb = nskb;
	}
	return skb;
}

/**
 *	skb_peek - peek at the head of a queue
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the head element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek(struct sk_buff_head *list_)
{
	struct sk_buff *list = ((struct sk_buff *)list_)->next;
	if (list == (struct sk_buff *)list_)
		list = NULL;
	return list;
}

/**
 *	skb_peek_tail - peek at the tail of a queue
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the tail element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek_tail(struct sk_buff_head *list_)
{
	struct sk_buff *list = ((struct sk_buff *)list_)->prev;
	if (list == (struct sk_buff *)list_)
		list = NULL;
	return list;
}

/**
 *	skb_queue_len	- get queue length
 *	@list_: list to measure
 *
 *	Return the length of an &sk_buff queue.
 */
static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
{
	return list_->qlen;
}

/**
 *	__skb_queue_head_init - initialize non-spinlock portions of sk_buff_head
 *	@list: queue to initialize
 *
 *	This initializes only the list and queue length aspects of
 *	an sk_buff_head object.  This allows initializing the list
 *	aspects of an sk_buff_head without reinitializing things like
 *	the spinlock.  It can also be used for on-stack sk_buff_head
 *	objects where the spinlock is known to not be used.
 */
static inline void __skb_queue_head_init(struct sk_buff_head *list)
{
	list->prev = list->next = (struct sk_buff *)list;
	list->qlen = 0;
}

/*
 * This function creates a split out lock class for each invocation;
 * this is needed for now since a whole lot of users of the skb-queue
 * infrastructure in drivers have different locking usage (in hardirq)
 * than the networking core (in softirq only). In the long run either the
 * network layer or drivers should need annotation to consolidate the
 * main types of usage into 3 classes.
 */
static inline void skb_queue_head_init(struct sk_buff_head *list)
{
	spin_lock_init(&list->lock);
	__skb_queue_head_init(list);
}

static inline void skb_queue_head_init_class(struct sk_buff_head *list,
		struct lock_class_key *class)
{
	skb_queue_head_init(list);
	lockdep_set_class(&list->lock, class);
}

/*
 *	Insert an sk_buff on a list.
 *
 *	The "__skb_xxxx()" functions are the non-atomic ones that
 *	can only be called with interrupts disabled.
 */
extern void        skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
static inline void __skb_insert(struct sk_buff *newsk,
				struct sk_buff *prev, struct sk_buff *next,
				struct sk_buff_head *list)
{
	newsk->next = next;
	newsk->prev = prev;
	next->prev  = prev->next = newsk;
	list->qlen++;
}

static inline void __skb_queue_splice(const struct sk_buff_head *list,
				      struct sk_buff *prev,
				      struct sk_buff *next)
{
	struct sk_buff *first = list->next;
	struct sk_buff *last = list->prev;

	first->prev = prev;
	prev->next = first;

	last->next = next;
	next->prev = last;
}

/**
 *	skb_queue_splice - join two skb lists, this is designed for stacks
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 */
static inline void skb_queue_splice(const struct sk_buff_head *list,
				    struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
		head->qlen += list->qlen;
	}
}

/**
 *	skb_queue_splice_init - join two skb lists and reinitialise the emptied list
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 *
 *	The list at @list is reinitialised
 */
static inline void skb_queue_splice_init(struct sk_buff_head *list,
					 struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
		head->qlen += list->qlen;
		__skb_queue_head_init(list);
	}
}

/**
 *	skb_queue_splice_tail - join two skb lists, each list being a queue
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 */
static inline void skb_queue_splice_tail(const struct sk_buff_head *list,
					 struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
		head->qlen += list->qlen;
	}
}

/**
 *	skb_queue_splice_tail_init - join two skb lists and reinitialise the emptied list
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 *
 *	Each of the lists is a queue.
 *	The list at @list is reinitialised
 */
static inline void skb_queue_splice_tail_init(struct sk_buff_head *list,
					      struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
		head->qlen += list->qlen;
		__skb_queue_head_init(list);
	}
}

/**
 *	__skb_queue_after - queue a buffer after another buffer in a list
 *	@list: list to use
 *	@prev: place after this buffer
 *	@newsk: buffer to queue
 *
 *	Queue a buffer in the middle of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
static inline void __skb_queue_after(struct sk_buff_head *list,
				     struct sk_buff *prev,
				     struct sk_buff *newsk)
{
	__skb_insert(newsk, prev, prev->next, list);
}

extern void skb_append(struct sk_buff *old, struct sk_buff *newsk,
		       struct sk_buff_head *list);

static inline void __skb_queue_before(struct sk_buff_head *list,
				      struct sk_buff *next,
				      struct sk_buff *newsk)
{
	__skb_insert(newsk, next->prev, next, list);
}

/**
 *	__skb_queue_head - queue a buffer at the list head
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the start of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
extern void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_head(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	__skb_queue_after(list, (struct sk_buff *)list, newsk);
}

/**
 *	__skb_queue_tail - queue a buffer at the list tail
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the end of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
extern void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_tail(struct sk_buff_head *list,
				   struct sk_buff *newsk)
{
	__skb_queue_before(list, (struct sk_buff *)list, newsk);
}

/*
 * remove sk_buff from list. _Must_ be called atomically, and with
 * the list known.
 */
extern void	   skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	struct sk_buff *next, *prev;

	list->qlen--;
	next	   = skb->next;
	prev	   = skb->prev;
	skb->next  = skb->prev = NULL;
	next->prev = prev;
	prev->next = next;
}

/**
 *	__skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Remove the head of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The head item is
 *	returned or %NULL if the list is empty.
 */
extern struct sk_buff *skb_dequeue(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}

/**
 *	__skb_dequeue_tail - remove from the tail of the queue
 *	@list: list to dequeue from
 *
 *	Remove the tail of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The tail item is
 *	returned or %NULL if the list is empty.
 */
extern struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek_tail(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}
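
/*
 * Illustrative sketch: a driver-private queue.  skb_queue_tail() and
 * skb_dequeue() take list->lock themselves; the __-prefixed variants
 * above may be used when the caller already holds it:
 *
 *	struct sk_buff_head txq;
 *	struct sk_buff *skb;
 *
 *	skb_queue_head_init(&txq);
 *	skb_queue_tail(&txq, skb);		// locked enqueue
 *	while ((skb = skb_dequeue(&txq)) != NULL)
 *		kfree_skb(skb);			// locked drain
 */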

static inline int skb_is_nonlinear(const struct sk_buff *skb)
{
	return skb->data_len;
}

static inline unsigned int skb_headlen(const struct sk_buff *skb)
{
	return skb->len - skb->data_len;
}

static inline int skb_pagelen(const struct sk_buff *skb)
{
	int i, len = 0;

	for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--)
		len += skb_shinfo(skb)->frags[i].size;
	return len + skb_headlen(skb);
}

static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
				      struct page *page, int off, int size)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	frag->page		  = page;
	frag->page_offset	  = off;
	frag->size		  = size;
	skb_shinfo(skb)->nr_frags = i + 1;
}
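
/*
 * Illustrative sketch: appending a page fragment to an skb and fixing
 * up the length fields by hand ("page" and "len" are hypothetical;
 * the truesize accounting shown is the simplest possible choice):
 *
 *	get_page(page);
 *	skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page, 0, len);
 *	skb->len      += len;
 *	skb->data_len += len;
 *	skb->truesize += len;
 */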

extern void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page,
			    int off, int size);

#define SKB_PAGE_ASSERT(skb)	BUG_ON(skb_shinfo(skb)->nr_frags)
#define SKB_FRAG_ASSERT(skb)	BUG_ON(skb_shinfo(skb)->frag_list)
#define SKB_LINEAR_ASSERT(skb)	BUG_ON(skb_is_nonlinear(skb))

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data - skb->head;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb_reset_tail_pointer(skb);
	skb->tail += offset;
}
#else /* NET_SKBUFF_DATA_USES_OFFSET */
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb->tail = skb->data + offset;
}

#endif /* NET_SKBUFF_DATA_USES_OFFSET */

/*
 *	Add data to an sk_buff
 */
extern unsigned char *skb_put(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb_tail_pointer(skb);
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len  += len;
	return tmp;
}

extern unsigned char *skb_push(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len  += len;
	return skb->data;
}

extern unsigned char *skb_pull(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
{
	skb->len -= len;
	BUG_ON(skb->len < skb->data_len);
	return skb->data += len;
}

extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);

static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
{
	if (len > skb_headlen(skb) &&
	    !__pskb_pull_tail(skb, len - skb_headlen(skb)))
		return NULL;
	skb->len -= len;
	return skb->data += len;
}

static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
}

static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
{
	if (likely(len <= skb_headlen(skb)))
		return 1;
	if (unlikely(len > skb->len))
		return 0;
	return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
}
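
/*
 * Illustrative sketch: a protocol handler must make sure the bytes it
 * is about to read are in the linear area before dereferencing a
 * header pointer:
 *
 *	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
 *		goto drop;
 *	iph = (struct iphdr *)skb->data;	// now safe to read
 *
 * Note that pskb_may_pull() may reallocate the linear buffer, so any
 * previously cached pointers into skb->data must be recomputed.
 */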

/**
 *	skb_headroom - bytes at buffer head
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the head of an &sk_buff.
 */
static inline unsigned int skb_headroom(const struct sk_buff *skb)
{
	return skb->data - skb->head;
}

/**
 *	skb_tailroom - bytes at buffer end
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the tail of an &sk_buff.
 */
static inline int skb_tailroom(const struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
}

/**
 *	skb_reserve - adjust headroom
 *	@skb: buffer to alter
 *	@len: bytes to move
 *
 *	Increase the headroom of an empty &sk_buff by reducing the tail
 *	room. This is only allowed for an empty buffer.
 */
static inline void skb_reserve(struct sk_buff *skb, int len)
{
	skb->data += len;
	skb->tail += len;
}

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
	return skb->head + skb->transport_header;
}

static inline void skb_reset_transport_header(struct sk_buff *skb)
{
	skb->transport_header = skb->data - skb->head;
}

static inline void skb_set_transport_header(struct sk_buff *skb,
					    const int offset)
{
	skb_reset_transport_header(skb);
	skb->transport_header += offset;
}

static inline unsigned char *skb_network_header(const struct sk_buff *skb)
{
	return skb->head + skb->network_header;
}

static inline void skb_reset_network_header(struct sk_buff *skb)
{
	skb->network_header = skb->data - skb->head;
}

static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
{
	skb_reset_network_header(skb);
	skb->network_header += offset;
}

static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
	return skb->head + skb->mac_header;
}

static inline int skb_mac_header_was_set(const struct sk_buff *skb)
{
	return skb->mac_header != ~0U;
}

static inline void skb_reset_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->data - skb->head;
}

static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
	skb_reset_mac_header(skb);
	skb->mac_header += offset;
}

#else /* NET_SKBUFF_DATA_USES_OFFSET */

static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
	return skb->transport_header;
}

static inline void skb_reset_transport_header(struct sk_buff *skb)
{
	skb->transport_header = skb->data;
}

static inline void skb_set_transport_header(struct sk_buff *skb,
					    const int offset)
{
	skb->transport_header = skb->data + offset;
}

static inline unsigned char *skb_network_header(const struct sk_buff *skb)
{
	return skb->network_header;
}

static inline void skb_reset_network_header(struct sk_buff *skb)
{
	skb->network_header = skb->data;
}

static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
{
	skb->network_header = skb->data + offset;
}

static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
	return skb->mac_header;
}

static inline int skb_mac_header_was_set(const struct sk_buff *skb)
{
	return skb->mac_header != NULL;
}

static inline void skb_reset_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->data;
}

static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
	skb->mac_header = skb->data + offset;
}
#endif /* NET_SKBUFF_DATA_USES_OFFSET */

static inline int skb_transport_offset(const struct sk_buff *skb)
{
	return skb_transport_header(skb) - skb->data;
}

static inline u32 skb_network_header_len(const struct sk_buff *skb)
{
	return skb->transport_header - skb->network_header;
}

static inline int skb_network_offset(const struct sk_buff *skb)
{
	return skb_network_header(skb) - skb->data;
}
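
/*
 * Illustrative sketch: a receive path typically records header
 * positions as it parses, so later code can use the accessors above
 * (ETH_HLEN is the 14-byte ethernet header length):
 *
 *	skb_reset_mac_header(skb);	// skb->data is at the ethernet hdr
 *	skb_pull(skb, ETH_HLEN);	// advance past the link layer
 *	skb_reset_network_header(skb);	// skb->data is now at the IP hdr
 *
 * After this, skb_network_header(skb) stays valid even as skb->data
 * advances further, and skb_network_offset(skb) gives its distance
 * from the current data pointer.
 */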
       
  1225 
       
  1226 /*
       
  1227  * CPUs often take a performance hit when accessing unaligned memory
       
  1228  * locations. The actual performance hit varies, it can be small if the
       
  1229  * hardware handles it or large if we have to take an exception and fix it
       
  1230  * in software.
       
  1231  *
       
  1232  * Since an ethernet header is 14 bytes network drivers often end up with
       
  1233  * the IP header at an unaligned offset. The IP header can be aligned by
       
  1234  * shifting the start of the packet by 2 bytes. Drivers should do this
       
  1235  * with:
       
  1236  *
       
  1237  * skb_reserve(NET_IP_ALIGN);
       
  1238  *
       
  1239  * The downside to this alignment of the IP header is that the DMA is now
       
  1240  * unaligned. On some architectures the cost of an unaligned DMA is high
       
  1241  * and this cost outweighs the gains made by aligning the IP header.
       
  1242  * 
       
  1243  * Since this trade off varies between architectures, we allow NET_IP_ALIGN
       
  1244  * to be overridden.
       
  1245  */
       
  1246 #ifndef NET_IP_ALIGN
       
  1247 #define NET_IP_ALIGN	2
       
  1248 #endif
       
  1249 
       
  1250 /*
       
  1251  * The networking layer reserves some headroom in skb data (via
       
  1252  * dev_alloc_skb). This is used to avoid having to reallocate skb data when
       
  1253  * the header has to grow. In the default case, if the header has to grow
       
  1254  * 16 bytes or less we avoid the reallocation.
       
  1255  *
       
  1256  * Unfortunately this headroom changes the DMA alignment of the resulting
       
  1257  * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive
       
  1258  * on some architectures. An architecture can override this value,
       
  1259  * perhaps setting it to a cacheline in size (since that will maintain
       
  1260  * cacheline alignment of the DMA). It must be a power of 2.
       
  1261  *
       
  1262  * Various parts of the networking layer expect at least 16 bytes of
       
  1263  * headroom, you should not reduce this.
       
  1264  */
       
  1265 #ifndef NET_SKB_PAD
       
  1266 #define NET_SKB_PAD	16
       
  1267 #endif
       
  1268 
       
  1269 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
       
  1270 
       
  1271 static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
       
  1272 {
       
  1273 	if (unlikely(skb->data_len)) {
       
  1274 		WARN_ON(1);
       
  1275 		return;
       
  1276 	}
       
  1277 	skb->len = len;
       
  1278 	skb_set_tail_pointer(skb, len);
       
  1279 }
       
  1280 
       
  1281 extern void skb_trim(struct sk_buff *skb, unsigned int len);
       
  1282 
       
  1283 static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
       
  1284 {
       
  1285 	if (skb->data_len)
       
  1286 		return ___pskb_trim(skb, len);
       
  1287 	__skb_trim(skb, len);
       
  1288 	return 0;
       
  1289 }
       
  1290 
       
  1291 static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
       
  1292 {
       
  1293 	return (len < skb->len) ? __pskb_trim(skb, len) : 0;
       
  1294 }
       
/**
 *	pskb_trim_unique - remove end from a paged unique (not cloned) buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	This is identical to pskb_trim except that the caller knows that
 *	the skb is not cloned so we should never get an error due to out-
 *	of-memory.
 */
static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
{
	int err = pskb_trim(skb, len);
	BUG_ON(err);
}

/**
 *	skb_orphan - orphan a buffer
 *	@skb: buffer to orphan
 *
 *	If a buffer currently has an owner then we call the owner's
 *	destructor function and make the @skb unowned. The buffer continues
 *	to exist but is no longer charged to its former owner.
 */
static inline void skb_orphan(struct sk_buff *skb)
{
	if (skb->destructor)
		skb->destructor(skb);
	skb->destructor = NULL;
	skb->sk		= NULL;
}

/**
 *	__skb_queue_purge - empty a list
 *	@list: list to empty
 *
 *	Delete all buffers on an &sk_buff list. Each buffer is removed from
 *	the list and one reference dropped. This function does not take the
 *	list lock and the caller must hold the relevant locks to use it.
 */
extern void skb_queue_purge(struct sk_buff_head *list);
static inline void __skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb = __skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}
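
/*
 * Sketch of the locking contract above (illustrative): hold the queue
 * lock around __skb_queue_purge(), or use skb_queue_purge() which takes
 * it for you:
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&list->lock, flags);
 *	__skb_queue_purge(list);
 *	spin_unlock_irqrestore(&list->lock, flags);
 */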
       
/**
 *	__dev_alloc_skb - allocate an skbuff for receiving
 *	@length: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */
static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
					      gfp_t gfp_mask)
{
	struct sk_buff *skb = alloc_skb(length + NET_SKB_PAD, gfp_mask);
	if (likely(skb))
		skb_reserve(skb, NET_SKB_PAD);
	return skb;
}

extern struct sk_buff *dev_alloc_skb(unsigned int length);

extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
		unsigned int length, gfp_t gfp_mask);

/**
 *	netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@length: length to allocate
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory. Although this function
 *	allocates memory, it can be called from an interrupt.
 */
static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
		unsigned int length)
{
	return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
}

extern struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask);

/**
 *	netdev_alloc_page - allocate a page for ps-rx on a specific device
 *	@dev: network device to receive on
 *
 *	Allocate a new page node-local to the specified device.
 *
 *	%NULL is returned if there is no free memory.
 */
static inline struct page *netdev_alloc_page(struct net_device *dev)
{
	return __netdev_alloc_page(dev, GFP_ATOMIC);
}

static inline void netdev_free_page(struct net_device *dev, struct page *page)
{
	__free_page(page);
}
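
/*
 * Illustrative sketch of paged receive: a driver may hang such a page
 * off an skb as fragment i (i, off and size are assumed driver-computed
 * values):
 *
 *	page = netdev_alloc_page(dev);
 *	if (!page)
 *		goto no_mem;
 *	skb_fill_page_desc(skb, i, page, off, size);
 *	skb->len += size;
 *	skb->data_len += size;
 *	skb->truesize += size;
 */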
       
/**
 *	skb_clone_writable - is the header of a clone writable
 *	@skb: buffer to check
 *	@len: length up to which to write
 *
 *	Returns true if modifying the header part of the cloned buffer
 *	does not require the data to be copied.
 */
static inline int skb_clone_writable(struct sk_buff *skb, unsigned int len)
{
	return !skb_header_cloned(skb) &&
	       skb_headroom(skb) + len <= skb->hdr_len;
}

static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
			    int cloned)
{
	int delta = 0;

	if (headroom < NET_SKB_PAD)
		headroom = NET_SKB_PAD;
	if (headroom > skb_headroom(skb))
		delta = headroom - skb_headroom(skb);

	if (delta || cloned)
		return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
					GFP_ATOMIC);
	return 0;
}

/**
 *	skb_cow - copy header of skb when it is required
 *	@skb: buffer to cow
 *	@headroom: needed headroom
 *
 *	If the skb passed lacks sufficient headroom or its data part
 *	is shared, data is reallocated. If reallocation fails, an error
 *	is returned and the original skb is not changed.
 *
 *	The result is an skb with a writable area skb->head...skb->tail
 *	and at least @headroom of space at head.
 */
static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
{
	return __skb_cow(skb, headroom, skb_cloned(skb));
}

/**
 *	skb_cow_head - skb_cow but only making the head writable
 *	@skb: buffer to cow
 *	@headroom: needed headroom
 *
 *	This function is identical to skb_cow except that we replace the
 *	skb_cloned check by skb_header_cloned.  It should be used when
 *	you only need to push on some header and do not need to modify
 *	the data.
 */
static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
{
	return __skb_cow(skb, headroom, skb_header_cloned(skb));
}
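
/*
 * Usage sketch (illustrative, modelled on the VLAN tag insertion path):
 * guarantee private, sufficient headroom before pushing a new header;
 * VLAN_HLEN and struct vlan_ethhdr are assumed from <linux/if_vlan.h>:
 *
 *	if (skb_cow_head(skb, VLAN_HLEN))
 *		goto drop;
 *	veth = (struct vlan_ethhdr *)skb_push(skb, VLAN_HLEN);
 */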
       
/**
 *	skb_padto	- pad an skbuff up to a minimal size
 *	@skb: buffer to pad
 *	@len: minimal length
 *
 *	Pads up a buffer to ensure the trailing bytes exist and are
 *	blanked. If the buffer already contains sufficient data it
 *	is untouched. Otherwise it is extended. Returns zero on
 *	success. The skb is freed on error.
 */
static inline int skb_padto(struct sk_buff *skb, unsigned int len)
{
	unsigned int size = skb->len;
	if (likely(size >= len))
		return 0;
	return skb_pad(skb, len - size);
}
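
/*
 * Usage sketch (illustrative): Ethernet drivers commonly pad runt frames
 * to the 60-byte minimum (ETH_ZLEN) in their transmit routine. Note that
 * the skb has already been freed when this fails:
 *
 *	if (skb_padto(skb, ETH_ZLEN))
 *		return NETDEV_TX_OK;
 */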
       
static inline int skb_add_data(struct sk_buff *skb,
			       char __user *from, int copy)
{
	const int off = skb->len;

	if (skb->ip_summed == CHECKSUM_NONE) {
		int err = 0;
		__wsum csum = csum_and_copy_from_user(from, skb_put(skb, copy),
							    copy, 0, &err);
		if (!err) {
			skb->csum = csum_block_add(skb->csum, csum, off);
			return 0;
		}
	} else if (!copy_from_user(skb_put(skb, copy), from, copy))
		return 0;

	__skb_trim(skb, off);
	return -EFAULT;
}

static inline int skb_can_coalesce(struct sk_buff *skb, int i,
				   struct page *page, int off)
{
	if (i) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];

		return page == frag->page &&
		       off == frag->page_offset + frag->size;
	}
	return 0;
}
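
/*
 * Illustrative sketch, loosely following the socket sendpage paths:
 * either extend the last fragment or start a new one (page, off and
 * copy are assumed caller-supplied values):
 *
 *	i = skb_shinfo(skb)->nr_frags;
 *	if (skb_can_coalesce(skb, i, page, off)) {
 *		skb_shinfo(skb)->frags[i - 1].size += copy;
 *	} else {
 *		get_page(page);
 *		skb_fill_page_desc(skb, i, page, off, copy);
 *	}
 */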
       
static inline int __skb_linearize(struct sk_buff *skb)
{
	return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
}

/**
 *	skb_linearize - convert paged skb to linear one
 *	@skb: buffer to linearize
 *
 *	If there is no free memory -ENOMEM is returned, otherwise zero
 *	is returned and the old skb data released.
 */
static inline int skb_linearize(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
}

/**
 *	skb_linearize_cow - make sure skb is linear and writable
 *	@skb: buffer to process
 *
 *	If there is no free memory -ENOMEM is returned, otherwise zero
 *	is returned and the old skb data released.
 */
static inline int skb_linearize_cow(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) || skb_cloned(skb) ?
	       __skb_linearize(skb) : 0;
}
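
/*
 * Usage sketch (illustrative): code about to read or rewrite the whole
 * packet through skb->data must make it linear (and unshared, if
 * writing) first:
 *
 *	if (skb_linearize_cow(skb))
 *		goto drop;	(-ENOMEM)
 */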
       
/**
 *	skb_postpull_rcsum - update checksum for received skb after pull
 *	@skb: buffer to update
 *	@start: start of data before pull
 *	@len: length of data pulled
 *
 *	After doing a pull on a received packet, you need to call this to
 *	update the CHECKSUM_COMPLETE checksum, or set ip_summed to
 *	CHECKSUM_NONE so that it can be recomputed from scratch.
 */
static inline void skb_postpull_rcsum(struct sk_buff *skb,
				      const void *start, unsigned int len)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
}
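
/*
 * Usage sketch (illustrative): pair a manual pull with the checksum
 * fix-up, passing the data pointer as it was before the pull (ETH_HLEN
 * is assumed from <linux/if_ether.h>):
 *
 *	const void *start = skb->data;
 *
 *	__skb_pull(skb, ETH_HLEN);
 *	skb_postpull_rcsum(skb, start, ETH_HLEN);
 *
 * skb_pull_rcsum(), declared below, combines both steps.
 */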
       
unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);

/**
 *	pskb_trim_rcsum - trim received skb and update checksum
 *	@skb: buffer to trim
 *	@len: new length
 *
 *	This is exactly the same as pskb_trim except that it ensures the
 *	checksum of received packets is still valid after the operation.
 */
static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
{
	if (likely(len >= skb->len))
		return 0;
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
	return __pskb_trim(skb, len);
}

#define skb_queue_walk(queue, skb) \
		for (skb = (queue)->next;					\
		     prefetch(skb->next), (skb != (struct sk_buff *)(queue));	\
		     skb = skb->next)

#define skb_queue_walk_safe(queue, skb, tmp)					\
		for (skb = (queue)->next, tmp = skb->next;			\
		     skb != (struct sk_buff *)(queue);				\
		     skb = tmp, tmp = skb->next)

#define skb_queue_walk_from(queue, skb)						\
		for (; prefetch(skb->next), (skb != (struct sk_buff *)(queue));	\
		     skb = skb->next)

#define skb_queue_walk_from_safe(queue, skb, tmp)				\
		for (tmp = skb->next;						\
		     skb != (struct sk_buff *)(queue);				\
		     skb = tmp, tmp = skb->next)

#define skb_queue_reverse_walk(queue, skb) \
		for (skb = (queue)->prev;					\
		     prefetch(skb->prev), (skb != (struct sk_buff *)(queue));	\
		     skb = skb->prev)
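
/*
 * Usage sketch (illustrative): walk a queue under its lock, using the
 * _safe variant when buffers may be unlinked during the walk
 * (frame_done() is an assumed predicate):
 *
 *	struct sk_buff *skb, *tmp;
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&queue->lock, flags);
 *	skb_queue_walk_safe(queue, skb, tmp) {
 *		if (frame_done(skb)) {
 *			__skb_unlink(skb, queue);
 *			kfree_skb(skb);
 *		}
 *	}
 *	spin_unlock_irqrestore(&queue->lock, flags);
 */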
       
extern struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
					   int *peeked, int *err);
extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
					 int noblock, int *err);
extern unsigned int    datagram_poll(struct file *file, struct socket *sock,
				     struct poll_table_struct *wait);
extern int	       skb_copy_datagram_iovec(const struct sk_buff *from,
					       int offset, struct iovec *to,
					       int size);
extern int	       skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
							int hlen,
							struct iovec *iov);
extern int	       skb_copy_datagram_from_iovec(struct sk_buff *skb,
						    int offset,
						    struct iovec *from,
						    int len);
extern void	       skb_free_datagram(struct sock *sk, struct sk_buff *skb);
extern int	       skb_kill_datagram(struct sock *sk, struct sk_buff *skb,
					 unsigned int flags);
extern __wsum	       skb_checksum(const struct sk_buff *skb, int offset,
				    int len, __wsum csum);
extern int	       skb_copy_bits(const struct sk_buff *skb, int offset,
				     void *to, int len);
extern int	       skb_store_bits(struct sk_buff *skb, int offset,
				      const void *from, int len);
extern __wsum	       skb_copy_and_csum_bits(const struct sk_buff *skb,
					      int offset, u8 *to, int len,
					      __wsum csum);
extern int             skb_splice_bits(struct sk_buff *skb,
						unsigned int offset,
						struct pipe_inode_info *pipe,
						unsigned int len,
						unsigned int flags);
extern void	       skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
extern void	       skb_split(struct sk_buff *skb,
				 struct sk_buff *skb1, const u32 len);

extern struct sk_buff *skb_segment(struct sk_buff *skb, int features);

static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
				       int len, void *buffer)
{
	int hlen = skb_headlen(skb);

	if (hlen - offset >= len)
		return skb->data + offset;

	if (skb_copy_bits(skb, offset, buffer, len) < 0)
		return NULL;

	return buffer;
}
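
/*
 * Usage sketch (illustrative, netfilter-style): fetch a header that may
 * be split across fragments into a stack buffer; the result points
 * either into the skb or at the copy (offset is an assumed transport
 * header offset):
 *
 *	struct tcphdr _tcph;
 *	const struct tcphdr *th;
 *
 *	th = skb_header_pointer(skb, offset, sizeof(_tcph), &_tcph);
 *	if (th == NULL)
 *		goto short_packet;
 */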
       
static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
					     void *to,
					     const unsigned int len)
{
	memcpy(to, skb->data, len);
}

static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
						    const int offset, void *to,
						    const unsigned int len)
{
	memcpy(to, skb->data + offset, len);
}

static inline void skb_copy_to_linear_data(struct sk_buff *skb,
					   const void *from,
					   const unsigned int len)
{
	memcpy(skb->data, from, len);
}

static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
						  const int offset,
						  const void *from,
						  const unsigned int len)
{
	memcpy(skb->data + offset, from, len);
}

extern void skb_init(void);

/**
 *	skb_get_timestamp - get timestamp from a skb
 *	@skb: skb to get stamp from
 *	@stamp: pointer to struct timeval to store stamp in
 *
 *	Timestamps are stored in the skb as offsets to a base timestamp.
 *	This function converts the offset back to a struct timeval and stores
 *	it in stamp.
 */
static inline void skb_get_timestamp(const struct sk_buff *skb, struct timeval *stamp)
{
	*stamp = ktime_to_timeval(skb->tstamp);
}

static inline void __net_timestamp(struct sk_buff *skb)
{
	skb->tstamp = ktime_get_real();
}

static inline ktime_t net_timedelta(ktime_t t)
{
	return ktime_sub(ktime_get_real(), t);
}

static inline ktime_t net_invalid_timestamp(void)
{
	return ktime_set(0, 0);
}

extern __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
extern __sum16 __skb_checksum_complete(struct sk_buff *skb);

static inline int skb_csum_unnecessary(const struct sk_buff *skb)
{
	return skb->ip_summed & CHECKSUM_UNNECESSARY;
}

/**
 *	skb_checksum_complete - Calculate checksum of an entire packet
 *	@skb: packet to process
 *
 *	This function calculates the checksum over the entire packet plus
 *	the value of skb->csum.  The latter can be used to supply the
 *	checksum of a pseudo header as used by TCP/UDP.  It returns the
 *	checksum.
 *
 *	For protocols that contain complete checksums such as ICMP/TCP/UDP,
 *	this function can be used to verify the checksum on received
 *	packets.  In that case the function should return zero if the
 *	checksum is correct.  In particular, this function will return zero
 *	if skb->ip_summed is CHECKSUM_UNNECESSARY which indicates that the
 *	hardware has already verified the correctness of the checksum.
 */
static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
{
	return skb_csum_unnecessary(skb) ?
	       0 : __skb_checksum_complete(skb);
}
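
/*
 * Usage sketch (illustrative, simplified from the UDP receive path):
 * once skb->csum holds the pseudo-header sum for CHECKSUM_COMPLETE
 * packets, full validation reduces to
 *
 *	if (skb_checksum_complete(skb))
 *		goto csum_error;
 */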
       
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
extern void nf_conntrack_destroy(struct nf_conntrack *nfct);
static inline void nf_conntrack_put(struct nf_conntrack *nfct)
{
	if (nfct && atomic_dec_and_test(&nfct->use))
		nf_conntrack_destroy(nfct);
}
static inline void nf_conntrack_get(struct nf_conntrack *nfct)
{
	if (nfct)
		atomic_inc(&nfct->use);
}
static inline void nf_conntrack_get_reasm(struct sk_buff *skb)
{
	if (skb)
		atomic_inc(&skb->users);
}
static inline void nf_conntrack_put_reasm(struct sk_buff *skb)
{
	if (skb)
		kfree_skb(skb);
}
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
{
	if (nf_bridge && atomic_dec_and_test(&nf_bridge->use))
		kfree(nf_bridge);
}
static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
{
	if (nf_bridge)
		atomic_inc(&nf_bridge->use);
}
#endif /* CONFIG_BRIDGE_NETFILTER */
static inline void nf_reset(struct sk_buff *skb)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(skb->nfct);
	skb->nfct = NULL;
	nf_conntrack_put_reasm(skb->nfct_reasm);
	skb->nfct_reasm = NULL;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(skb->nf_bridge);
	skb->nf_bridge = NULL;
#endif
}

/* Note: This doesn't put any conntrack and bridge info in dst. */
static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	dst->nfct = src->nfct;
	nf_conntrack_get(src->nfct);
	dst->nfctinfo = src->nfctinfo;
	dst->nfct_reasm = src->nfct_reasm;
	nf_conntrack_get_reasm(src->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	dst->nf_bridge  = src->nf_bridge;
	nf_bridge_get(src->nf_bridge);
#endif
}

static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(dst->nfct);
	nf_conntrack_put_reasm(dst->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(dst->nf_bridge);
#endif
	__nf_copy(dst, src);
}

#ifdef CONFIG_NETWORK_SECMARK
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{
	to->secmark = from->secmark;
}

static inline void skb_init_secmark(struct sk_buff *skb)
{
	skb->secmark = 0;
}
#else
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{ }

static inline void skb_init_secmark(struct sk_buff *skb)
{ }
#endif

static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
{
	skb->queue_mapping = queue_mapping;
}

static inline u16 skb_get_queue_mapping(struct sk_buff *skb)
{
	return skb->queue_mapping;
}

static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
{
	to->queue_mapping = from->queue_mapping;
}

static inline int skb_is_gso(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size;
}

static inline int skb_is_gso_v6(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
}

extern void __skb_warn_lro_forwarding(const struct sk_buff *skb);

static inline bool skb_warn_if_lro(const struct sk_buff *skb)
{
	/* LRO sets gso_size but not gso_type, whereas if GSO is really
	 * wanted then gso_type will be set. */
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	if (shinfo->gso_size != 0 && unlikely(shinfo->gso_type == 0)) {
		__skb_warn_lro_forwarding(skb);
		return true;
	}
	return false;
}
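
/*
 * Usage sketch (illustrative): forwarding paths drop LRO-merged packets,
 * since they cannot be correctly re-segmented on output:
 *
 *	if (skb_warn_if_lro(skb))
 *		goto drop;
 */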
       
static inline void skb_forward_csum(struct sk_buff *skb)
{
	/* Unfortunately we don't support this one.  Any brave souls? */
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
}

bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
#endif	/* __KERNEL__ */
#endif	/* _LINUX_SKBUFF_H */