patch-2.3.99-pre4 linux/include/linux/skbuff.h
- Lines: 592
- Date: Mon Apr 10 23:23:51 2000
- Orig file: v2.3.99-pre3/linux/include/linux/skbuff.h
- Orig date: Sun Mar 19 18:35:31 2000
diff -u --recursive --new-file v2.3.99-pre3/linux/include/linux/skbuff.h linux/include/linux/skbuff.h
@@ -207,20 +207,45 @@
return (atomic_t *)(skb->end);
}
+/**
+ * skb_queue_empty - check if a queue is empty
+ * @list: queue head
+ *
+ * Returns true if the queue is empty, false otherwise.
+ */
+
extern __inline__ int skb_queue_empty(struct sk_buff_head *list)
{
return (list->next == (struct sk_buff *) list);
}
+/**
+ * skb_get - reference buffer
+ * @skb: buffer to reference
+ *
+ * Makes another reference to a socket buffer and returns a pointer
+ * to the buffer.
+ */
+
extern __inline__ struct sk_buff *skb_get(struct sk_buff *skb)
{
atomic_inc(&skb->users);
return skb;
}
-/* If users==1, we are the only owner and are can avoid redundant
+/*
+ * If users==1, we are the only owner and can avoid redundant
* atomic change.
*/
+
+/**
+ * kfree_skb - free an sk_buff
+ * @skb: The buffer to free
+ *
+ * Drop a reference to the buffer and free it if the usage count has
+ * hit zero.
+ */
+
extern __inline__ void kfree_skb(struct sk_buff *skb)
{
if (atomic_read(&skb->users) == 1 || atomic_dec_and_test(&skb->users))
@@ -234,16 +259,47 @@
kfree_skbmem(skb);
}
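
A minimal sketch of how skb_get() and kfree_skb() pair up in practice; hold_for_processing() is a hypothetical helper, not part of this patch:

#include <linux/skbuff.h>

/* Take our own reference so the buffer cannot be freed under us,
 * then drop it when we are done. kfree_skb() only frees the buffer
 * once the last reference is gone. */
static void hold_for_processing(struct sk_buff *skb)
{
	struct sk_buff *ours = skb_get(skb);	/* users is now >= 2 */

	/* ... examine ours->data ... */

	kfree_skb(ours);	/* drop our reference; frees at zero */
}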
+/**
+ * skb_cloned - is the buffer a clone
+ * @skb: Buffer to check
+ *
+ * Returns true if the buffer was generated with skb_clone and is
+ * one of multiple shared copies of the buffer. Cloned buffers are
+ * shared data, so they must not be written to under normal circumstances.
+ */
+
extern __inline__ int skb_cloned(struct sk_buff *skb)
{
return skb->cloned && atomic_read(skb_datarefp(skb)) != 1;
}
+/**
+ * skb_shared - is the buffer shared
+ * @skb: buffer to check
+ *
+ * Returns true if more than one person has a reference to this
+ * buffer.
+ */
+
extern __inline__ int skb_shared(struct sk_buff *skb)
{
return (atomic_read(&skb->users) != 1);
}
+/**
+ * skb_share_check - check if buffer is shared and if so clone it
+ * @skb: buffer to check
+ * @pri: priority for memory allocation
+ *
+ * If the buffer is shared the buffer is cloned and the old copy
+ * drops a reference. A new clone with a single reference is returned.
+ * If the buffer is not shared the original buffer is returned. When
+ * called from interrupt state or with spinlocks held, pri must
+ * be GFP_ATOMIC.
+ *
+ * NULL is returned on a memory allocation failure.
+ */
+
extern __inline__ struct sk_buff *skb_share_check(struct sk_buff *skb, int pri)
{
if (skb_shared(skb)) {
@@ -263,6 +319,20 @@
* a packet thats being forwarded.
*/
+/**
+ * skb_unshare - make a copy of a shared buffer
+ * @skb: buffer to check
+ * @pri: priority for memory allocation
+ *
+ * If the socket buffer is a clone then this function creates a new
+ * copy of the data, drops a reference count on the old copy and returns
+ * the new copy with the reference count at 1. If the buffer is not a clone
+ * the original buffer is returned. When called with a spinlock held or
+ * from interrupt state, pri must be GFP_ATOMIC.
+ *
+ * NULL is returned on a memory allocation failure.
+ */
+
extern __inline__ struct sk_buff *skb_unshare(struct sk_buff *skb, int pri)
{
struct sk_buff *nskb;
@@ -273,11 +343,18 @@
return nskb;
}
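
A sketch of how these checks combine in a handler that must write to the packet it receives; my_proto_rcv() is hypothetical:

#include <linux/skbuff.h>
#include <linux/errno.h>

static int my_proto_rcv(struct sk_buff *skb)
{
	/* Get a reference nobody else holds; on failure the old
	 * reference has already been dropped for us. */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	/* Get data nobody else can see; same failure semantics. */
	skb = skb_unshare(skb, GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	skb->data[0] ^= 1;	/* now safe to modify the payload */
	kfree_skb(skb);
	return 0;
}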
-/*
+/**
+ * skb_peek - peek at the head of a list
+ * @list_: list to peek at
+ *
* Peek an sk_buff. Unlike most other operations you _MUST_
* be careful with this one. A peek leaves the buffer on the
- * list and someone else may run off with it. For an interrupt
- * type system cli() peek the buffer copy the data and sti();
+ * list and someone else may run off with it. You must hold
+ * the appropriate locks or have a private queue to do this.
+ *
+ * Returns NULL for an empty list or a pointer to the head element.
+ * The reference count is not incremented and the reference is therefore
+ * volatile. Use with caution.
*/
extern __inline__ struct sk_buff *skb_peek(struct sk_buff_head *list_)
@@ -288,6 +365,20 @@
return list;
}
+/**
+ * skb_peek_tail - peek at the tail of a list
+ * @list_: list to peek at
+ *
+ * Peek an sk_buff. Unlike most other operations you _MUST_
+ * be careful with this one. A peek leaves the buffer on the
+ * list and someone else may run off with it. You must hold
+ * the appropriate locks or have a private queue to do this.
+ *
+ * Returns NULL for an empty list or a pointer to the tail element.
+ * The reference count is not incremented and the reference is therefore
+ * volatile. Use with caution.
+ */
+
extern __inline__ struct sk_buff *skb_peek_tail(struct sk_buff_head *list_)
{
struct sk_buff *list = ((struct sk_buff *)list_)->prev;
@@ -296,8 +387,11 @@
return list;
}
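
Because a peeked buffer stays on the list, the usual pattern wraps the peek in the queue's own lock. A sketch, with head_is_high_priority() hypothetical:

#include <linux/skbuff.h>
#include <linux/spinlock.h>

static int head_is_high_priority(struct sk_buff_head *q)
{
	struct sk_buff *skb;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&q->lock, flags);
	skb = skb_peek(q);		/* NULL if the queue is empty */
	if (skb != NULL)
		ret = (skb->priority != 0);
	spin_unlock_irqrestore(&q->lock, flags);

	return ret;			/* skb must not be used past here */
}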
-/*
- * Return the length of an sk_buff queue
+/**
+ * skb_queue_len - get queue length
+ * @list_: list to measure
+ *
+ * Return the length of an sk_buff queue.
*/
extern __inline__ __u32 skb_queue_len(struct sk_buff_head *list_)
@@ -320,6 +414,17 @@
* can only be called with interrupts disabled.
*/
+/**
+ * __skb_queue_head - queue a buffer at the list head
+ * @list: list to use
+ * @newsk: buffer to queue
+ *
+ * Queue a buffer at the start of a list. This function takes no locks
+ * and you must therefore hold required locks before calling it.
+ *
+ * A buffer cannot be placed on two lists at the same time.
+ */
+
extern __inline__ void __skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
{
struct sk_buff *prev, *next;
@@ -334,6 +439,19 @@
prev->next = newsk;
}
+
+/**
+ * skb_queue_head - queue a buffer at the list head
+ * @list: list to use
+ * @newsk: buffer to queue
+ *
+ * Queue a buffer at the start of the list. This function takes the
+ * list lock and can be used safely with other locking sk_buff
+ * functions.
+ *
+ * A buffer cannot be placed on two lists at the same time.
+ */
+
extern __inline__ void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
{
unsigned long flags;
@@ -343,9 +461,17 @@
spin_unlock_irqrestore(&list->lock, flags);
}
-/*
- * Insert an sk_buff at the end of a list.
- */
+/**
+ * __skb_queue_tail - queue a buffer at the list tail
+ * @list: list to use
+ * @newsk: buffer to queue
+ *
+ * Queue a buffer at the end of a list. This function takes no locks
+ * and you must therefore hold required locks before calling it.
+ *
+ * A buffer cannot be placed on two lists at the same time.
+ */
+
extern __inline__ void __skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
{
@@ -361,6 +487,18 @@
prev->next = newsk;
}
+/**
+ * skb_queue_tail - queue a buffer at the list tail
+ * @list: list to use
+ * @newsk: buffer to queue
+ *
+ * Queue a buffer at the tail of the list. This function takes the
+ * list lock and can be used safely with other locking sk_buff
+ * functions.
+ *
+ * A buffer cannot be placed on two lists at the same time.
+ */
+
extern __inline__ void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
{
unsigned long flags;
@@ -370,8 +508,13 @@
spin_unlock_irqrestore(&list->lock, flags);
}
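
Together the locked variants turn an sk_buff_head into a simple FIFO with no caller-side locking; a sketch (function names hypothetical):

#include <linux/skbuff.h>

static void fifo_add(struct sk_buff_head *q, struct sk_buff *skb)
{
	skb_queue_tail(q, skb);		/* normal FIFO arrival order */
}

static void fifo_putback(struct sk_buff_head *q, struct sk_buff *skb)
{
	skb_queue_head(q, skb);		/* will be dequeued next */
}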
-/*
- * Remove an sk_buff from a list.
+/**
+ * __skb_dequeue - remove from the head of the queue
+ * @list: list to dequeue from
+ *
+ * Remove the head of the list. This function does not take any locks
+ * so it must be used with the appropriate locks held. The head item is
+ * returned or NULL if the list is empty.
*/
extern __inline__ struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
@@ -394,6 +537,15 @@
return result;
}
+/**
+ * skb_dequeue - remove from the head of the queue
+ * @list: list to dequeue from
+ *
+ * Remove the head of the list. The list lock is taken so the function
+ * may be used safely with other locking list functions. The head item is
+ * returned or NULL if the list is empty.
+ */
+
extern __inline__ struct sk_buff *skb_dequeue(struct sk_buff_head *list)
{
long flags;
@@ -421,9 +573,16 @@
list->qlen++;
}
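
The matching consumer side, again as a hypothetical sketch:

#include <linux/skbuff.h>

static void drain_fifo(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	/* skb_dequeue returns NULL once the queue is empty. */
	while ((skb = skb_dequeue(q)) != NULL) {
		/* ... process the packet ... */
		kfree_skb(skb);
	}
}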
-/*
- * Place a packet before a given packet in a list
+/**
+ * skb_insert - insert a buffer
+ * @old: buffer to insert before
+ * @newsk: buffer to insert
+ *
+ * Place a packet before a given packet in a list. The list locks are taken
+ * and this function is atomic with respect to other list locked calls.
+ * A buffer cannot be placed on two lists at the same time.
*/
+
extern __inline__ void skb_insert(struct sk_buff *old, struct sk_buff *newsk)
{
unsigned long flags;
@@ -442,6 +601,17 @@
__skb_insert(newsk, old, old->next, old->list);
}
+/**
+ * skb_append - append a buffer
+ * @old: buffer to insert after
+ * @newsk: buffer to insert
+ *
+ * Place a packet after a given packet in a list. The list locks are taken
+ * and this function is atomic with respect to other list locked calls.
+ * A buffer cannot be placed on two lists at the same time.
+ */
+
+
extern __inline__ void skb_append(struct sk_buff *old, struct sk_buff *newsk)
{
unsigned long flags;
@@ -455,6 +625,7 @@
* remove sk_buff from list. _Must_ be called atomically, and with
* the list known..
*/
+
extern __inline__ void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
struct sk_buff * next, * prev;
@@ -469,11 +640,17 @@
prev->next = next;
}
-/*
- * Remove an sk_buff from its list. Works even without knowing the list it
- * is sitting on, which can be handy at times. It also means that THE LIST
- * MUST EXIST when you unlink. Thus a list must have its contents unlinked
- * _FIRST_.
+/**
+ * skb_unlink - remove a buffer from a list
+ * @skb: buffer to remove
+ *
+ * Remove a packet from a list. The list locks are taken and this
+ * function is atomic with respect to other list locked calls.
+ *
+ * Works even without knowing the list it is sitting on, which can be
+ * handy at times. It also means that THE LIST MUST EXIST when you
+ * unlink. Thus a list must have its contents unlinked before it is
+ * destroyed.
*/
extern __inline__ void skb_unlink(struct sk_buff *skb)
@@ -491,6 +668,16 @@
}
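
A sketch of a cancellation path that exploits this; cancel_buffer() is hypothetical and assumes the caller still holds a reference to a queued buffer:

#include <linux/skbuff.h>

static void cancel_buffer(struct sk_buff *skb)
{
	skb_unlink(skb);	/* finds the list from the buffer itself */
	kfree_skb(skb);		/* drop the reference we hold */
}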
/* XXX: more streamlined implementation */
+
+/**
+ * __skb_dequeue_tail - remove from the tail of the queue
+ * @list: list to dequeue from
+ *
+ * Remove the tail of the list. This function does not take any locks
+ * so it must be used with the appropriate locks held. The tail item is
+ * returned or NULL if the list is empty.
+ */
+
extern __inline__ struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
{
struct sk_buff *skb = skb_peek_tail(list);
@@ -499,6 +686,15 @@
return skb;
}
+/**
+ * skb_dequeue_tail - remove from the tail of the queue
+ * @list: list to dequeue from
+ *
+ * Remove the tail of the list. The list lock is taken so the function
+ * may be used safely with other locking list functions. The tail item is
+ * returned or NULL if the list is empty.
+ */
+
extern __inline__ struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
{
long flags;
@@ -522,6 +718,16 @@
return tmp;
}
+/**
+ * skb_put - add data to a buffer
+ * @skb: buffer to use
+ * @len: amount of data to add
+ *
+ * This function extends the used data area of the buffer. If this would
+ * exceed the total buffer size the kernel will panic. A pointer to the
+ * first byte of the extra data is returned.
+ */
+
extern __inline__ unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
{
unsigned char *tmp=skb->tail;
@@ -540,6 +746,16 @@
return skb->data;
}
+/**
+ * skb_push - add data to the start of a buffer
+ * @skb: buffer to use
+ * @len: amount of data to add
+ *
+ * This function extends the used data area of the buffer at the buffer
+ * start. If this would exceed the total buffer headroom the kernel will
+ * panic. A pointer to the first byte of the extra data is returned.
+ */
+
extern __inline__ unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
{
skb->data-=len;
@@ -556,6 +772,17 @@
return skb->data+=len;
}
+/**
+ * skb_pull - remove data from the start of a buffer
+ * @skb: buffer to use
+ * @len: amount of data to remove
+ *
+ * This function removes data from the start of a buffer, returning
+ * the memory to the headroom. A pointer to the next data in the buffer
+ * is returned. Once the data has been pulled, future pushes will
+ * overwrite the old data.
+ */
+
extern __inline__ unsigned char * skb_pull(struct sk_buff *skb, unsigned int len)
{
if (len > skb->len)
@@ -563,28 +790,61 @@
return __skb_pull(skb,len);
}
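
Taken together, skb_reserve(), skb_put(), skb_push() and skb_pull() are the whole data-area API. A sketch of the canonical transmit-side sequence, where HDR_LEN and build_frame() are hypothetical:

#include <linux/skbuff.h>
#include <linux/string.h>

#define HDR_LEN 14	/* hypothetical link-layer header size */

static struct sk_buff *build_frame(const void *payload, unsigned int len)
{
	struct sk_buff *skb = alloc_skb(HDR_LEN + len, GFP_ATOMIC);
	unsigned char *p;

	if (skb == NULL)
		return NULL;

	skb_reserve(skb, HDR_LEN);	/* leave headroom for the header */
	p = skb_put(skb, len);		/* extend the used data area */
	memcpy(p, payload, len);
	p = skb_push(skb, HDR_LEN);	/* step back into the headroom */
	memset(p, 0, HDR_LEN);		/* ... fill in the real header ... */

	return skb;
}

The receive side undoes the last step with skb_pull(skb, HDR_LEN) before handing the payload upwards.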
+/**
+ * skb_headroom - bytes at buffer head
+ * @skb: buffer to check
+ *
+ * Return the number of bytes of free space at the head of an sk_buff.
+ */
+
extern __inline__ int skb_headroom(const struct sk_buff *skb)
{
return skb->data-skb->head;
}
+/**
+ * skb_tailroom - bytes at buffer end
+ * @skb: buffer to check
+ *
+ * Return the number of bytes of free space at the tail of an sk_buff.
+ */
+
extern __inline__ int skb_tailroom(const struct sk_buff *skb)
{
return skb->end-skb->tail;
}
+/**
+ * skb_reserve - adjust headroom
+ * @skb: buffer to alter
+ * @len: bytes to move
+ *
+ * Increase the headroom of an empty sk_buff by reducing the tail
+ * room. This is only allowed for an empty buffer.
+ */
+
extern __inline__ void skb_reserve(struct sk_buff *skb, unsigned int len)
{
skb->data+=len;
skb->tail+=len;
}
+
extern __inline__ void __skb_trim(struct sk_buff *skb, unsigned int len)
{
skb->len = len;
skb->tail = skb->data+len;
}
+/**
+ * skb_trim - remove end from a buffer
+ * @skb: buffer to alter
+ * @len: new length
+ *
+ * Cut the length of a buffer down by removing data from the tail. If
+ * the buffer is already under the length specified it is not modified.
+ */
+
extern __inline__ void skb_trim(struct sk_buff *skb, unsigned int len)
{
if (skb->len > len) {
@@ -592,6 +852,16 @@
}
}
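
A sketch of the typical receive-side use, cutting link-layer padding once the protocol header gives the true length; trim_pad() is hypothetical:

#include <linux/skbuff.h>

static void trim_pad(struct sk_buff *skb, unsigned int proto_len)
{
	/* Harmless if the frame is already short enough; skb_trim
	 * only ever shortens a buffer. */
	skb_trim(skb, proto_len);
}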
+/**
+ * skb_orphan - orphan a buffer
+ * @skb: buffer to orphan
+ *
+ * If a buffer currently has an owner then we call the owner's
+ * destructor function and make the skb unowned. The buffer continues
+ * to exist but is no longer charged to its former owner.
+ */
+
+
extern __inline__ void skb_orphan(struct sk_buff *skb)
{
if (skb->destructor)
@@ -600,6 +870,16 @@
skb->sk = NULL;
}
+/**
+ * skb_queue_purge - empty a list
+ * @list: list to empty
+ *
+ * Delete all buffers on an sk_buff list. Each buffer is removed from
+ * the list and one reference dropped. This function takes the list
+ * lock and is atomic with respect to other list locking functions.
+ */
+
+
extern __inline__ void skb_queue_purge(struct sk_buff_head *list)
{
struct sk_buff *skb;
@@ -607,6 +887,16 @@
kfree_skb(skb);
}
+/**
+ * __skb_queue_purge - empty a list
+ * @list: list to empty
+ *
+ * Delete all buffers on an sk_buff list. Each buffer is removed from
+ * the list and one reference dropped. This function does not take the
+ * list lock and the caller must hold the relevant locks to use it.
+ */
+
+
extern __inline__ void __skb_queue_purge(struct sk_buff_head *list)
{
struct sk_buff *skb;
@@ -614,6 +904,19 @@
kfree_skb(skb);
}
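
A teardown sketch contrasting the two variants (function names hypothetical):

#include <linux/skbuff.h>

/* Normal case: let skb_queue_purge take the list lock itself. */
static void teardown(struct sk_buff_head *q)
{
	skb_queue_purge(q);
}

/* Already inside a region that holds q->lock: use the __ variant. */
static void teardown_locked(struct sk_buff_head *q)
{
	__skb_queue_purge(q);
}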
+/**
+ * dev_alloc_skb - allocate an skbuff for sending
+ * @length: length to allocate
+ *
+ * Allocate a new sk_buff and assign it a usage count of one. The
+ * buffer has unspecified headroom built in. Users should allocate
+ * the headroom they think they need without accounting for the
+ * built in space. The built in space is used for optimisations.
+ *
+ * NULL is returned if there is no free memory. Although this function
+ * allocates memory it can be called from an interrupt.
+ */
+
extern __inline__ struct sk_buff *dev_alloc_skb(unsigned int length)
{
struct sk_buff *skb;
@@ -623,6 +926,22 @@
skb_reserve(skb,16);
return skb;
}
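
A sketch of the classic Ethernet receive allocation, where two extra reserved bytes make the IP header behind a 14-byte MAC header land 16-byte aligned; rx_alloc() and RX_LEN are hypothetical:

#include <linux/skbuff.h>

#define RX_LEN 1536	/* hypothetical maximum frame size */

static struct sk_buff *rx_alloc(void)
{
	struct sk_buff *skb = dev_alloc_skb(RX_LEN + 2);

	if (skb == NULL)
		return NULL;		/* caller drops the frame */
	skb_reserve(skb, 2);		/* align the eventual IP header */
	return skb;
}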
+
+/**
+ * skb_cow - copy a buffer if need be
+ * @skb: buffer to copy
+ * @headroom: needed headroom
+ *
+ * If the buffer passed lacks sufficient headroom or is a clone then
+ * it is copied and the additional headroom made available. If there
+ * is no free memory NULL is returned. The new buffer is returned if
+ * a copy was made (and the old one dropped a reference). The existing
+ * buffer is returned otherwise.
+ *
+ * This function primarily exists to avoid making two copies when making
+ * a writable copy of a buffer and then growing the headroom.
+ */
+
extern __inline__ struct sk_buff *
skb_cow(struct sk_buff *skb, unsigned int headroom)
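
A sketch of the intended use, prepending a header to a packet we may not exclusively own; prepend_header() and NEW_HLEN are hypothetical, and note that this version of skb_cow() returns the (possibly new) buffer rather than an error code:

#include <linux/skbuff.h>
#include <linux/string.h>

#define NEW_HLEN 20	/* hypothetical header to prepend */

static struct sk_buff *prepend_header(struct sk_buff *skb)
{
	/* One call makes the buffer both writable and roomy enough;
	 * on failure the old reference has already been dropped. */
	skb = skb_cow(skb, NEW_HLEN);
	if (skb == NULL)
		return NULL;

	memset(skb_push(skb, NEW_HLEN), 0, NEW_HLEN);
	return skb;
}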