[TIPC] Initial merge

TIPC (Transparent Inter Process Communication) is a protocol designed for
intra-cluster communication. For more information, see
http://tipc.sourceforge.net

Signed-off-by: Per Liden <per.liden@nospam.ericsson.com>
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 9f40191..b02dda4 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -186,6 +186,7 @@
 #define AF_PPPOX	24	/* PPPoX sockets		*/
 #define AF_WANPIPE	25	/* Wanpipe API Sockets */
 #define AF_LLC		26	/* Linux LLC			*/
+#define AF_TIPC		30	/* TIPC sockets			*/
 #define AF_BLUETOOTH	31	/* Bluetooth sockets 		*/
 #define AF_MAX		32	/* For now.. */
 
@@ -218,6 +219,7 @@
 #define PF_PPPOX	AF_PPPOX
 #define PF_WANPIPE	AF_WANPIPE
 #define PF_LLC		AF_LLC
+#define PF_TIPC		AF_TIPC
 #define PF_BLUETOOTH	AF_BLUETOOTH
 #define PF_MAX		AF_MAX
 
@@ -279,6 +281,7 @@
 #define SOL_LLC		268
 #define SOL_DCCP	269
 #define SOL_NETLINK	270
+#define SOL_TIPC	271
 
 /* IPX options */
 #define IPX_TYPE	1
diff --git a/include/linux/tipc.h b/include/linux/tipc.h
new file mode 100644
index 0000000..ca754f3
--- /dev/null
+++ b/include/linux/tipc.h
@@ -0,0 +1,598 @@
+/*
+ * include/linux/tipc.h: Include file for TIPC users
+ * 
+ * Copyright (c) 2003-2005, Ericsson Research Canada
+ * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005-2006, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without 
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this 
+ * list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice, 
+ * this list of conditions and the following disclaimer in the documentation 
+ * and/or other materials provided with the distribution.
+ * Neither the names of the copyright holders nor the names of its 
+ * contributors may be used to endorse or promote products derived from this 
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LINUX_TIPC_H_
+#define _LINUX_TIPC_H_
+
+#include <linux/types.h>
+#include <linux/string.h>
+#include <asm/byteorder.h>
+
+/*
+ * TIPC addressing primitives
+ */
+ 
+struct tipc_portid {
+	__u32 ref;
+	__u32 node;
+};
+
+struct tipc_name {
+	__u32 type;
+	__u32 instance;
+};
+
+struct tipc_name_seq {
+	__u32 type;
+	__u32 lower;
+	__u32 upper;
+};
+
+static inline __u32 tipc_addr(unsigned int zone,
+			      unsigned int cluster,
+			      unsigned int node)
+{
+	return (zone << 24) | (cluster << 12) | node;
+}
+
+static inline unsigned int tipc_zone(__u32 addr)
+{
+	return addr >> 24;
+}
+
+static inline unsigned int tipc_cluster(__u32 addr)
+{
+	return (addr >> 12) & 0xfff;
+}
+
+static inline unsigned int tipc_node(__u32 addr)
+{
+	return addr & 0xfff;
+}
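+
+/*
+ * Example (illustrative): the network address <1.1.10> is composed and
+ * decomposed as follows; tipc_addr(1, 1, 10) yields 0x0100100a:
+ *
+ *	__u32 a = tipc_addr(1, 1, 10);
+ *
+ *	tipc_zone(a);		// returns 1
+ *	tipc_cluster(a);	// returns 1
+ *	tipc_node(a);		// returns 10
+ */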
+
+/*
+ * Application-accessible port name types
+ */
+
+#define TIPC_NET_EVENTS		0	/* network event subscription name type */
+#define TIPC_TOP_SRV		1	/* topology service name type */
+#define TIPC_RESERVED_TYPES	64	/* lowest user-publishable name type */
+
+/* 
+ * Publication scopes when binding port names and port name sequences
+ */
+
+#define TIPC_ZONE_SCOPE         1
+#define TIPC_CLUSTER_SCOPE      2
+#define TIPC_NODE_SCOPE         3
+
+/*
+ * Limiting values for messages
+ */
+
+#define TIPC_MAX_USER_MSG_SIZE 66000
+
+/* 
+ * Message importance levels
+ */
+
+#define TIPC_LOW_IMPORTANCE     	0  /* default */
+#define TIPC_MEDIUM_IMPORTANCE		1
+#define TIPC_HIGH_IMPORTANCE		2
+#define TIPC_CRITICAL_IMPORTANCE	3
+
+/* 
+ * Msg rejection/connection shutdown reasons
+ */
+
+#define TIPC_OK			0
+#define TIPC_ERR_NO_NAME	1
+#define TIPC_ERR_NO_PORT	2
+#define TIPC_ERR_NO_NODE	3
+#define TIPC_ERR_OVERLOAD	4
+#define TIPC_CONN_SHUTDOWN	5
+
+/*
+ * TIPC topology subscription service definitions
+ */
+
+#define TIPC_SUB_PORTS		0x01	/* filter for port availability */
+#define TIPC_SUB_SERVICE	0x02	/* filter for service availability */
+#if 0
+/* The following filter options are not currently implemented */
+#define TIPC_SUB_NO_BIND_EVTS	0x04	/* filter out "publish" events */
+#define TIPC_SUB_NO_UNBIND_EVTS	0x08	/* filter out "withdraw" events */
+#define TIPC_SUB_SINGLE_EVT	0x10	/* expire after first event */
+#endif
+
+#define TIPC_WAIT_FOREVER	~0	/* timeout for permanent subscription */
+
+struct tipc_subscr {
+	struct tipc_name_seq seq;	/* name sequence of interest */
+	__u32 timeout;			/* subscription duration (in ms) */
+	__u32 filter;			/* bitmask of filter options */
+	char usr_handle[8];		/* available for subscriber use */
+};
+
+
+#define TIPC_PUBLISHED          1	/* publication event */
+#define TIPC_WITHDRAWN          2	/* withdraw event */
+#define TIPC_SUBSCR_TIMEOUT     3	/* subscription timeout event */
+
+struct tipc_event {
+	__u32 event;			/* event type */
+	__u32 found_lower;		/* matching name seq instances */
+	__u32 found_upper;		/*    "      "    "     "      */
+	struct tipc_portid port;	/* associated port */
+	struct tipc_subscr s;		/* associated subscription */
+};
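+
+/*
+ * Example (illustrative): a subscriber opens a SOCK_SEQPACKET socket,
+ * connects it to the topology service name {TIPC_TOP_SRV, TIPC_TOP_SRV},
+ * writes a struct tipc_subscr, and then reads one struct tipc_event per
+ * matching publication/withdrawal.  A minimal sketch, assuming "sd" is
+ * such a connected socket:
+ *
+ *	struct tipc_subscr sub;
+ *	struct tipc_event evt;
+ *
+ *	memset(&sub, 0, sizeof(sub));
+ *	sub.seq.type  = 1000;		// name type of interest
+ *	sub.seq.lower = 0;
+ *	sub.seq.upper = ~0;
+ *	sub.timeout   = TIPC_WAIT_FOREVER;
+ *	sub.filter    = TIPC_SUB_SERVICE;
+ *	send(sd, &sub, sizeof(sub), 0);
+ *	recv(sd, &evt, sizeof(evt), 0);
+ */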
+
+/*
+ * Socket API
+ */
+
+#ifndef AF_TIPC
+#define AF_TIPC		30
+#endif
+
+#ifndef PF_TIPC
+#define PF_TIPC		AF_TIPC
+#endif
+
+#ifndef SOL_TIPC
+#define SOL_TIPC	271
+#endif
+
+#define TIPC_ADDR_NAMESEQ	1
+#define TIPC_ADDR_MCAST		1
+#define TIPC_ADDR_NAME		2
+#define TIPC_ADDR_ID		3
+
+struct sockaddr_tipc {
+	unsigned short family;
+	unsigned char  addrtype;
+	signed   char  scope;
+	union {
+		struct tipc_portid id;
+		struct tipc_name_seq nameseq;
+		struct {
+			struct tipc_name name;
+			__u32 domain; /* 0: own zone */
+		} name;
+	} addr;
+};
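+
+/*
+ * Example (illustrative): a server binds its socket to port name
+ * {1000, 17} with cluster scope.  A sketch, assuming "sd" is an
+ * AF_TIPC socket descriptor:
+ *
+ *	struct sockaddr_tipc sa;
+ *
+ *	memset(&sa, 0, sizeof(sa));
+ *	sa.family = AF_TIPC;
+ *	sa.addrtype = TIPC_ADDR_NAME;
+ *	sa.scope = TIPC_CLUSTER_SCOPE;
+ *	sa.addr.name.name.type = 1000;
+ *	sa.addr.name.name.instance = 17;
+ *	bind(sd, (struct sockaddr *)&sa, sizeof(sa));
+ */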
+
+/*
+ * Ancillary data objects supported by recvmsg()
+ */
+
+#define TIPC_ERRINFO	1	/* error info */
+#define TIPC_RETDATA	2	/* returned data */
+#define TIPC_DESTNAME	3	/* destination name */
+
+/*
+ * TIPC-specific socket option values
+ */
+
+#define TIPC_IMPORTANCE		127	/* Default: TIPC_LOW_IMPORTANCE */
+#define TIPC_SRC_DROPPABLE	128	/* Default: 0 (resend congested msg) */
+#define TIPC_DEST_DROPPABLE	129	/* Default: based on socket type */
+#define TIPC_CONN_TIMEOUT     	130	/* Default: 8000 (ms)  */
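+
+/*
+ * Example (illustrative): raising the importance of messages sent on an
+ * AF_TIPC socket "sd" (assumed already created):
+ *
+ *	int imp = TIPC_HIGH_IMPORTANCE;
+ *
+ *	setsockopt(sd, SOL_TIPC, TIPC_IMPORTANCE, &imp, sizeof(imp));
+ */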
+
+/*
+ * Bearer
+ */
+
+/* Identifiers of supported TIPC media types */
+
+#define TIPC_MEDIA_TYPE_ETH	1
+
+/* Maximum sizes of TIPC bearer-related names (including terminating NUL) */ 
+
+#define TIPC_MAX_MEDIA_NAME	16	/* format = media */
+#define TIPC_MAX_IF_NAME	16	/* format = interface */
+#define TIPC_MAX_BEARER_NAME	32	/* format = media:interface */
+#define TIPC_MAX_LINK_NAME	60	/* format = Z.C.N:interface-Z.C.N:interface */
+
+struct tipc_media_addr {
+	__u32  type;
+	union {
+		__u8   eth_addr[6];	/* Ethernet bearer */ 
+#if 0
+		/* Prototypes for other possible bearer types */
+
+		struct {
+			__u16 sin_family;
+			__u16 sin_port;
+			struct {
+				__u32 s_addr;
+			} sin_addr;
+			char pad[4];
+		} addr_in;		/* IP-based bearer */
+		__u16  sock_descr;	/* generic socket bearer */
+#endif
+	} dev_addr;
+};
+
+
+/* Link priority limits (range from 0 to # priorities - 1) */
+
+#define TIPC_NUM_LINK_PRI 32
+
+/* Link tolerance limits (min, default, max), in ms */
+
+#define TIPC_MIN_LINK_TOL 50
+#define TIPC_DEF_LINK_TOL 1500
+#define TIPC_MAX_LINK_TOL 30000
+
+/* Link window limits (min, default, max), in packets */
+
+#define TIPC_MIN_LINK_WIN 16
+#define TIPC_DEF_LINK_WIN 50
+#define TIPC_MAX_LINK_WIN 150
+
+/*
+ * Configuration
+ *
+ * All configuration management messaging involves sending a request message
+ * to the TIPC configuration service on a node, which sends a reply message
+ * back.  (In the future multi-message replies may be supported.)
+ *
+ * Both request and reply messages consist of a transport header and payload.
+ * The transport header contains info about the desired operation;
+ * the payload consists of zero or more type/length/value (TLV) items
+ * which specify parameters or results for the operation.
+ *
+ * For many operations, the request and reply messages have a fixed number
+ * of TLVs (usually zero or one); however, some reply messages may return 
+ * a variable number of TLVs.  A failed request is denoted by the presence
+ * of an "error string" TLV in the reply message instead of the TLV(s) the
+ * reply should contain if the request succeeds.
+ */
+ 
+#define TIPC_CFG_SRV	0		/* configuration service name type */
+
+/* 
+ * Public commands:
+ * May be issued by any process.
+ * Accepted by own node, or by remote node only if remote management is enabled.
+ */
+ 
+#define  TIPC_CMD_NOOP   	    0x0000    /* tx none, rx none */
+#define  TIPC_CMD_GET_NODES         0x0001    /* tx net_addr, rx node_info(s) */
+#define  TIPC_CMD_GET_MEDIA_NAMES   0x0002    /* tx none, rx media_name(s) */
+#define  TIPC_CMD_GET_BEARER_NAMES  0x0003    /* tx none, rx bearer_name(s) */
+#define  TIPC_CMD_GET_LINKS         0x0004    /* tx net_addr, rx link_info(s) */
+#define  TIPC_CMD_SHOW_NAME_TABLE   0x0005    /* tx name_tbl_query, rx ultra_string */
+#define  TIPC_CMD_SHOW_PORTS        0x0006    /* tx none, rx ultra_string */
+#define  TIPC_CMD_SHOW_LINK_STATS   0x000B    /* tx link_name, rx ultra_string */
+
+#if 0
+#define  TIPC_CMD_SHOW_PORT_STATS   0x0008    /* tx port_ref, rx ultra_string */
+#define  TIPC_CMD_RESET_PORT_STATS  0x0009    /* tx port_ref, rx none */
+#define  TIPC_CMD_GET_ROUTES        0x000A    /* tx ?, rx ? */
+#define  TIPC_CMD_GET_LINK_PEER     0x000D    /* tx link_name, rx ? */
+#endif
+
+/* 
+ * Protected commands:
+ * May only be issued by "network administration capable" process.
+ * Accepted by own node, or by remote node only if remote management is
+ * enabled and this node is the zone manager.
+ */
+
+#define  TIPC_CMD_GET_REMOTE_MNG    0x4003    /* tx none, rx unsigned */
+#define  TIPC_CMD_GET_MAX_PORTS     0x4004    /* tx none, rx unsigned */
+#define  TIPC_CMD_GET_MAX_PUBL      0x4005    /* tx none, rx unsigned */
+#define  TIPC_CMD_GET_MAX_SUBSCR    0x4006    /* tx none, rx unsigned */
+#define  TIPC_CMD_GET_MAX_ZONES     0x4007    /* tx none, rx unsigned */
+#define  TIPC_CMD_GET_MAX_CLUSTERS  0x4008    /* tx none, rx unsigned */
+#define  TIPC_CMD_GET_MAX_NODES     0x4009    /* tx none, rx unsigned */
+#define  TIPC_CMD_GET_MAX_SLAVES    0x400A    /* tx none, rx unsigned */
+#define  TIPC_CMD_GET_NETID         0x400B    /* tx none, rx unsigned */
+
+#define  TIPC_CMD_ENABLE_BEARER     0x4101    /* tx bearer_config, rx none */
+#define  TIPC_CMD_DISABLE_BEARER    0x4102    /* tx bearer_name, rx none */
+#define  TIPC_CMD_SET_LINK_TOL      0x4107    /* tx link_config, rx none */
+#define  TIPC_CMD_SET_LINK_PRI      0x4108    /* tx link_config, rx none */
+#define  TIPC_CMD_SET_LINK_WINDOW   0x4109    /* tx link_config, rx none */
+#define  TIPC_CMD_SET_LOG_SIZE      0x410A    /* tx unsigned, rx none */
+#define  TIPC_CMD_DUMP_LOG          0x410B    /* tx none, rx ultra_string */
+#define  TIPC_CMD_RESET_LINK_STATS  0x410C    /* tx link_name, rx none */
+
+#if 0
+#define  TIPC_CMD_CREATE_LINK       0x4103    /* tx link_create, rx none */
+#define  TIPC_CMD_REMOVE_LINK       0x4104    /* tx link_name, rx none */
+#define  TIPC_CMD_BLOCK_LINK        0x4105    /* tx link_name, rx none */
+#define  TIPC_CMD_UNBLOCK_LINK      0x4106    /* tx link_name, rx none */
+#endif
+
+/* 
+ * Private commands:
+ * May only be issued by "network administration capable" process.
+ * Accepted by own node only; cannot be used on a remote node.
+ */
+
+#define  TIPC_CMD_SET_NODE_ADDR     0x8001    /* tx net_addr, rx none */
+#if 0
+#define  TIPC_CMD_SET_ZONE_MASTER   0x8002    /* tx none, rx none */
+#endif
+#define  TIPC_CMD_SET_REMOTE_MNG    0x8003    /* tx unsigned, rx none */
+#define  TIPC_CMD_SET_MAX_PORTS     0x8004    /* tx unsigned, rx none */
+#define  TIPC_CMD_SET_MAX_PUBL      0x8005    /* tx unsigned, rx none */
+#define  TIPC_CMD_SET_MAX_SUBSCR    0x8006    /* tx unsigned, rx none */
+#define  TIPC_CMD_SET_MAX_ZONES     0x8007    /* tx unsigned, rx none */
+#define  TIPC_CMD_SET_MAX_CLUSTERS  0x8008    /* tx unsigned, rx none */
+#define  TIPC_CMD_SET_MAX_NODES     0x8009    /* tx unsigned, rx none */
+#define  TIPC_CMD_SET_MAX_SLAVES    0x800A    /* tx unsigned, rx none */
+#define  TIPC_CMD_SET_NETID         0x800B    /* tx unsigned, rx none */
+
+/*
+ * TLV types defined for TIPC
+ */
+
+#define TIPC_TLV_NONE		0	/* no TLV present */
+#define TIPC_TLV_VOID		1	/* empty TLV (0 data bytes)*/
+#define TIPC_TLV_UNSIGNED	2	/* 32-bit integer */
+#define TIPC_TLV_STRING		3	/* char[128] (max) */
+#define TIPC_TLV_LARGE_STRING	4	/* char[2048] (max) */
+#define TIPC_TLV_ULTRA_STRING	5	/* char[32768] (max) */
+
+#define TIPC_TLV_ERROR_STRING	16	/* char[128] containing "error code" */
+#define TIPC_TLV_NET_ADDR   	17	/* 32-bit integer denoting <Z.C.N> */
+#define TIPC_TLV_MEDIA_NAME	18	/* char[MAX_MEDIA_NAME] */
+#define TIPC_TLV_BEARER_NAME	19	/* char[MAX_BEARER_NAME] */
+#define TIPC_TLV_LINK_NAME	20	/* char[MAX_LINK_NAME] */
+#define TIPC_TLV_NODE_INFO	21	/* struct tipc_node_info */
+#define TIPC_TLV_LINK_INFO	22	/* struct tipc_link_info */
+#define TIPC_TLV_BEARER_CONFIG  23	/* struct tipc_bearer_config */
+#define TIPC_TLV_LINK_CONFIG    24	/* struct tipc_link_config */
+#define TIPC_TLV_NAME_TBL_QUERY	25	/* struct tipc_name_table_query */
+#define TIPC_TLV_PORT_REF   	26	/* 32-bit port reference */
+
+struct tipc_node_info {
+	__u32 addr;			/* network address of node */
+	__u32 up;			/* 0=down, 1=up */
+};
+
+struct tipc_link_info {
+	__u32 dest;			/* network address of peer node */
+	__u32 up;			/* 0=down, 1=up */
+	char str[TIPC_MAX_LINK_NAME];	/* link name */
+};
+
+struct tipc_bearer_config {
+	__u32 priority;			/* Range [1,31]. Override per link  */
+	__u32 detect_scope;     
+	char name[TIPC_MAX_BEARER_NAME];
+};
+
+struct tipc_link_config {
+	__u32 value;
+	char name[TIPC_MAX_LINK_NAME];
+};
+
+#define TIPC_NTQ_ALLTYPES 0x80000000
+
+struct tipc_name_table_query {
+	__u32 depth;	/* 1:type, 2:+name info, 3:+port info, 4+:+debug info */
+	__u32 type;	/* {t,l,u} info ignored if high bit of "depth" is set */
+	__u32 lowbound; /* (i.e. displays all entries of name table) */
+	__u32 upbound;
+};
+
+/*
+ * The error string TLV is a null-terminated string describing the cause 
+ * of the request failure.  To simplify error processing (and to save space)
+ * the first character of the string can be a special error code character
+ * (lying in the range 0x80 to 0xFF) which represents a pre-defined reason.
+ */
+
+#define TIPC_CFG_TLV_ERROR      "\x80"  /* request contains incorrect TLV(s) */
+#define TIPC_CFG_NOT_NET_ADMIN  "\x81"	/* must be network administrator */
+#define TIPC_CFG_NOT_ZONE_MSTR	"\x82"	/* must be zone master */
+#define TIPC_CFG_NO_REMOTE	"\x83"	/* remote management not enabled */
+#define TIPC_CFG_NOT_SUPPORTED  "\x84"	/* request is not supported by TIPC */
+#define TIPC_CFG_INVALID_VALUE  "\x85"  /* request has invalid argument value */
+
+#if 0
+/* prototypes TLV structures for proposed commands */
+struct tipc_link_create {
+	__u32   domain;
+	struct tipc_media_addr peer_addr;
+	char bearer_name[TIPC_MAX_BEARER_NAME];
+};
+
+struct tipc_route_info {
+	__u32 dest;
+	__u32 router;
+};
+#endif
+
+/*
+ * A TLV consists of a descriptor, followed by the TLV value.
+ * TLV descriptor fields are stored in network byte order; 
+ * TLV values must also be stored in network byte order (where applicable).
+ * TLV descriptors must be aligned to addresses which are multiple of 4,
+ * so up to 3 bytes of padding may exist at the end of the TLV value area.
+ * There must not be any padding between the TLV descriptor and its value.
+ */
+
+struct tlv_desc {
+	__u16 tlv_len;		/* TLV length (descriptor + value) */
+	__u16 tlv_type;		/* TLV identifier */
+};
+
+#define TLV_ALIGNTO 4
+
+#define TLV_ALIGN(datalen) (((datalen)+(TLV_ALIGNTO-1)) & ~(TLV_ALIGNTO-1))
+#define TLV_LENGTH(datalen) (sizeof(struct tlv_desc) + (datalen))
+#define TLV_SPACE(datalen) (TLV_ALIGN(TLV_LENGTH(datalen)))
+#define TLV_DATA(tlv) ((void *)((char *)(tlv) + TLV_LENGTH(0)))
+
+static inline int TLV_OK(const void *tlv, __u16 space)
+{
+	/*
+	 * Would also like to check that "tlv" is a multiple of 4,
+	 * but don't know how to do this in a portable way.
+	 * - Tried doing (!(tlv & (TLV_ALIGNTO-1))), but GCC compiler
+	 *   won't allow binary "&" with a pointer.
+	 * - Tried casting "tlv" to integer type, but causes warning about size
+	 *   mismatch when pointer is bigger than chosen type (int, long, ...).
+	 */
+
+	return (space >= TLV_SPACE(0)) &&
+		(ntohs(((struct tlv_desc *)tlv)->tlv_len) <= space);
+}
+
+static inline int TLV_CHECK(const void *tlv, __u16 space, __u16 exp_type)
+{
+	return TLV_OK(tlv, space) && 
+		(ntohs(((struct tlv_desc *)tlv)->tlv_type) == exp_type);
+}
+
+static inline int TLV_SET(void *tlv, __u16 type, void *data, __u16 len)
+{
+	struct tlv_desc *tlv_ptr;
+	int tlv_len;
+
+	tlv_len = TLV_LENGTH(len);
+	tlv_ptr = (struct tlv_desc *)tlv;
+	tlv_ptr->tlv_type = htons(type);
+	tlv_ptr->tlv_len  = htons(tlv_len);
+	if (len && data)
+		memcpy(TLV_DATA(tlv_ptr), data, len);
+	return TLV_SPACE(len);
+}
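+
+/*
+ * Example (illustrative): writing an unsigned 32-bit TLV into a buffer;
+ * per the rules above, the value itself must be in network byte order:
+ *
+ *	__u32 netid = htonl(4711);
+ *	char buf[TLV_SPACE(sizeof(netid))];
+ *
+ *	TLV_SET(buf, TIPC_TLV_UNSIGNED, &netid, sizeof(netid));
+ */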
+
+/*
+ * A TLV list descriptor simplifies processing of messages 
+ * containing multiple TLVs.
+ */
+
+struct tlv_list_desc {
+	struct tlv_desc *tlv_ptr;	/* ptr to current TLV */
+	__u32 tlv_space;		/* # bytes from curr TLV to list end */
+};
+
+static inline void TLV_LIST_INIT(struct tlv_list_desc *list, 
+				 void *data, __u32 space)
+{
+	list->tlv_ptr = (struct tlv_desc *)data;
+	list->tlv_space = space;
+}
+
+static inline int TLV_LIST_EMPTY(struct tlv_list_desc *list)
+{ 
+	return (list->tlv_space == 0);
+}
+
+static inline int TLV_LIST_CHECK(struct tlv_list_desc *list, __u16 exp_type)
+{
+	return TLV_CHECK(list->tlv_ptr, list->tlv_space, exp_type);
+}
+
+static inline void *TLV_LIST_DATA(struct tlv_list_desc *list)
+{
+	return TLV_DATA(list->tlv_ptr);
+}
+
+static inline void TLV_LIST_STEP(struct tlv_list_desc *list)
+{
+	__u16 tlv_space = TLV_ALIGN(ntohs(list->tlv_ptr->tlv_len));
+
+	list->tlv_ptr = (struct tlv_desc *)((char *)list->tlv_ptr + tlv_space);
+	list->tlv_space -= tlv_space;
+}
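+
+/*
+ * Example (illustrative): walking all TLVs of a reply whose payload
+ * starts at "data" and spans "len" bytes; process_node_info() stands in
+ * for whatever the application does with each value:
+ *
+ *	struct tlv_list_desc list;
+ *
+ *	TLV_LIST_INIT(&list, data, len);
+ *	while (!TLV_LIST_EMPTY(&list)) {
+ *		if (TLV_LIST_CHECK(&list, TIPC_TLV_NODE_INFO))
+ *			process_node_info(TLV_LIST_DATA(&list));
+ *		TLV_LIST_STEP(&list);
+ *	}
+ */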
+
+/*
+ * Configuration messages exchanged via NETLINK_GENERIC use the following
+ * family id, name, version and command.
+ */
+#define TIPC_GENL_FAMILY	0x222
+#define TIPC_GENL_NAME		"TIPC"
+#define TIPC_GENL_VERSION	0x1
+#define TIPC_GENL_CMD		0x1
+
+/*
+ * TIPC specific header used in NETLINK_GENERIC requests.
+ */
+struct tipc_genlmsghdr {
+	__u32 dest;		/* Destination address */
+	__u16 cmd;		/* Command */
+	__u16 reserved;		/* Unused */
+};
+
+#define TIPC_GENL_HDRLEN	NLMSG_ALIGN(sizeof(struct tipc_genlmsghdr))
+
+/*
+ * Configuration messages exchanged via TIPC sockets use the TIPC configuration 
+ * message header, which is defined below.  This structure is analogous 
+ * to the Netlink message header, but fields are stored in network byte order 
+ * and no padding is permitted between the header and the message data 
+ * that follows.
+ */
+
+struct tipc_cfg_msg_hdr {
+	__u32 tcm_len;		/* Message length (including header) */
+	__u16 tcm_type;		/* Command type */
+	__u16 tcm_flags;	/* Additional flags */
+	char  tcm_reserved[8];	/* Unused */
+};
+
+#define TCM_F_REQUEST	0x1	/* Flag: Request message */
+#define TCM_F_MORE	0x2	/* Flag: Message to be continued */
+
+#define TCM_ALIGN(datalen)  (((datalen)+3) & ~3)
+#define TCM_LENGTH(datalen) (sizeof(struct tipc_cfg_msg_hdr) + (datalen))
+#define TCM_SPACE(datalen)  (TCM_ALIGN(TCM_LENGTH(datalen)))
+#define TCM_DATA(tcm_hdr)   ((void *)((char *)(tcm_hdr) + TCM_LENGTH(0)))
+
+static inline int TCM_SET(void *msg, __u16 cmd, __u16 flags,
+			  void *data, __u16 data_len)
+{
+	struct tipc_cfg_msg_hdr *tcm_hdr;
+	int msg_len;
+
+	msg_len = TCM_LENGTH(data_len);
+	tcm_hdr = (struct tipc_cfg_msg_hdr *)msg;
+	tcm_hdr->tcm_len   = htonl(msg_len);
+	tcm_hdr->tcm_type  = htons(cmd);
+	tcm_hdr->tcm_flags = htons(flags);
+	if (data_len && data)
+		memcpy(TCM_DATA(msg), data, data_len);
+	return TCM_SPACE(data_len);
+}
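+
+/*
+ * Example (illustrative): building a TIPC_CMD_GET_NODES request for all
+ * nodes in zone <1.0.0>, following the request/reply scheme described
+ * above.  "req" is assumed to be a caller-supplied buffer of at least
+ * TCM_SPACE(TLV_SPACE(sizeof(__u32))) bytes:
+ *
+ *	__u32 domain = htonl(tipc_addr(1, 0, 0));
+ *	char tlv[TLV_SPACE(sizeof(domain))];
+ *	int tlv_space;
+ *
+ *	tlv_space = TLV_SET(tlv, TIPC_TLV_NET_ADDR, &domain, sizeof(domain));
+ *	TCM_SET(req, TIPC_CMD_GET_NODES, TCM_F_REQUEST, tlv, tlv_space);
+ */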
+
+#endif
diff --git a/include/net/tipc/tipc.h b/include/net/tipc/tipc.h
new file mode 100644
index 0000000..1d4d8d0
--- /dev/null
+++ b/include/net/tipc/tipc.h
@@ -0,0 +1,255 @@
+/*
+ * include/net/tipc/tipc.h: Main include file for TIPC users
+ * 
+ * Copyright (c) 2003-2005, Ericsson Research Canada
+ * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005-2006, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without 
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this 
+ * list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice, 
+ * this list of conditions and the following disclaimer in the documentation 
+ * and/or other materials provided with the distribution.
+ * Neither the names of the copyright holders nor the names of its 
+ * contributors may be used to endorse or promote products derived from this 
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _NET_TIPC_H_
+#define _NET_TIPC_H_
+
+#ifdef __KERNEL__
+
+#include <linux/tipc.h>
+#include <linux/skbuff.h>
+
+/* 
+ * Native API
+ * ----------
+ */
+
+/*
+ * TIPC operating mode routines
+ */
+
+u32 tipc_get_addr(void);
+
+#define TIPC_NOT_RUNNING  0
+#define TIPC_NODE_MODE    1
+#define TIPC_NET_MODE     2
+
+typedef void (*tipc_mode_event)(void *usr_handle, int mode, u32 addr);
+
+int tipc_attach(unsigned int *userref, tipc_mode_event, void *usr_handle);
+
+void tipc_detach(unsigned int userref);
+
+int tipc_get_mode(void);
+
+/*
+ * TIPC port manipulation routines
+ */
+
+typedef void (*tipc_msg_err_event) (void *usr_handle,
+				    u32 portref,
+				    struct sk_buff **buf,
+				    unsigned char const *data,
+				    unsigned int size,
+				    int reason, 
+				    struct tipc_portid const *attmpt_destid);
+
+typedef void (*tipc_named_msg_err_event) (void *usr_handle,
+					  u32 portref,
+					  struct sk_buff **buf,
+					  unsigned char const *data,
+					  unsigned int size,
+					  int reason, 
+					  struct tipc_name_seq const *attmpt_dest);
+
+typedef void (*tipc_conn_shutdown_event) (void *usr_handle,
+					  u32 portref,
+					  struct sk_buff **buf,
+					  unsigned char const *data,
+					  unsigned int size,
+					  int reason);
+
+typedef void (*tipc_msg_event) (void *usr_handle,
+				u32 portref,
+				struct sk_buff **buf,
+				unsigned char const *data,
+				unsigned int size,
+				unsigned int importance, 
+				struct tipc_portid const *origin);
+
+typedef void (*tipc_named_msg_event) (void *usr_handle,
+				      u32 portref,
+				      struct sk_buff **buf,
+				      unsigned char const *data,
+				      unsigned int size,
+				      unsigned int importance, 
+				      struct tipc_portid const *orig,
+				      struct tipc_name_seq const *dest);
+
+typedef void (*tipc_conn_msg_event) (void *usr_handle,
+				     u32 portref,
+				     struct sk_buff **buf,
+				     unsigned char const *data,
+				     unsigned int size);
+
+typedef void (*tipc_continue_event) (void *usr_handle, 
+				     u32 portref);
+
+int tipc_createport(unsigned int tipc_user, 
+		    void *usr_handle, 
+		    unsigned int importance, 
+		    tipc_msg_err_event error_cb, 
+		    tipc_named_msg_err_event named_error_cb, 
+		    tipc_conn_shutdown_event conn_error_cb, 
+		    tipc_msg_event message_cb, 
+		    tipc_named_msg_event named_message_cb, 
+		    tipc_conn_msg_event conn_message_cb, 
+		    tipc_continue_event continue_event_cb, /* May be zero */
+		    u32 *portref);
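+
+/*
+ * Example (illustrative): a kernel-space user attaches to TIPC and
+ * creates a low-importance port; my_msg_cb is a caller-supplied
+ * tipc_msg_event handler, and the remaining callbacks are assumed to
+ * be optional (passed as NULL here):
+ *
+ *	unsigned int userref;
+ *	u32 portref;
+ *
+ *	tipc_attach(&userref, NULL, NULL);
+ *	tipc_createport(userref, NULL, TIPC_LOW_IMPORTANCE,
+ *			NULL, NULL, NULL, my_msg_cb, NULL, NULL, NULL,
+ *			&portref);
+ */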
+
+int tipc_deleteport(u32 portref);
+
+int tipc_ownidentity(u32 portref, struct tipc_portid *port);
+
+int tipc_portimportance(u32 portref, unsigned int *importance);
+int tipc_set_portimportance(u32 portref, unsigned int importance);
+
+int tipc_portunreliable(u32 portref, unsigned int *isunreliable);
+int tipc_set_portunreliable(u32 portref, unsigned int isunreliable);
+
+int tipc_portunreturnable(u32 portref, unsigned int *isunreturnable);
+int tipc_set_portunreturnable(u32 portref, unsigned int isunreturnable);
+
+int tipc_publish(u32 portref, unsigned int scope, 
+		 struct tipc_name_seq const *name_seq);
+int tipc_withdraw(u32 portref, unsigned int scope,
+		  struct tipc_name_seq const *name_seq); /* 0: all */
+
+int tipc_connect2port(u32 portref, struct tipc_portid const *port);
+
+int tipc_disconnect(u32 portref);
+
+int tipc_shutdown(u32 ref); /* Sends SHUTDOWN msg */
+
+int tipc_isconnected(u32 portref, int *isconnected);
+
+int tipc_peer(u32 portref, struct tipc_portid *peer);
+
+int tipc_ref_valid(u32 portref); 
+
+/*
+ * TIPC messaging routines
+ */
+
+#define TIPC_PORT_IMPORTANCE 100	/* send using current port setting */
+
+
+int tipc_send(u32 portref,
+	      unsigned int num_sect,
+	      struct iovec const *msg_sect);
+
+int tipc_send_buf(u32 portref,
+		  struct sk_buff *buf,
+		  unsigned int dsz);
+
+int tipc_send2name(u32 portref, 
+		   struct tipc_name const *name, 
+		   u32 domain,	/* 0:own zone */
+		   unsigned int num_sect,
+		   struct iovec const *msg_sect);
+
+int tipc_send_buf2name(u32 portref,
+		       struct tipc_name const *name,
+		       u32 domain,
+		       struct sk_buff *buf,
+		       unsigned int dsz);
+
+int tipc_forward2name(u32 portref, 
+		      struct tipc_name const *name, 
+		      u32 domain,	/* 0:own zone */
+		      unsigned int section_count,
+		      struct iovec const *msg_sect,
+		      struct tipc_portid const *origin,
+		      unsigned int importance);
+
+int tipc_forward_buf2name(u32 portref,
+			  struct tipc_name const *name,
+			  u32 domain,
+			  struct sk_buff *buf,
+			  unsigned int dsz,
+			  struct tipc_portid const *orig,
+			  unsigned int importance);
+
+int tipc_send2port(u32 portref,
+		   struct tipc_portid const *dest,
+		   unsigned int num_sect,
+		   struct iovec const *msg_sect);
+
+int tipc_send_buf2port(u32 portref,
+		       struct tipc_portid const *dest,
+		       struct sk_buff *buf,
+		       unsigned int dsz);
+
+int tipc_forward2port(u32 portref,
+		      struct tipc_portid const *dest,
+		      unsigned int num_sect,
+		      struct iovec const *msg_sect,
+		      struct tipc_portid const *origin,
+		      unsigned int importance);
+
+int tipc_forward_buf2port(u32 portref,
+			  struct tipc_portid const *dest,
+			  struct sk_buff *buf,
+			  unsigned int dsz,
+			  struct tipc_portid const *orig,
+			  unsigned int importance);
+
+int tipc_multicast(u32 portref, 
+		   struct tipc_name_seq const *seq, 
+		   u32 domain,	/* 0:own zone */
+		   unsigned int section_count,
+		   struct iovec const *msg);
+
+#if 0
+int tipc_multicast_buf(u32 portref, 
+		       struct tipc_name_seq const *seq, 
+		       u32 domain,	/* 0:own zone */
+		       void *buf,
+		       unsigned int size);
+#endif
+
+/*
+ * TIPC subscription routines
+ */
+
+int tipc_ispublished(struct tipc_name const *name);
+
+/*
+ * Get number of available nodes within specified domain (excluding own node)
+ */
+
+unsigned int tipc_available_nodes(const u32 domain);
+
+#endif
+
+#endif
diff --git a/include/net/tipc/tipc_bearer.h b/include/net/tipc/tipc_bearer.h
new file mode 100644
index 0000000..a3daf69
--- /dev/null
+++ b/include/net/tipc/tipc_bearer.h
@@ -0,0 +1,92 @@
+/*
+ * include/net/tipc/tipc_bearer.h: Include file for privileged access to TIPC bearers
+ * 
+ * Copyright (c) 2003-2005, Ericsson Research Canada
+ * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005-2006, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without 
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this 
+ * list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice, 
+ * this list of conditions and the following disclaimer in the documentation 
+ * and/or other materials provided with the distribution.
+ * Neither the names of the copyright holders nor the names of its 
+ * contributors may be used to endorse or promote products derived from this 
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _NET_TIPC_BEARER_H_
+#define _NET_TIPC_BEARER_H_
+
+#ifdef __KERNEL__
+
+#include <linux/tipc.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+
+/**
+ * struct tipc_bearer - TIPC bearer info available to privileged users
+ * @usr_handle: pointer to additional user-defined information about bearer
+ * @mtu: max packet size bearer can support
+ * @blocked: non-zero if bearer is blocked
+ * @lock: spinlock for controlling access to bearer
+ * @addr: media-specific address associated with bearer
+ * @name: bearer name (format = media:interface)
+ * 
+ * Note: TIPC initializes "name" and "lock" fields; user is responsible for
+ * initializing all other fields when a bearer is enabled.
+ */
+
+struct tipc_bearer {
+	void *usr_handle;
+	u32 mtu;
+	int blocked;
+	spinlock_t lock;
+	struct tipc_media_addr addr;
+	char name[TIPC_MAX_BEARER_NAME];
+};
+
+
+int  tipc_register_media(u32 media_type,
+			 char *media_name, 
+			 int (*enable)(struct tipc_bearer *), 
+			 void (*disable)(struct tipc_bearer *), 
+			 int (*send_msg)(struct sk_buff *, 
+					 struct tipc_bearer *,
+					 struct tipc_media_addr *), 
+			 char *(*addr2str)(struct tipc_media_addr *a,
+					   char *str_buf,
+					   int str_size),
+			 struct tipc_media_addr *bcast_addr,
+			 const u32 bearer_priority,
+			 const u32 link_tolerance,  /* [ms] */
+			 const u32 send_window_limit); 
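+
+/*
+ * Example (illustrative): an Ethernet-like media driver might register
+ * itself roughly as follows, where the my_* callbacks and broadcast
+ * address are supplied by the driver:
+ *
+ *	tipc_register_media(TIPC_MEDIA_TYPE_ETH, "eth",
+ *			    my_enable, my_disable, my_send, my_addr2str,
+ *			    &my_bcast_addr, 10,	// priority in [1,31]
+ *			    TIPC_DEF_LINK_TOL, TIPC_DEF_LINK_WIN);
+ */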
+
+void tipc_recv_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr);
+
+int  tipc_block_bearer(const char *name);
+void tipc_continue(struct tipc_bearer *tb_ptr); 
+
+int tipc_enable_bearer(const char *bearer_name, u32 bcast_scope, u32 priority);
+int tipc_disable_bearer(const char *name);
+
+
+#endif
+
+#endif
diff --git a/include/net/tipc/tipc_msg.h b/include/net/tipc/tipc_msg.h
new file mode 100644
index 0000000..78513f5
--- /dev/null
+++ b/include/net/tipc/tipc_msg.h
@@ -0,0 +1,220 @@
+/*
+ * include/net/tipc/tipc_msg.h: Include file for privileged access to TIPC message headers
+ * 
+ * Copyright (c) 2003-2005, Ericsson Research Canada
+ * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005-2006, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without 
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this 
+ * list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice, 
+ * this list of conditions and the following disclaimer in the documentation 
+ * and/or other materials provided with the distribution.
+ * Neither the names of the copyright holders nor the names of its 
+ * contributors may be used to endorse or promote products derived from this 
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _NET_TIPC_MSG_H_
+#define _NET_TIPC_MSG_H_
+
+#ifdef __KERNEL__
+
+struct tipc_msg {
+	u32 hdr[15];
+};
+
+
+/*
+		TIPC user data message header format, version 2:
+
+
+       1 0 9 8 7 6 5 4|3 2 1 0 9 8 7 6|5 4 3 2 1 0 9 8|7 6 5 4 3 2 1 0 
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+   w0:|vers | user  |hdr sz |n|d|s|-|          message size           |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+   w1:|mstyp| error |rer cnt|lsc|opt p|      broadcast ack no         |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+   w2:|        link level ack no      |   broadcast/link level seq no |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+   w3:|                       previous node                           |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+   w4:|                      originating port                         |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+   w5:|                      destination port                         |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+    
+   w6:|                      originating node                         |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+   w7:|                      destination node                         |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+   w8:|            name type / transport sequence number              |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+   w9:|              name instance/multicast lower bound              |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+    
+   wA:|                    multicast upper bound                      |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+    
+      /                                                               /
+      \                           options                             \
+      /                                                               /
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+*/
+
+#define TIPC_CONN_MSG	0
+#define TIPC_MCAST_MSG	1
+#define TIPC_NAMED_MSG	2
+#define TIPC_DIRECT_MSG	3
+
+
+static inline u32 msg_word(struct tipc_msg *m, u32 pos)
+{
+	return ntohl(m->hdr[pos]);
+}
+
+static inline u32 msg_bits(struct tipc_msg *m, u32 w, u32 pos, u32 mask)
+{
+	return (msg_word(m, w) >> pos) & mask;
+}
+
+static inline u32 msg_importance(struct tipc_msg *m)
+{
+	return msg_bits(m, 0, 25, 0xf);
+}
+
+static inline u32 msg_hdr_sz(struct tipc_msg *m)
+{
+	return msg_bits(m, 0, 21, 0xf) << 2;
+}
+
+static inline int msg_short(struct tipc_msg *m)
+{
+	return (msg_hdr_sz(m) == 24);
+}
+
+static inline u32 msg_size(struct tipc_msg *m)
+{
+	return msg_bits(m, 0, 0, 0x1ffff);
+}
+
+static inline u32 msg_data_sz(struct tipc_msg *m)
+{
+	return (msg_size(m) - msg_hdr_sz(m));
+}
+
+static inline unchar *msg_data(struct tipc_msg *m)
+{
+	return ((unchar *)m) + msg_hdr_sz(m);
+}
+
+static inline u32 msg_type(struct tipc_msg *m)
+{
+	return msg_bits(m, 1, 29, 0x7);
+}
+
+static inline u32 msg_direct(struct tipc_msg *m)
+{
+	return (msg_type(m) == TIPC_DIRECT_MSG);
+}
+
+static inline u32 msg_named(struct tipc_msg *m)
+{
+	return (msg_type(m) == TIPC_NAMED_MSG);
+}
+
+static inline u32 msg_mcast(struct tipc_msg *m)
+{
+	return (msg_type(m) == TIPC_MCAST_MSG);
+}
+
+static inline u32 msg_connected(struct tipc_msg *m)
+{
+	return (msg_type(m) == TIPC_CONN_MSG);
+}
+
+static inline u32 msg_errcode(struct tipc_msg *m)
+{
+	return msg_bits(m, 1, 25, 0xf);
+}
+
+static inline u32 msg_prevnode(struct tipc_msg *m)
+{
+	return msg_word(m, 3);
+}
+
+static inline u32 msg_origport(struct tipc_msg *m)
+{
+	return msg_word(m, 4);
+}
+
+static inline u32 msg_destport(struct tipc_msg *m)
+{
+	return msg_word(m, 5);
+}
+
+static inline u32 msg_mc_netid(struct tipc_msg *m)
+{
+	return msg_word(m, 5);
+}
+
+static inline u32 msg_orignode(struct tipc_msg *m)
+{
+	if (likely(msg_short(m)))
+		return msg_prevnode(m);
+	return msg_word(m, 6);
+}
+
+static inline u32 msg_destnode(struct tipc_msg *m)
+{
+	return msg_word(m, 7);
+}
+
+static inline u32 msg_nametype(struct tipc_msg *m)
+{
+	return msg_word(m, 8);
+}
+
+static inline u32 msg_nameinst(struct tipc_msg *m)
+{
+	return msg_word(m, 9);
+}
+
+static inline u32 msg_namelower(struct tipc_msg *m)
+{
+	return msg_nameinst(m);
+}
+
+static inline u32 msg_nameupper(struct tipc_msg *m)
+{
+	return msg_word(m, 10);
+}
+
+static inline char *msg_options(struct tipc_msg *m, u32 *len)
+{
+	u32 pos = msg_bits(m, 1, 16, 0x7);
+
+	if (!pos)
+		return NULL;
+	pos = (pos * 4) + 28;
+	*len = msg_hdr_sz(m) - pos;
+	return (char *)&m->hdr[pos/4];
+}
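+
+/*
+ * Example (illustrative): a receive path might classify an incoming
+ * header "m" as follows; lookup() and deliver() stand in for the
+ * caller's own handling:
+ *
+ *	if (msg_named(m))
+ *		lookup(msg_nametype(m), msg_nameinst(m));
+ *	else if (msg_connected(m))
+ *		deliver(msg_destport(m), msg_data(m), msg_data_sz(m));
+ */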
+
+#endif
+
+#endif
diff --git a/include/net/tipc/tipc_port.h b/include/net/tipc/tipc_port.h
new file mode 100644
index 0000000..ec0f0de
--- /dev/null
+++ b/include/net/tipc/tipc_port.h
@@ -0,0 +1,105 @@
+/*
+ * include/net/tipc/tipc_port.h: Include file for privileged access to TIPC ports
+ * 
+ * Copyright (c) 2003-2005, Ericsson Research Canada
+ * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005-2006, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without 
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this 
+ * list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice, 
+ * this list of conditions and the following disclaimer in the documentation 
+ * and/or other materials provided with the distribution.
+ * Neither the names of the copyright holders nor the names of its 
+ * contributors may be used to endorse or promote products derived from this 
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _NET_TIPC_PORT_H_
+#define _NET_TIPC_PORT_H_
+
+#ifdef __KERNEL__
+
+#include <linux/tipc.h>
+#include <linux/skbuff.h>
+#include <net/tipc/tipc_msg.h>
+
+#define TIPC_FLOW_CONTROL_WIN 512
+
+/**
+ * struct tipc_port - native TIPC port info available to privileged users
+ * @usr_handle: pointer to additional user-defined information about port
+ * @lock: pointer to spinlock for controlling access to port
+ * @connected: non-zero if port is currently connected to a peer port
+ * @conn_type: TIPC type used when connection was established
+ * @conn_instance: TIPC instance used when connection was established
+ * @conn_unacked: number of unacknowledged messages received from peer port
+ * @published: non-zero if port has one or more associated names
+ * @congested: non-zero if cannot send because of link or port congestion
+ * @ref: unique reference to port in TIPC object registry
+ * @phdr: preformatted message header used when sending messages
+ */
+
+struct tipc_port {
+	void *usr_handle;
+	spinlock_t *lock;
+	int connected;
+	u32 conn_type;
+	u32 conn_instance;
+	u32 conn_unacked;
+	int published;
+	u32 congested;
+	u32 ref;
+	struct tipc_msg phdr;
+};
+
+
+/**
+ * tipc_createport_raw - create a native TIPC port and return its reference
+ *
+ * Note: 'dispatcher' and 'wakeup' deliver a locked port.
+ */
+
+u32 tipc_createport_raw(void *usr_handle,
+			u32 (*dispatcher)(struct tipc_port *, struct sk_buff *),
+			void (*wakeup)(struct tipc_port *),
+			const u32 importance);
+
+/*
+ * tipc_set_msg_option(): port must be locked.
+ */
+int tipc_set_msg_option(struct tipc_port *tp_ptr,
+			const char *opt,
+			const u32 len);
+
+int tipc_reject_msg(struct sk_buff *buf, u32 err);
+
+int tipc_send_buf_fast(struct sk_buff *buf, u32 destnode);
+
+void tipc_acknowledge(u32 port_ref, u32 ack);
+
+struct tipc_port *tipc_get_port(const u32 ref);
+
+void *tipc_get_handle(const u32 ref);
+
+
+#endif
+
+#endif
+
diff --git a/net/Kconfig b/net/Kconfig
index 60f6f32..9296b269 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -159,6 +159,7 @@
 source "drivers/net/appletalk/Kconfig"
 source "net/x25/Kconfig"
 source "net/lapb/Kconfig"
+source "net/tipc/Kconfig"
 
 config NET_DIVERT
 	bool "Frame Diverter (EXPERIMENTAL)"
diff --git a/net/Makefile b/net/Makefile
index f5141b9..065796f 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -45,6 +45,7 @@
 obj-$(CONFIG_IP_DCCP)		+= dccp/
 obj-$(CONFIG_IP_SCTP)		+= sctp/
 obj-$(CONFIG_IEEE80211)		+= ieee80211/
+obj-$(CONFIG_TIPC)		+= tipc/
 
 ifeq ($(CONFIG_NET),y)
 obj-$(CONFIG_SYSCTL)		+= sysctl_net.o
diff --git a/net/tipc/Kconfig b/net/tipc/Kconfig
new file mode 100644
index 0000000..05ab18e
--- /dev/null
+++ b/net/tipc/Kconfig
@@ -0,0 +1,112 @@
+#
+# TIPC configuration
+#
+
+menu "TIPC Configuration (EXPERIMENTAL)"
+	depends on INET && EXPERIMENTAL
+
+config TIPC
+	tristate "The TIPC Protocol (EXPERIMENTAL)"
+	---help---
+	  The Transparent Inter Process Communication (TIPC) protocol is
+	  designed for intra-cluster communication. For more information
+	  see http://tipc.sourceforge.net.
+
+	  This protocol support is also available as a module ( = code which
+	  can be inserted in and removed from the running kernel whenever you
+	  want). The module will be called tipc. If you want to compile it
+	  as a module, say M here and read <file:Documentation/modules.txt>.
+
+	  If in doubt, say N.
+
+config TIPC_ADVANCED
+	bool "TIPC: Advanced configuration"
+	depends on TIPC
+	default n
+	help
+	  Saying Y here will open some advanced configuration options
+	  for TIPC. Most users do not need to bother; if unsure, just
+	  say N.
+
+config TIPC_ZONES
+	int "Maximum number of zones in network"
+	depends on TIPC && TIPC_ADVANCED
+	default "3"
+	help
+	  Maximum number of zones inside a TIPC network. Maximum
+	  supported value is 255 zones; minimum is 1.
+
+	  Default is 3 zones per network; setting this higher allows
+	  more zones but may use more memory.
+
+config TIPC_CLUSTERS
+	int "Maximum number of clusters in a zone"
+	depends on TIPC && TIPC_ADVANCED
+	default "1"
+	help
+	  ***Only 1 (one cluster in a zone) is supported by the current
+	  code. Any value set here will be overridden.***
+
+	  Maximum number of clusters inside a TIPC zone. Maximum
+	  supported value is 4095 clusters; minimum is 1.
+
+	  Default is 1; setting this lower may save some memory,
+	  setting it higher allows more clusters but may use more memory.
+
+config TIPC_NODES
+	int "Maximum number of nodes in cluster"
+	depends on TIPC && TIPC_ADVANCED
+	default "255"
+	help
+	  Maximum number of nodes inside a TIPC cluster. Maximum
+	  supported value is 2047 nodes; minimum is 8.
+
+	  Setting this to a smaller value saves some memory;
+	  setting it higher allows more nodes.
+
+config TIPC_SLAVE_NODES
+	int "Maximum number of slave nodes in cluster"
+	depends on TIPC && TIPC_ADVANCED
+	default "0"
+	help
+	  ***This capability is not supported by the current code.***
+
+	  Maximum number of slave nodes inside a TIPC cluster. Maximum
+	  supported value is 2047 nodes; minimum is 0.
+
+	  Setting this to a smaller value saves some memory;
+	  setting it higher allows more nodes.
+
+config TIPC_PORTS
+	int "Maximum number of ports in a node"
+	depends on TIPC && TIPC_ADVANCED
+	default "8191"
+	help
+	  Maximum number of ports within a node. Maximum supported
+	  value is 65535 ports; minimum is 127.
+
+	  Setting this to a smaller value saves some memory;
+	  setting it higher allows more ports.
+
+config TIPC_LOG
+	int "Size of log buffer"
+	depends on TIPC && TIPC_ADVANCED
+	default 0
+	help
+	  Size (in bytes) of TIPC's internal log buffer, which records the
+	  occurrence of significant events.  Maximum supported value
+	  is 32768 bytes, minimum is 0.
+
+	  There is no need to enable the log buffer unless the node will be
+	  managed remotely via TIPC.
+
+config TIPC_DEBUG
+	bool "Enable debugging support"
+	depends on TIPC
+	default n
+	help
+	  This will enable debugging of TIPC.
+
+	  Only say Y here if you are having trouble with TIPC.  It will
+	  enable the display of detailed information about what is going on.
+
+endmenu
diff --git a/net/tipc/Makefile b/net/tipc/Makefile
new file mode 100644
index 0000000..dceb702
--- /dev/null
+++ b/net/tipc/Makefile
@@ -0,0 +1,13 @@
+#
+# Makefile for the Linux TIPC layer
+#
+
+obj-$(CONFIG_TIPC) := tipc.o
+
+tipc-y	+= addr.o bcast.o bearer.o config.o cluster.o \
+	   core.o handler.o link.o discover.o msg.o  \
+	   name_distr.o  subscr.o name_table.o net.o  \
+	   netlink.o node.o node_subscr.o port.o ref.o  \
+	   socket.o user_reg.o zone.o dbg.o eth_media.o
+
+# End of file
diff --git a/net/tipc/addr.c b/net/tipc/addr.c
new file mode 100644
index 0000000..bc35363
--- /dev/null
+++ b/net/tipc/addr.c
@@ -0,0 +1,91 @@
+/*
+ * net/tipc/addr.c: TIPC address utility routines
+ *     
+ * Copyright (c) 2003-2005, Ericsson Research Canada
+ * Copyright (c) 2004-2005, Wind River Systems
+ * Copyright (c) 2005-2006, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without 
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this 
+ * list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice, 
+ * this list of conditions and the following disclaimer in the documentation 
+ * and/or other materials provided with the distribution.
+ * Neither the names of the copyright holders nor the names of its 
+ * contributors may be used to endorse or promote products derived from this 
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "dbg.h"
+#include "addr.h"
+#include "zone.h"
+#include "cluster.h"
+#include "net.h"
+
+u32 tipc_get_addr(void)
+{
+	return tipc_own_addr;
+}
+
+/**
+ * addr_domain_valid - validates a network domain address
+ * 
+ * Accepts <Z.C.N>, <Z.C.0>, <Z.0.0>, and <0.0.0>, 
+ * where Z, C, and N are non-zero and do not exceed the configured limits.
+ * 
+ * Returns 1 if domain address is valid, otherwise 0
+ */
+
+int addr_domain_valid(u32 addr)
+{
+	u32 n = tipc_node(addr);
+	u32 c = tipc_cluster(addr);
+	u32 z = tipc_zone(addr);
+	u32 max_nodes = tipc_max_nodes;
+
+	if (is_slave(addr))
+		max_nodes = LOWEST_SLAVE + tipc_max_slaves;
+	if (n > max_nodes)
+		return 0;
+	if (c > tipc_max_clusters)
+		return 0;
+	if (z > tipc_max_zones)
+		return 0;
+
+	if (n && (!z || !c))
+		return 0;
+	if (c && !z)
+		return 0;
+	return 1;
+}
+
+/**
+ * addr_node_valid - validates a proposed network address for this node
+ * 
+ * Accepts <Z.C.N>, where Z, C, and N are non-zero and do not exceed 
+ * the configured limits.
+ * 
+ * Returns 1 if address can be used, otherwise 0
+ */
+
+int addr_node_valid(u32 addr)
+{
+	return (addr_domain_valid(addr) && tipc_node(addr));
+}
+
diff --git a/net/tipc/addr.h b/net/tipc/addr.h
new file mode 100644
index 0000000..9dabebc
--- /dev/null
+++ b/net/tipc/addr.h
@@ -0,0 +1,125 @@
+/*
+ * net/tipc/addr.h: Include file for TIPC address utility routines
+ * 
+ * Copyright (c) 2003-2005, Ericsson Research Canada
+ * Copyright (c) 2004-2005, Wind River Systems
+ * Copyright (c) 2005-2006, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without 
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this 
+ * list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice, 
+ * this list of conditions and the following disclaimer in the documentation 
+ * and/or other materials provided with the distribution.
+ * Neither the names of the copyright holders nor the names of its 
+ * contributors may be used to endorse or promote products derived from this 
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_ADDR_H
+#define _TIPC_ADDR_H
+
+static inline u32 own_node(void)
+{
+	return tipc_node(tipc_own_addr);
+}
+
+static inline u32 own_cluster(void)
+{
+	return tipc_cluster(tipc_own_addr);
+}
+
+static inline u32 own_zone(void)
+{
+	return tipc_zone(tipc_own_addr);
+}
+
+static inline int in_own_cluster(u32 addr)
+{
+	return !((addr ^ tipc_own_addr) >> 12);
+}
+
+static inline int in_own_zone(u32 addr)
+{
+	return !((addr ^ tipc_own_addr) >> 24);
+}
+
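+/*
+ * A node number with bit 11 set (i.e. >= 2048) denotes a slave node.
+ */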
+static inline int is_slave(u32 addr)
+{
+	return addr & 0x800;
+}
+
+static inline int may_route(u32 addr)
+{
+	return (addr ^ tipc_own_addr) >> 11;
+}
+
+static inline int in_scope(u32 domain, u32 addr)
+{
+	if (!domain || (domain == addr))
+		return 1;
+	if (domain == (addr & 0xfffff000u)) /* domain <Z.C.0> */
+		return 1;
+	if (domain == (addr & 0xff000000u)) /* domain <Z.0.0> */
+		return 1;
+	return 0;
+}
+
+/**
+ * addr_scope - convert message lookup domain to equivalent 2-bit scope value
+ */
+
+static inline int addr_scope(u32 domain)
+{
+	if (likely(!domain))
+		return TIPC_ZONE_SCOPE;
+	if (tipc_node(domain))
+		return TIPC_NODE_SCOPE;
+	if (tipc_cluster(domain))
+		return TIPC_CLUSTER_SCOPE;
+	return TIPC_ZONE_SCOPE;
+}
+
+/**
+ * addr_domain - convert 2-bit scope value to equivalent message lookup domain
+ *  
+ * Needed when address of a named message must be looked up a second time 
+ * after a network hop.
+ */
+
+static inline int addr_domain(int sc)
+{
+	if (likely(sc == TIPC_NODE_SCOPE))
+		return tipc_own_addr;
+	if (sc == TIPC_CLUSTER_SCOPE)
+		return tipc_addr(tipc_zone(tipc_own_addr),
+				 tipc_cluster(tipc_own_addr), 0);
+	return tipc_addr(tipc_zone(tipc_own_addr), 0, 0);
+}
+
+static inline char *addr_string_fill(char *string, u32 addr)
+{
+	snprintf(string, 16, "<%u.%u.%u>",
+		 tipc_zone(addr), tipc_cluster(addr), tipc_node(addr));
+	return string;
+}
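+
+/*
+ * Example (illustrative): addr_string_fill(buf, tipc_addr(1, 2, 3))
+ * writes the string "<1.2.3>"; "buf" must hold at least 16 bytes.
+ */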
+
+int addr_domain_valid(u32);
+int addr_node_valid(u32 addr);
+
+#endif
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
new file mode 100644
index 0000000..35ca906
--- /dev/null
+++ b/net/tipc/bcast.c
@@ -0,0 +1,803 @@
+/*
+ * net/tipc/bcast.c: TIPC broadcast code
+ *     
+ * Copyright (c) 2003-2005, Ericsson Research Canada
+ * Copyright (c) 2004, Intel Corporation.
+ * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005-2006, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without 
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this 
+ * list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice, 
+ * this list of conditions and the following disclaimer in the documentation 
+ * and/or other materials provided with the distribution.
+ * Neither the names of the copyright holders nor the names of its 
+ * contributors may be used to endorse or promote products derived from this 
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "msg.h"
+#include "dbg.h"
+#include "link.h"
+#include "net.h"
+#include "node.h"
+#include "port.h"
+#include "addr.h"
+#include "node_subscr.h"
+#include "name_distr.h"
+#include "bearer.h"
+#include "name_table.h"
+#include "bcast.h"
+
+
+#define MAX_PKT_DEFAULT_MCAST 1500	/* bcast link max packet size (fixed) */
+
+#define BCLINK_WIN_DEFAULT 20		/* bcast link window size (default) */
+
+#define BCLINK_LOG_BUF_SIZE 0
+
+/**
+ * struct bcbearer_pair - a pair of bearers used by broadcast link
+ * @primary: pointer to primary bearer
+ * @secondary: pointer to secondary bearer
+ * 
+ * Bearers must have same priority and same set of reachable destinations 
+ * to be paired.
+ */
+
+struct bcbearer_pair {
+	struct bearer *primary;
+	struct bearer *secondary;
+};
+
+/**
+ * struct bcbearer - bearer used by broadcast link
+ * @bearer: (non-standard) broadcast bearer structure
+ * @media: (non-standard) broadcast media structure
+ * @bpairs: array of bearer pairs
+ * @bpairs_temp: array of bearer pairs used during creation of "bpairs"
+ */
+
+struct bcbearer {
+	struct bearer bearer;
+	struct media media;
+	struct bcbearer_pair bpairs[MAX_BEARERS];
+	struct bcbearer_pair bpairs_temp[TIPC_NUM_LINK_PRI];
+};
+
+/**
+ * struct bclink - link used for broadcast messages
+ * @link: (non-standard) broadcast link structure
+ * @node: (non-standard) node structure representing b'cast link's peer node
+ * 
+ * Handles sequence numbering, fragmentation, bundling, etc.
+ */
+
+struct bclink {
+	struct link link;
+	struct node node;
+};
+
+
+static struct bcbearer *bcbearer = NULL;
+static struct bclink *bclink = NULL;
+static struct link *bcl = NULL;
+static spinlock_t bc_lock = SPIN_LOCK_UNLOCKED;
+
+char bc_link_name[] = "multicast-link";
+
+
+static inline u32 buf_seqno(struct sk_buff *buf)
+{
+	return msg_seqno(buf_msg(buf));
+} 
+
+static inline u32 bcbuf_acks(struct sk_buff *buf)
+{
+	return (u32)(unsigned long)TIPC_SKB_CB(buf)->handle;
+}
+
+static inline void bcbuf_set_acks(struct sk_buff *buf, u32 acks)
+{
+	TIPC_SKB_CB(buf)->handle = (void *)(unsigned long)acks;
+}
+
+static inline void bcbuf_decr_acks(struct sk_buff *buf)
+{
+	bcbuf_set_acks(buf, bcbuf_acks(buf) - 1);
+}
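+
+/*
+ * The buffer's control block 'handle' field is reused above as an
+ * acknowledgement counter: bcbearer_send() initializes it to the
+ * number of nodes expected to acknowledge the buffer, and
+ * bclink_acknowledge() decrements it, releasing the buffer once the
+ * count reaches zero.
+ */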
+
+
+/** 
+ * bclink_set_gap - set gap according to contents of current deferred pkt queue
+ * 
+ * Called with 'node' locked, bc_lock unlocked
+ */
+
+static inline void bclink_set_gap(struct node *n_ptr)
+{
+	struct sk_buff *buf = n_ptr->bclink.deferred_head;
+
+	n_ptr->bclink.gap_after = n_ptr->bclink.gap_to =
+		mod(n_ptr->bclink.last_in);
+	if (unlikely(buf != NULL))
+		n_ptr->bclink.gap_to = mod(buf_seqno(buf) - 1);
+}
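+
+/*
+ * Example: with last_in == 37 and a deferred queue whose first buffer
+ * carries sequence number 42, the gap becomes (37, 41], i.e. packets
+ * 38 through 41 are known to be missing; an empty deferred queue
+ * leaves gap_after == gap_to, meaning no gap.
+ */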
+
+/** 
+ * bclink_ack_allowed - test if ACK or NACK message can be sent at this moment
+ * 
+ * This mechanism endeavours to prevent all nodes in the network from trying
+ * to ACK or NACK at the same time.
+ * 
+ * Note: TIPC uses a different trigger to distribute ACKs than it does to
+ *       distribute NACKs, but tries to use the same spacing (divide by 16). 
+ */
+
+static inline int bclink_ack_allowed(u32 n)
+{
+	return ((n % TIPC_MIN_LINK_WIN) == tipc_own_tag);
+}
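+
+/*
+ * Example (assuming TIPC_MIN_LINK_WIN is 16, matching the "divide by
+ * 16" note above): a node with tipc_own_tag == 3 may ACK/NACK only
+ * when the triggering value n satisfies n % 16 == 3, so the nodes of
+ * a cluster take turns instead of replying in unison.
+ */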
+
+
+/** 
+ * bclink_retransmit_pkt - retransmit broadcast packets
+ * @after: sequence number of last packet to *not* retransmit
+ * @to: sequence number of last packet to retransmit
+ * 
+ * Called with 'node' locked, bc_lock unlocked
+ */
+
+static void bclink_retransmit_pkt(u32 after, u32 to)
+{
+	struct sk_buff *buf;
+
+	spin_lock_bh(&bc_lock);
+	buf = bcl->first_out;
+	while (buf && less_eq(buf_seqno(buf), after)) {
+		buf = buf->next;                
+	}
+	if (buf != NULL)
+		link_retransmit(bcl, buf, mod(to - after));
+	spin_unlock_bh(&bc_lock);              
+}
+
+/** 
+ * bclink_acknowledge - handle acknowledgement of broadcast packets
+ * @n_ptr: node that sent acknowledgement info
+ * @acked: broadcast sequence # that has been acknowledged
+ * 
+ * Node is locked, bc_lock unlocked.
+ */
+
+void bclink_acknowledge(struct node *n_ptr, u32 acked)
+{
+	struct sk_buff *crs;
+	struct sk_buff *next;
+	unsigned int released = 0;
+
+	if (less_eq(acked, n_ptr->bclink.acked))
+		return;
+
+	spin_lock_bh(&bc_lock);
+
+	/* Skip over packets that node has previously acknowledged */
+
+	crs = bcl->first_out;
+	while (crs && less_eq(buf_seqno(crs), n_ptr->bclink.acked)) {
+		crs = crs->next;
+	}
+
+	/* Update packets that node is now acknowledging */
+
+	while (crs && less_eq(buf_seqno(crs), acked)) {
+		next = crs->next;
+		bcbuf_decr_acks(crs);
+		if (bcbuf_acks(crs) == 0) {
+			bcl->first_out = next;
+			bcl->out_queue_size--;
+			buf_discard(crs);
+			released = 1;
+		}
+		crs = next;
+	}
+	n_ptr->bclink.acked = acked;
+
+	/* Try resolving broadcast link congestion, if necessary */
+
+	if (unlikely(bcl->next_out))
+		link_push_queue(bcl);
+	if (unlikely(released && !list_empty(&bcl->waiting_ports)))
+		link_wakeup_ports(bcl, 0);
+	spin_unlock_bh(&bc_lock);
+}
+
+/** 
+ * bclink_send_ack - unicast an ACK msg
+ * 
+ * net_lock and node lock set
+ */
+
+static void bclink_send_ack(struct node *n_ptr)
+{
+	struct link *l_ptr = n_ptr->active_links[n_ptr->addr & 1];
+
+	if (l_ptr != NULL)
+		link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
+}
+
+/** 
+ * bclink_send_nack- broadcast a NACK msg
+ * 
+ * net_lock and node lock set
+ */
+
+static void bclink_send_nack(struct node *n_ptr)
+{
+	struct sk_buff *buf;
+	struct tipc_msg *msg;
+
+	if (!less(n_ptr->bclink.gap_after, n_ptr->bclink.gap_to))
+		return;
+
+	buf = buf_acquire(INT_H_SIZE);
+	if (buf) {
+		msg = buf_msg(buf);
+		msg_init(msg, BCAST_PROTOCOL, STATE_MSG,
+			 TIPC_OK, INT_H_SIZE, n_ptr->addr);
+		msg_set_mc_netid(msg, tipc_net_id);
+		msg_set_bcast_ack(msg, mod(n_ptr->bclink.last_in)); 
+		msg_set_bcgap_after(msg, n_ptr->bclink.gap_after);
+		msg_set_bcgap_to(msg, n_ptr->bclink.gap_to);
+		msg_set_bcast_tag(msg, tipc_own_tag);
+
+		if (bearer_send(&bcbearer->bearer, buf, 0)) {
+			bcl->stats.sent_nacks++;
+			buf_discard(buf);
+		} else {
+			bearer_schedule(bcl->b_ptr, bcl);
+			bcl->proto_msg_queue = buf;
+			bcl->stats.bearer_congs++;
+		}
+
+		/*
+		 * Ensure we don't send another NACK msg to the node
+		 * until 16 more deferred messages arrive from it
+		 * (i.e. helps prevent all nodes from NACK'ing at the
+		 * same time)
+		 */
+		
+		n_ptr->bclink.nack_sync = tipc_own_tag;
+	}
+}
+
+/** 
+ * bclink_check_gap - send a NACK if a sequence gap exists
+ *
+ * net_lock and node lock set
+ */
+
+void bclink_check_gap(struct node *n_ptr, u32 last_sent)
+{
+	if (!n_ptr->bclink.supported ||
+	    less_eq(last_sent, mod(n_ptr->bclink.last_in)))
+		return;
+
+	bclink_set_gap(n_ptr);
+	if (n_ptr->bclink.gap_after == n_ptr->bclink.gap_to)
+		n_ptr->bclink.gap_to = last_sent;
+	bclink_send_nack(n_ptr);
+}
+
+/** 
+ * bclink_peek_nack - process a NACK msg meant for another node
+ * 
+ * Only net_lock set.
+ */
+
+void bclink_peek_nack(u32 dest, u32 sender_tag, u32 gap_after, u32 gap_to)
+{
+	struct node *n_ptr = node_find(dest);
+	u32 my_after, my_to;
+
+	if (unlikely(!n_ptr || !node_is_up(n_ptr)))
+		return;
+	node_lock(n_ptr);
+	/*
+	 * Modify gap to suppress unnecessary NACKs from this node
+	 */
+	my_after = n_ptr->bclink.gap_after;
+	my_to = n_ptr->bclink.gap_to;
+
+	if (less_eq(gap_after, my_after)) {
+		if (less(my_after, gap_to) && less(gap_to, my_to))
+			n_ptr->bclink.gap_after = gap_to;
+		else if (less_eq(my_to, gap_to))
+			n_ptr->bclink.gap_to = n_ptr->bclink.gap_after;
+	} else if (less_eq(gap_after, my_to)) {
+		if (less_eq(my_to, gap_to))
+			n_ptr->bclink.gap_to = gap_after;
+	} else {
+		/* 
+		 * Expand gap if missing bufs not in deferred queue:
+		 */
+		struct sk_buff *buf = n_ptr->bclink.deferred_head;
+		u32 prev = n_ptr->bclink.gap_to;
+
+		for (; buf; buf = buf->next) {
+			u32 seqno = buf_seqno(buf);
+
+			if (mod(seqno - prev) != 1)
+				buf = NULL;
+			if (seqno == gap_after)
+				break;
+			prev = seqno;
+		}
+		if (buf == NULL)
+			n_ptr->bclink.gap_to = gap_after;
+	}
+	/*
+	 * Some nodes may send a complementary NACK now:
+	 */ 
+	if (bclink_ack_allowed(sender_tag + 1)) {
+		if (n_ptr->bclink.gap_to != n_ptr->bclink.gap_after) {
+			bclink_send_nack(n_ptr);
+			bclink_set_gap(n_ptr);
+		}
+	}
+	node_unlock(n_ptr);
+}
+
+/**
+ * bclink_send_msg - broadcast a packet to all nodes in cluster
+ */
+
+int bclink_send_msg(struct sk_buff *buf)
+{
+	int res;
+
+	spin_lock_bh(&bc_lock);
+
+	res = link_send_buf(bcl, buf);
+	if (unlikely(res == -ELINKCONG))
+		buf_discard(buf);
+	else
+		bcl->stats.sent_info++;
+
+	if (bcl->out_queue_size > bcl->stats.max_queue_sz)
+		bcl->stats.max_queue_sz = bcl->out_queue_size;
+	bcl->stats.queue_sz_counts++;
+	bcl->stats.accu_queue_sz += bcl->out_queue_size;
+
+	spin_unlock_bh(&bc_lock);
+	return res;
+}
+
+/**
+ * bclink_recv_pkt - receive a broadcast packet, and deliver upwards
+ * 
+ * net_lock is read_locked, no other locks set
+ */
+
+void bclink_recv_pkt(struct sk_buff *buf)
+{        
+	struct tipc_msg *msg = buf_msg(buf);
+	struct node *node = node_find(msg_prevnode(msg));
+	u32 next_in;
+	u32 seqno;
+	struct sk_buff *deferred;
+
+	msg_dbg(msg, "<BC<<<");
+
+	if (unlikely(!node || !node_is_up(node) || !node->bclink.supported || 
+		     (msg_mc_netid(msg) != tipc_net_id))) {
+		buf_discard(buf);
+		return;
+	}
+
+	if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) {
+		msg_dbg(msg, "<BCNACK<<<");
+		if (msg_destnode(msg) == tipc_own_addr) {
+			node_lock(node);
+			bclink_acknowledge(node, msg_bcast_ack(msg));
+			node_unlock(node);
+			bcl->stats.recv_nacks++;
+			bclink_retransmit_pkt(msg_bcgap_after(msg),
+					      msg_bcgap_to(msg));
+		} else {
+			bclink_peek_nack(msg_destnode(msg),
+					 msg_bcast_tag(msg),
+					 msg_bcgap_after(msg),
+					 msg_bcgap_to(msg));
+		}
+		buf_discard(buf);
+		return;
+	}
+
+	node_lock(node);
+receive:
+	deferred = node->bclink.deferred_head;
+	next_in = mod(node->bclink.last_in + 1);
+	seqno = msg_seqno(msg);
+
+	if (likely(seqno == next_in)) {
+		bcl->stats.recv_info++;
+		node->bclink.last_in++;
+		bclink_set_gap(node);
+		if (unlikely(bclink_ack_allowed(seqno))) {
+			bclink_send_ack(node);
+			bcl->stats.sent_acks++;
+		}
+		if (likely(msg_isdata(msg))) {
+			node_unlock(node);
+			port_recv_mcast(buf, NULL);
+		} else if (msg_user(msg) == MSG_BUNDLER) {
+			bcl->stats.recv_bundles++;
+			bcl->stats.recv_bundled += msg_msgcnt(msg);
+			node_unlock(node);
+			link_recv_bundle(buf);
+		} else if (msg_user(msg) == MSG_FRAGMENTER) {
+			bcl->stats.recv_fragments++;
+			if (link_recv_fragment(&node->bclink.defragm,
+					       &buf, &msg))
+				bcl->stats.recv_fragmented++;
+			node_unlock(node);
+			net_route_msg(buf);
+		} else {
+			node_unlock(node);
+			net_route_msg(buf);
+		}
+		if (deferred && (buf_seqno(deferred) == mod(next_in + 1))) {
+			node_lock(node);
+			buf = deferred;
+			msg = buf_msg(buf);
+			node->bclink.deferred_head = deferred->next;
+			goto receive;
+		}
+		return;
+	} else if (less(next_in, seqno)) {
+		u32 gap_after = node->bclink.gap_after;
+		u32 gap_to = node->bclink.gap_to;
+
+		if (link_defer_pkt(&node->bclink.deferred_head,
+				   &node->bclink.deferred_tail,
+				   buf)) {
+			node->bclink.nack_sync++;
+			bcl->stats.deferred_recv++;
+			if (seqno == mod(gap_after + 1))
+				node->bclink.gap_after = seqno;
+			else if (less(gap_after, seqno) && less(seqno, gap_to))
+				node->bclink.gap_to = seqno;
+		}
+		if (bclink_ack_allowed(node->bclink.nack_sync)) {
+			if (gap_to != gap_after)
+				bclink_send_nack(node);
+			bclink_set_gap(node);
+		}
+	} else {
+		bcl->stats.duplicates++;
+		buf_discard(buf);
+	}
+	node_unlock(node);
+}
+
+u32 bclink_get_last_sent(void)
+{
+	u32 last_sent = mod(bcl->next_out_no - 1);
+
+	if (bcl->next_out)
+		last_sent = mod(buf_seqno(bcl->next_out) - 1);
+	return last_sent;
+}
+
+u32 bclink_acks_missing(struct node *n_ptr)
+{
+	return (n_ptr->bclink.supported &&
+		(bclink_get_last_sent() != n_ptr->bclink.acked));
+}
+
+
+/**
+ * bcbearer_send - send a packet through the broadcast pseudo-bearer
+ * 
+ * Send through as many bearers as necessary to reach all nodes
+ * that support TIPC multicasting.
+ * 
+ * Returns 0 if packet sent successfully, non-zero if not
+ */
+
+int bcbearer_send(struct sk_buff *buf,
+		  struct tipc_bearer *unused1,
+		  struct tipc_media_addr *unused2)
+{
+	static int send_count = 0;
+
+	struct node_map remains;
+	struct node_map remains_new;
+	int bp_index;
+	int swap_time;
+
+	/* Prepare buffer for broadcasting (if first time trying to send it) */
+
+	if (likely(!msg_non_seq(buf_msg(buf)))) {
+		struct tipc_msg *msg;
+
+		assert(cluster_bcast_nodes.count != 0);
+		bcbuf_set_acks(buf, cluster_bcast_nodes.count);
+		msg = buf_msg(buf);
+		msg_set_non_seq(msg);
+		msg_set_mc_netid(msg, tipc_net_id);
+	}
+
+	/* Determine if bearer pairs should be swapped following this attempt */
+
+	if ((swap_time = (++send_count >= 10)))
+		send_count = 0;
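+	/*
+	 * Note: the swap below promotes a usable secondary bearer to
+	 * primary roughly every tenth send, so both bearers of a pair
+	 * share the broadcast load over time.
+	 */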
+
+	/* Send buffer over bearers until all targets reached */
+	
+	remains = cluster_bcast_nodes;
+
+	for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
+		struct bearer *p = bcbearer->bpairs[bp_index].primary;
+		struct bearer *s = bcbearer->bpairs[bp_index].secondary;
+
+		if (!p)
+			break;	/* no more bearers to try */
+
+		nmap_diff(&remains, &p->nodes, &remains_new);
+		if (remains_new.count == remains.count)
+			continue;	/* bearer pair doesn't add anything */
+
+		if (!p->publ.blocked &&
+		    !p->media->send_msg(buf, &p->publ, &p->media->bcast_addr)) {
+			if (swap_time && s && !s->publ.blocked)
+				goto swap;
+			else
+				goto update;
+		}
+
+		if (!s || s->publ.blocked ||
+		    s->media->send_msg(buf, &s->publ, &s->media->bcast_addr))
+			continue;	/* unable to send using bearer pair */
+swap:
+		bcbearer->bpairs[bp_index].primary = s;
+		bcbearer->bpairs[bp_index].secondary = p;
+update:
+		if (remains_new.count == 0)
+			return TIPC_OK;
+
+		remains = remains_new;
+	}
+	
+	/* Unable to reach all targets */
+
+	bcbearer->bearer.publ.blocked = 1;
+	bcl->stats.bearer_congs++;
+	return ~TIPC_OK;
+}
+
+/**
+ * bcbearer_sort - create sets of bearer pairs used by broadcast bearer
+ */
+
+void bcbearer_sort(void)
+{
+	struct bcbearer_pair *bp_temp = bcbearer->bpairs_temp;
+	struct bcbearer_pair *bp_curr;
+	int b_index;
+	int pri;
+
+	spin_lock_bh(&bc_lock);
+
+	/* Group bearers by priority (can assume max of two per priority) */
+
+	memset(bp_temp, 0, sizeof(bcbearer->bpairs_temp));
+
+	for (b_index = 0; b_index < MAX_BEARERS; b_index++) {
+		struct bearer *b = &bearers[b_index];
+
+		if (!b->active || !b->nodes.count)
+			continue;
+
+		if (!bp_temp[b->priority].primary)
+			bp_temp[b->priority].primary = b;
+		else
+			bp_temp[b->priority].secondary = b;
+	}
+
+	/* Create array of bearer pairs for broadcasting */
+
+	bp_curr = bcbearer->bpairs;
+	memset(bcbearer->bpairs, 0, sizeof(bcbearer->bpairs));
+
+	for (pri = (TIPC_NUM_LINK_PRI - 1); pri >= 0; pri--) {
+
+		if (!bp_temp[pri].primary)
+			continue;
+
+		bp_curr->primary = bp_temp[pri].primary;
+
+		if (bp_temp[pri].secondary) {
+			if (nmap_equal(&bp_temp[pri].primary->nodes,
+				       &bp_temp[pri].secondary->nodes)) {
+				bp_curr->secondary = bp_temp[pri].secondary;
+			} else {
+				bp_curr++;
+				bp_curr->primary = bp_temp[pri].secondary;
+			}
+		}
+
+		bp_curr++;
+	}
+
+	spin_unlock_bh(&bc_lock);
+}
+
+/**
+ * bcbearer_push - resolve bearer congestion
+ * 
+ * Forces bclink to push out any unsent packets, until all packets are gone
+ * or congestion reoccurs.
+ * No locks set when function called
+ */
+
+void bcbearer_push(void)
+{
+	struct bearer *b_ptr;
+
+	spin_lock_bh(&bc_lock);
+	b_ptr = &bcbearer->bearer;
+	if (b_ptr->publ.blocked) {
+		b_ptr->publ.blocked = 0;
+		bearer_lock_push(b_ptr);
+	}
+	spin_unlock_bh(&bc_lock);
+}
+
+
+int bclink_stats(char *buf, const u32 buf_size)
+{
+	struct print_buf pb;
+
+	if (!bcl)
+		return 0;
+
+	printbuf_init(&pb, buf, buf_size);
+
+	spin_lock_bh(&bc_lock);
+
+	tipc_printf(&pb, "Link <%s>\n"
+		         "  Window:%u packets\n", 
+		    bcl->name, bcl->queue_limit[0]);
+	tipc_printf(&pb, "  RX packets:%u fragments:%u/%u bundles:%u/%u\n", 
+		    bcl->stats.recv_info,
+		    bcl->stats.recv_fragments,
+		    bcl->stats.recv_fragmented,
+		    bcl->stats.recv_bundles,
+		    bcl->stats.recv_bundled);
+	tipc_printf(&pb, "  TX packets:%u fragments:%u/%u bundles:%u/%u\n", 
+		    bcl->stats.sent_info,
+		    bcl->stats.sent_fragments,
+		    bcl->stats.sent_fragmented, 
+		    bcl->stats.sent_bundles,
+		    bcl->stats.sent_bundled);
+	tipc_printf(&pb, "  RX naks:%u defs:%u dups:%u\n", 
+		    bcl->stats.recv_nacks,
+		    bcl->stats.deferred_recv, 
+		    bcl->stats.duplicates);
+	tipc_printf(&pb, "  TX naks:%u acks:%u dups:%u\n", 
+		    bcl->stats.sent_nacks, 
+		    bcl->stats.sent_acks, 
+		    bcl->stats.retransmitted);
+	tipc_printf(&pb, "  Congestion bearer:%u link:%u  Send queue max:%u avg:%u\n",
+		    bcl->stats.bearer_congs,
+		    bcl->stats.link_congs,
+		    bcl->stats.max_queue_sz,
+		    bcl->stats.queue_sz_counts
+		    ? (bcl->stats.accu_queue_sz / bcl->stats.queue_sz_counts)
+		    : 0);
+
+	spin_unlock_bh(&bc_lock);
+	return printbuf_validate(&pb);
+}
+
+int bclink_reset_stats(void)
+{
+	if (!bcl)
+		return -ENOPROTOOPT;
+
+	spin_lock_bh(&bc_lock);
+	memset(&bcl->stats, 0, sizeof(bcl->stats));
+	spin_unlock_bh(&bc_lock);
+	return TIPC_OK;
+}
+
+int bclink_set_queue_limits(u32 limit)
+{
+	if (!bcl)
+		return -ENOPROTOOPT;
+	if ((limit < TIPC_MIN_LINK_WIN) || (limit > TIPC_MAX_LINK_WIN))
+		return -EINVAL;
+
+	spin_lock_bh(&bc_lock);
+	link_set_queue_limits(bcl, limit);
+	spin_unlock_bh(&bc_lock);
+	return TIPC_OK;
+}
+
+int bclink_init(void)
+{
+	bcbearer = kmalloc(sizeof(*bcbearer), GFP_ATOMIC);
+	bclink = kmalloc(sizeof(*bclink), GFP_ATOMIC);
+	if (!bcbearer || !bclink) {
+ nomem:
+	 	warn("Memory squeeze: failed to create multicast link\n");
+		kfree(bcbearer);
+		bcbearer = NULL;
+		kfree(bclink);
+		bclink = NULL;
+		return -ENOMEM;
+	}
+
+	memset(bcbearer, 0, sizeof(struct bcbearer));
+	INIT_LIST_HEAD(&bcbearer->bearer.cong_links);
+	bcbearer->bearer.media = &bcbearer->media;
+	bcbearer->media.send_msg = bcbearer_send;
+	strcpy(bcbearer->media.name, "tipc-multicast");
+
+	bcl = &bclink->link;
+	memset(bclink, 0, sizeof(struct bclink));
+	INIT_LIST_HEAD(&bcl->waiting_ports);
+	bcl->next_out_no = 1;
+	bclink->node.lock = SPIN_LOCK_UNLOCKED;
+	bcl->owner = &bclink->node;
+	bcl->max_pkt = MAX_PKT_DEFAULT_MCAST;
+	link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
+	bcl->b_ptr = &bcbearer->bearer;
+	bcl->state = WORKING_WORKING;
+	strcpy(bcl->name, bc_link_name);
+
+	if (BCLINK_LOG_BUF_SIZE) {
+		char *pb = kmalloc(BCLINK_LOG_BUF_SIZE, GFP_ATOMIC);
+
+		if (!pb)
+			goto nomem;
+		printbuf_init(&bcl->print_buf, pb, BCLINK_LOG_BUF_SIZE);
+	}
+
+	return TIPC_OK;
+}
+
+void bclink_stop(void)
+{
+	spin_lock_bh(&bc_lock);
+	if (bcbearer) {
+		link_stop(bcl);
+		if (BCLINK_LOG_BUF_SIZE)
+			kfree(bcl->print_buf.buf);
+		bcl = NULL;
+		kfree(bclink);
+		bclink = NULL;
+		kfree(bcbearer);
+		bcbearer = NULL;
+	}
+	spin_unlock_bh(&bc_lock);
+}
+
diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h
new file mode 100644
index 0000000..cc2ede1
--- /dev/null
+++ b/net/tipc/bcast.h
@@ -0,0 +1,220 @@
+/*
+ * net/tipc/bcast.h: Include file for TIPC broadcast code
+ * 
+ * Copyright (c) 2003-2005, Ericsson Research Canada
+ * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005-2006, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without 
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this 
+ * list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice, 
+ * this list of conditions and the following disclaimer in the documentation 
+ * and/or other materials provided with the distribution.
+ * Neither the names of the copyright holders nor the names of its 
+ * contributors may be used to endorse or promote products derived from this 
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_BCAST_H
+#define _TIPC_BCAST_H
+
+#define MAX_NODES 4096
+#define WSIZE 32
+
+/**
+ * struct node_map - set of node identifiers
+ * @count: # of nodes in set
+ * @map: bitmap of node identifiers that are in the set
+ */
+
+struct node_map {
+	u32 count;
+	u32 map[MAX_NODES / WSIZE];
+};
+
+
+#define PLSIZE 32
+
+/**
+ * struct port_list - set of node local destination ports
+ * @count: # of ports in set (only valid for first entry in list)
+ * @next: pointer to next entry in list
+ * @ports: array of port references
+ */
+
+struct port_list {
+	int count;
+	struct port_list *next;
+	u32 ports[PLSIZE];
+};
+
+
+struct node;
+
+extern char bc_link_name[];
+
+
+/**
+ * nmap_get - determine if node exists in a node map
+ */
+
+static inline int nmap_get(struct node_map *nm_ptr, u32 node)
+{
+	int n = tipc_node(node);
+	int w = n / WSIZE;
+	int b = n % WSIZE;
+
+	return nm_ptr->map[w] & (1 << b);
+}
+
+/**
+ * nmap_add - add a node to a node map
+ */
+
+static inline void nmap_add(struct node_map *nm_ptr, u32 node)
+{
+	int n = tipc_node(node);
+	int w = n / WSIZE;
+	u32 mask = (1 << (n % WSIZE));
+
+	if ((nm_ptr->map[w] & mask) == 0) {
+		nm_ptr->count++;
+		nm_ptr->map[w] |= mask;
+	}
+}
+
+/** 
+ * nmap_remove - remove a node from a node map
+ */
+
+static inline void nmap_remove(struct node_map *nm_ptr, u32 node)
+{
+	int n = tipc_node(node);
+	int w = n / WSIZE;
+	u32 mask = (1 << (n % WSIZE));
+
+	if ((nm_ptr->map[w] & mask) != 0) {
+		nm_ptr->map[w] &= ~mask;
+		nm_ptr->count--;
+	}
+}
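+
+/*
+ * Example usage (hypothetical map, for illustration):
+ *
+ *	struct node_map map = { 0, { 0, } };
+ *
+ *	nmap_add(&map, tipc_addr(1, 1, 5));
+ *	nmap_add(&map, tipc_addr(1, 1, 5));	(duplicate: count stays 1)
+ *	nmap_remove(&map, tipc_addr(1, 1, 5));	(count drops back to 0)
+ */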
+
+/**
+ * nmap_equal - test for equality of node maps
+ */
+
+static inline int nmap_equal(struct node_map *nm_a, struct node_map *nm_b)
+{
+	return !memcmp(nm_a, nm_b, sizeof(*nm_a));
+}
+
+/**
+ * nmap_diff - find differences between node maps
+ * @nm_a: input node map A
+ * @nm_b: input node map B
+ * @nm_diff: output node map A-B (i.e. nodes of A that are not in B)
+ */
+
+static inline void nmap_diff(struct node_map *nm_a, struct node_map *nm_b,
+			     struct node_map *nm_diff)
+{
+	int stop = sizeof(nm_a->map) / sizeof(u32);
+	int w;
+	int b;
+	u32 map;
+
+	memset(nm_diff, 0, sizeof(*nm_diff));
+	for (w = 0; w < stop; w++) {
+		map = nm_a->map[w] ^ (nm_a->map[w] & nm_b->map[w]);
+		nm_diff->map[w] = map;
+		if (map != 0) {
+			for (b = 0 ; b < WSIZE; b++) {
+				if (map & (1 << b))
+					nm_diff->count++;
+			}
+		}
+	}
+}
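+
+/*
+ * Example: if nm_a holds nodes {1, 2, 5} and nm_b holds {2, 7}, then
+ * nm_diff ends up holding {1, 5} with count == 2; nodes present only
+ * in nm_b are ignored.
+ */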
+
+/**
+ * port_list_add - add a port to a port list, ensuring no duplicates
+ */
+
+static inline void port_list_add(struct port_list *pl_ptr, u32 port)
+{
+	struct port_list *item = pl_ptr;
+	int i;
+	int item_sz = PLSIZE;
+	int cnt = pl_ptr->count;
+
+	for (; ; cnt -= item_sz, item = item->next) {
+		if (cnt < PLSIZE)
+			item_sz = cnt;
+		for (i = 0; i < item_sz; i++)
+			if (item->ports[i] == port)
+				return;
+		if (i < PLSIZE) {
+			item->ports[i] = port;
+			pl_ptr->count++;
+			return;
+		}
+		if (!item->next) {
+			item->next = kmalloc(sizeof(*item), GFP_ATOMIC);
+			if (!item->next) {
+				warn("Memory squeeze: multicast destination port list is incomplete\n");
+				return;
+			}
+			item->next->next = NULL;
+		}
+	}
+}
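+
+/*
+ * Note: the first PLSIZE ports live directly in the caller's
+ * (typically stack-allocated) head item; adding port PLSIZE + 1
+ * triggers allocation of a chained item, and so on.  Only the chained
+ * items are released by port_list_free() below.
+ */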
+
+/**
+ * port_list_free - free dynamically created entries in port_list chain
+ * 
+ * Note: First item is on stack, so it doesn't need to be released
+ */
+
+static inline void port_list_free(struct port_list *pl_ptr)
+{
+	struct port_list *item;
+	struct port_list *next;
+
+	for (item = pl_ptr->next; item; item = next) {
+		next = item->next;
+		kfree(item);
+	}
+}
+
+
+int  bclink_init(void);
+void bclink_stop(void);
+void bclink_acknowledge(struct node *n_ptr, u32 acked);
+int  bclink_send_msg(struct sk_buff *buf);
+void bclink_recv_pkt(struct sk_buff *buf);
+u32  bclink_get_last_sent(void);
+u32  bclink_acks_missing(struct node *n_ptr);
+void bclink_check_gap(struct node *n_ptr, u32 seqno);
+int  bclink_stats(char *stats_buf, const u32 buf_size);
+int  bclink_reset_stats(void);
+int  bclink_set_queue_limits(u32 limit);
+void bcbearer_sort(void);
+void bcbearer_push(void);
+
+#endif
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
new file mode 100644
index 0000000..c19465c
--- /dev/null
+++ b/net/tipc/bearer.c
@@ -0,0 +1,689 @@
+/*
+ * net/tipc/bearer.c: TIPC bearer code
+ * 
+ * Copyright (c) 2003-2005, Ericsson Research Canada
+ * Copyright (c) 2004-2005, Wind River Systems
+ * Copyright (c) 2005-2006, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without 
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this 
+ * list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice, 
+ * this list of conditions and the following disclaimer in the documentation 
+ * and/or other materials provided with the distribution.
+ * Neither the names of the copyright holders nor the names of its 
+ * contributors may be used to endorse or promote products derived from this 
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "config.h"
+#include "dbg.h"
+#include "bearer.h"
+#include "link.h"
+#include "port.h"
+#include "discover.h"
+#include "bcast.h"
+
+#define MAX_ADDR_STR 32
+
+static struct media *media_list = 0;
+static u32 media_count = 0;
+
+struct bearer *bearers = 0;
+
+/**
+ * media_name_valid - validate media name
+ * 
+ * Returns 1 if media name is valid, otherwise 0.
+ */
+
+static int media_name_valid(const char *name)
+{
+	u32 len;
+
+	len = strlen(name);
+	if ((len + 1) > TIPC_MAX_MEDIA_NAME)
+		return 0;
+	return (strspn(name, tipc_alphabet) == len);
+}
+
+/**
+ * media_find - locates specified media object by name
+ */
+
+static struct media *media_find(const char *name)
+{
+	struct media *m_ptr;
+	u32 i;
+
+	for (i = 0, m_ptr = media_list; i < media_count; i++, m_ptr++) {
+		if (!strcmp(m_ptr->name, name))
+			return m_ptr;
+	}
+	return 0;
+}
+
+/**
+ * tipc_register_media - register a media type
+ * 
+ * Bearers for this media type must be activated separately at a later stage.
+ */
+
+int  tipc_register_media(u32 media_type,
+			 char *name, 
+			 int (*enable)(struct tipc_bearer *), 
+			 void (*disable)(struct tipc_bearer *), 
+			 int (*send_msg)(struct sk_buff *, 
+					 struct tipc_bearer *,
+					 struct tipc_media_addr *), 
+			 char *(*addr2str)(struct tipc_media_addr *a,
+					   char *str_buf, int str_size),
+			 struct tipc_media_addr *bcast_addr,
+			 const u32 bearer_priority,
+			 const u32 link_tolerance,  /* [ms] */
+			 const u32 send_window_limit)
+{
+	struct media *m_ptr;
+	u32 media_id;
+	u32 i;
+	int res = -EINVAL;
+
+	write_lock_bh(&net_lock);
+	if (!media_list)
+		goto exit;
+
+	if (!media_name_valid(name)) {
+		warn("Media registration error: illegal name <%s>\n", name);
+		goto exit;
+	}
+	if (!bcast_addr) {
+		warn("Media registration error: no broadcast address supplied\n");
+		goto exit;
+	}
+	if (bearer_priority >= TIPC_NUM_LINK_PRI) {
+		warn("Media registration error: priority %u\n", bearer_priority);
+		goto exit;
+	}
+	if ((link_tolerance < TIPC_MIN_LINK_TOL) || 
+	    (link_tolerance > TIPC_MAX_LINK_TOL)) {
+		warn("Media registration error: tolerance %u\n", link_tolerance);
+		goto exit;
+	}
+
+	media_id = media_count++;
+	if (media_id >= MAX_MEDIA) {
+		warn("Attempt to register more than %u media\n", MAX_MEDIA);
+		media_count--;
+		goto exit;
+	}
+	for (i = 0; i < media_id; i++) {
+		if (media_list[i].type_id == media_type) {
+			warn("Attempt to register second media with type %u\n", 
+			     media_type);
+			media_count--;
+			goto exit;
+		}
+		if (!strcmp(name, media_list[i].name)) {
+			warn("Attempt to re-register media name <%s>\n", name);
+			media_count--;
+			goto exit;
+		}
+	}
+
+	m_ptr = &media_list[media_id];
+	m_ptr->type_id = media_type;
+	m_ptr->send_msg = send_msg;
+	m_ptr->enable_bearer = enable;
+	m_ptr->disable_bearer = disable;
+	m_ptr->addr2str = addr2str;
+	memcpy(&m_ptr->bcast_addr, bcast_addr, sizeof(*bcast_addr));
+	m_ptr->bcast = 1;
+	strcpy(m_ptr->name, name);
+	m_ptr->priority = bearer_priority;
+	m_ptr->tolerance = link_tolerance;
+	m_ptr->window = send_window_limit;
+	dbg("Media <%s> registered\n", name);
+	res = 0;
+exit:
+	write_unlock_bh(&net_lock);
+	return res;
+}
+
+/**
+ * media_addr_printf - record media address in print buffer
+ */
+
+void media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a)
+{
+	struct media *m_ptr;
+	u32 media_type;
+	u32 i;
+
+	media_type = ntohl(a->type);
+	for (i = 0, m_ptr = media_list; i < media_count; i++, m_ptr++) {
+		if (m_ptr->type_id == media_type)
+			break;
+	}
+
+	if ((i < media_count) && (m_ptr->addr2str != NULL)) {
+		char addr_str[MAX_ADDR_STR];
+
+		tipc_printf(pb, "%s(%s) ", m_ptr->name, 
+			    m_ptr->addr2str(a, addr_str, sizeof(addr_str)));
+	} else {
+		unchar *addr = (unchar *)&a->dev_addr;
+
+		tipc_printf(pb, "UNKNOWN(%u):", media_type);
+		for (i = 0; i < (sizeof(*a) - sizeof(a->type)); i++) {
+			tipc_printf(pb, "%02x ", addr[i]);
+		}
+	}
+}
+
+/**
+ * media_get_names - record names of registered media in buffer
+ */
+
+struct sk_buff *media_get_names(void)
+{
+	struct sk_buff *buf;
+	struct media *m_ptr;
+	int i;
+
+	buf = cfg_reply_alloc(MAX_MEDIA * TLV_SPACE(TIPC_MAX_MEDIA_NAME));
+	if (!buf)
+		return NULL;
+
+	read_lock_bh(&net_lock);
+	for (i = 0, m_ptr = media_list; i < media_count; i++, m_ptr++) {
+		cfg_append_tlv(buf, TIPC_TLV_MEDIA_NAME, m_ptr->name, 
+			       strlen(m_ptr->name) + 1);
+	}
+	read_unlock_bh(&net_lock);
+	return buf;
+}
+
+/**
+ * bearer_name_validate - validate & (optionally) deconstruct bearer name
+ * @name: ptr to bearer name string
+ * @name_parts: ptr to area for bearer name components (or NULL if not needed)
+ * 
+ * Returns 1 if bearer name is valid, otherwise 0.
+ */
+
+static int bearer_name_validate(const char *name, 
+				struct bearer_name *name_parts)
+{
+	char name_copy[TIPC_MAX_BEARER_NAME];
+	char *media_name;
+	char *if_name;
+	u32 media_len;
+	u32 if_len;
+
+	/* copy bearer name & ensure length is OK */
+
+	name_copy[TIPC_MAX_BEARER_NAME - 1] = 0;
+	/* need above in case non-POSIX strncpy() doesn't pad with nulls */
+	strncpy(name_copy, name, TIPC_MAX_BEARER_NAME);
+	if (name_copy[TIPC_MAX_BEARER_NAME - 1] != 0)
+		return 0;
+
+	/* ensure all component parts of bearer name are present */
+
+	media_name = name_copy;
+	if ((if_name = strchr(media_name, ':')) == NULL)
+		return 0;
+	*(if_name++) = 0;
+	media_len = if_name - media_name;
+	if_len = strlen(if_name) + 1;
+
+	/* validate component parts of bearer name */
+
+	if ((media_len <= 1) || (media_len > TIPC_MAX_MEDIA_NAME) || 
+	    (if_len <= 1) || (if_len > TIPC_MAX_IF_NAME) || 
+	    (strspn(media_name, tipc_alphabet) != (media_len - 1)) ||
+	    (strspn(if_name, tipc_alphabet) != (if_len - 1)))
+		return 0;
+
+	/* return bearer name components, if necessary */
+
+	if (name_parts) {
+		strcpy(name_parts->media_name, media_name);
+		strcpy(name_parts->if_name, if_name);
+	}
+	return 1;
+}
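+
+/*
+ * Example: "eth:eth0" is a valid bearer name, yielding media_name
+ * "eth" and if_name "eth0"; a name lacking the ':' separator, or
+ * having an empty component, is rejected.
+ */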
+
+/**
+ * bearer_find - locates bearer object with matching bearer name
+ */
+
+static struct bearer *bearer_find(const char *name)
+{
+	struct bearer *b_ptr;
+	u32 i;
+
+	for (i = 0, b_ptr = bearers; i < MAX_BEARERS; i++, b_ptr++) {
+		if (b_ptr->active && (!strcmp(b_ptr->publ.name, name)))
+			return b_ptr;
+	}
+	return 0;
+}
+
+/**
+ * bearer_find_interface - locates bearer object with matching interface name
+ */
+
+struct bearer *bearer_find_interface(const char *if_name)
+{
+	struct bearer *b_ptr;
+	char *b_if_name;
+	u32 i;
+
+	for (i = 0, b_ptr = bearers; i < MAX_BEARERS; i++, b_ptr++) {
+		if (!b_ptr->active)
+			continue;
+		b_if_name = strchr(b_ptr->publ.name, ':') + 1;
+		if (!strcmp(b_if_name, if_name))
+			return b_ptr;
+	}
+	return 0;
+}
+
+/**
+ * bearer_get_names - record names of bearers in buffer
+ */
+
+struct sk_buff *bearer_get_names(void)
+{
+	struct sk_buff *buf;
+	struct media *m_ptr;
+	struct bearer *b_ptr;
+	int i, j;
+
+	buf = cfg_reply_alloc(MAX_BEARERS * TLV_SPACE(TIPC_MAX_BEARER_NAME));
+	if (!buf)
+		return NULL;
+
+	read_lock_bh(&net_lock);
+	for (i = 0, m_ptr = media_list; i < media_count; i++, m_ptr++) {
+		for (j = 0; j < MAX_BEARERS; j++) {
+			b_ptr = &bearers[j];
+			if (b_ptr->active && (b_ptr->media == m_ptr)) {
+				cfg_append_tlv(buf, TIPC_TLV_BEARER_NAME, 
+					       b_ptr->publ.name, 
+					       strlen(b_ptr->publ.name) + 1);
+			}
+		}
+	}
+	read_unlock_bh(&net_lock);
+	return buf;
+}
+
+void bearer_add_dest(struct bearer *b_ptr, u32 dest)
+{
+	nmap_add(&b_ptr->nodes, dest);
+	disc_update_link_req(b_ptr->link_req);
+	bcbearer_sort();
+}
+
+void bearer_remove_dest(struct bearer *b_ptr, u32 dest)
+{
+	nmap_remove(&b_ptr->nodes, dest);
+	disc_update_link_req(b_ptr->link_req);
+	bcbearer_sort();
+}
+
+/*
+ * bearer_push(): Resolve bearer congestion. Force the waiting
+ * links to push out their unsent packets, one packet per link
+ * per iteration, until all packets are gone or congestion reoccurs.
+ * 'net_lock' is read_locked when this function is called;
+ * bearer.lock must be taken before calling.
+ * Returns binary true (1) or false (0).
+ */
+static int bearer_push(struct bearer *b_ptr)
+{
+	u32 res = TIPC_OK;
+	struct link *ln, *tln;
+
+	if (b_ptr->publ.blocked)
+		return 0;
+
+	while (!list_empty(&b_ptr->cong_links) && (res != PUSH_FAILED)) {
+		list_for_each_entry_safe(ln, tln, &b_ptr->cong_links, link_list) {
+			res = link_push_packet(ln);
+			if (res == PUSH_FAILED)
+				break;
+			if (res == PUSH_FINISHED)
+				list_move_tail(&ln->link_list, &b_ptr->links);
+		}
+	}
+	return list_empty(&b_ptr->cong_links);
+}
+
+void bearer_lock_push(struct bearer *b_ptr)
+{
+	int res;
+
+	spin_lock_bh(&b_ptr->publ.lock);
+	res = bearer_push(b_ptr);
+	spin_unlock_bh(&b_ptr->publ.lock);
+	if (res)
+		bcbearer_push();
+}
+
+
+/*
+ * tipc_continue - allow a bearer to accept new send requests again
+ * after congestion or blocking; see bearer_send().
+ */
+void tipc_continue(struct tipc_bearer *tb_ptr)
+{
+	struct bearer *b_ptr = (struct bearer *)tb_ptr;
+
+	spin_lock_bh(&b_ptr->publ.lock);
+	b_ptr->continue_count++;
+	if (!list_empty(&b_ptr->cong_links))
+		k_signal((Handler)bearer_lock_push, (unsigned long)b_ptr);
+	b_ptr->publ.blocked = 0;
+	spin_unlock_bh(&b_ptr->publ.lock);
+}
+
+/*
+ * Schedule link for sending of messages after the bearer 
+ * has been deblocked by 'tipc_continue()'. This method is called 
+ * when somebody tries to send a message via this link while 
+ * the bearer is congested. 'net_lock' is in read_lock here
+ * bearer.lock is busy
+ */
+
+static void bearer_schedule_unlocked(struct bearer *b_ptr, struct link *l_ptr)
+{
+	list_move_tail(&l_ptr->link_list, &b_ptr->cong_links);
+}
+
+/*
+ * Schedule link for sending of messages after the bearer 
+ * has been deblocked by 'tipc_continue()'. This method is called 
+ * when somebody tries to send a message via this link while 
+ * the bearer is congested. 'net_lock' is in read_lock here,
+ * bearer.lock is free
+ */
+
+void bearer_schedule(struct bearer *b_ptr, struct link *l_ptr)
+{
+	spin_lock_bh(&b_ptr->publ.lock);
+	bearer_schedule_unlocked(b_ptr, l_ptr);
+	spin_unlock_bh(&b_ptr->publ.lock);
+}
+
+
+/*
+ * bearer_resolve_congestion(): Check if there is bearer congestion,
+ * and if there is, try to resolve it before returning.
+ * 'net_lock' is read_locked when this function is called
+ */
+int bearer_resolve_congestion(struct bearer *b_ptr, struct link *l_ptr)
+{
+	int res = 1;
+
+	if (list_empty(&b_ptr->cong_links))
+		return 1;
+	spin_lock_bh(&b_ptr->publ.lock);
+	if (!bearer_push(b_ptr)) {
+		bearer_schedule_unlocked(b_ptr, l_ptr);
+		res = 0;
+	}
+	spin_unlock_bh(&b_ptr->publ.lock);
+	return res;
+}
+
+
+/**
+ * tipc_enable_bearer - enable bearer with the given name
+ */              
+
+int tipc_enable_bearer(const char *name, u32 bcast_scope, u32 priority)
+{
+	struct bearer *b_ptr;
+	struct media *m_ptr;
+	struct bearer_name b_name;
+	char addr_string[16];
+	u32 bearer_id;
+	u32 with_this_prio;
+	u32 i;
+	int res = -EINVAL;
+
+	if (tipc_mode != TIPC_NET_MODE)
+		return -ENOPROTOOPT;
+	if (!bearer_name_validate(name, &b_name) ||
+	    !addr_domain_valid(bcast_scope) ||
+	    !in_scope(bcast_scope, tipc_own_addr) ||
+	    (priority > TIPC_NUM_LINK_PRI))
+		return -EINVAL;
+
+	write_lock_bh(&net_lock);
+	if (!bearers)
+		goto failed;
+
+	m_ptr = media_find(b_name.media_name);
+	if (!m_ptr) {
+		warn("No media <%s>\n", b_name.media_name);
+		goto failed;
+	}
+	if (priority == TIPC_NUM_LINK_PRI)
+		priority = m_ptr->priority;
+
+restart:
+	bearer_id = MAX_BEARERS;
+	with_this_prio = 1;
+	for (i = MAX_BEARERS; i-- != 0; ) {
+		if (!bearers[i].active) {
+			bearer_id = i;
+			continue;
+		}
+		if (!strcmp(name, bearers[i].publ.name)) {
+			warn("Bearer <%s> already enabled\n", name);
+			goto failed;
+		}
+		if ((bearers[i].priority == priority) &&
+		    (++with_this_prio > 2)) {
+			if (priority-- == 0) {
+				warn("Third bearer <%s> with priority %u, unable to lower to %u\n",
+				     name, priority + 1, priority);
+				goto failed;
+			}
+			warn("Third bearer <%s> with priority %u, lowering to %u\n",
+			     name, priority + 1, priority);
+			goto restart;
+		}
+	}
+	if (bearer_id >= MAX_BEARERS) {
+		warn("Attempt to enable more than %d bearers\n", MAX_BEARERS);
+		goto failed;
+	}
+
+	b_ptr = &bearers[bearer_id];
+	memset(b_ptr, 0, sizeof(struct bearer));
+
+	strcpy(b_ptr->publ.name, name);
+	res = m_ptr->enable_bearer(&b_ptr->publ);
+	if (res) {
+		warn("Failed to enable bearer <%s>\n", name);
+		goto failed;
+	}
+
+	b_ptr->identity = bearer_id;
+	b_ptr->media = m_ptr;
+	b_ptr->net_plane = bearer_id + 'A';
+	b_ptr->active = 1;
+	b_ptr->detect_scope = bcast_scope;
+	b_ptr->priority = priority;
+	INIT_LIST_HEAD(&b_ptr->cong_links);
+	INIT_LIST_HEAD(&b_ptr->links);
+	if (m_ptr->bcast) {
+		b_ptr->link_req = disc_init_link_req(b_ptr, &m_ptr->bcast_addr,
+						     bcast_scope, 2);
+	}
+	b_ptr->publ.lock = SPIN_LOCK_UNLOCKED;
+	write_unlock_bh(&net_lock);
+	info("Enabled bearer <%s>, discovery domain %s\n",
+	     name, addr_string_fill(addr_string, bcast_scope));
+	return 0;
+failed:
+	write_unlock_bh(&net_lock);
+	return res;
+}
+
+/**
+ * tipc_block_bearer(): Block the bearer with the given name,
+ *                      and reset all its links
+ */
+
+int tipc_block_bearer(const char *name)
+{
+	struct bearer *b_ptr = 0;
+	struct link *l_ptr;
+	struct link *temp_l_ptr;
+
+	if (tipc_mode != TIPC_NET_MODE)
+		return -ENOPROTOOPT;
+
+	read_lock_bh(&net_lock);
+	b_ptr = bearer_find(name);
+	if (!b_ptr) {
+		warn("Attempt to block unknown bearer <%s>\n", name);
+		read_unlock_bh(&net_lock);
+		return -EINVAL;
+	}
+
+	spin_lock_bh(&b_ptr->publ.lock);
+	b_ptr->publ.blocked = 1;
+	list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) {
+		struct node *n_ptr = l_ptr->owner;
+
+		spin_lock_bh(&n_ptr->lock);
+		link_reset(l_ptr);
+		spin_unlock_bh(&n_ptr->lock);
+	}
+	spin_unlock_bh(&b_ptr->publ.lock);
+	read_unlock_bh(&net_lock);
+	info("Blocked bearer <%s>\n", name);
+	return TIPC_OK;
+}
+
+/**
+ * bearer_disable - disable a bearer and delete its links
+ * 
+ * Note: This routine assumes caller holds net_lock.
+ */
+
+static int bearer_disable(const char *name)
+{
+	struct bearer *b_ptr;
+	struct link *l_ptr;
+	struct link *temp_l_ptr;
+
+	if (tipc_mode != TIPC_NET_MODE)
+		return -ENOPROTOOPT;
+
+	b_ptr = bearer_find(name);
+	if (!b_ptr) {
+		warn("Attempt to disable unknown bearer <%s>\n", name);
+		return -EINVAL;
+	}
+
+	disc_stop_link_req(b_ptr->link_req);
+	spin_lock_bh(&b_ptr->publ.lock);
+	b_ptr->link_req = NULL;
+	b_ptr->publ.blocked = 1;
+	if (b_ptr->media->disable_bearer) {
+		spin_unlock_bh(&b_ptr->publ.lock);
+		write_unlock_bh(&net_lock);
+		b_ptr->media->disable_bearer(&b_ptr->publ);
+		write_lock_bh(&net_lock);
+		spin_lock_bh(&b_ptr->publ.lock);
+	}
+	list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) {
+		link_delete(l_ptr);
+	}
+	spin_unlock_bh(&b_ptr->publ.lock);
+	info("Disabled bearer <%s>\n", name);
+	memset(b_ptr, 0, sizeof(struct bearer));
+	return TIPC_OK;
+}
+
+int tipc_disable_bearer(const char *name)
+{
+	int res;
+
+	write_lock_bh(&net_lock);
+	res = bearer_disable(name);
+	write_unlock_bh(&net_lock);
+	return res;
+}
+
+
+
+int bearer_init(void)
+{
+	int res;
+
+	write_lock_bh(&net_lock);
+	bearers = kmalloc(MAX_BEARERS * sizeof(struct bearer), GFP_ATOMIC);
+	media_list = kmalloc(MAX_MEDIA * sizeof(struct media), GFP_ATOMIC);
+	if (bearers && media_list) {
+		memset(bearers, 0, MAX_BEARERS * sizeof(struct bearer));
+		memset(media_list, 0, MAX_MEDIA * sizeof(struct media));
+		res = TIPC_OK;
+	} else {
+		kfree(bearers);
+		kfree(media_list);
+		bearers = 0;
+		media_list = 0;
+		res = -ENOMEM;
+	}
+	write_unlock_bh(&net_lock);
+	return res;
+}
+
+void bearer_stop(void)
+{
+	u32 i;
+
+	if (!bearers)
+		return;
+
+	for (i = 0; i < MAX_BEARERS; i++) {
+		if (bearers[i].active)
+			bearers[i].publ.blocked = 1;
+	}
+	for (i = 0; i < MAX_BEARERS; i++) {
+		if (bearers[i].active)
+			bearer_disable(bearers[i].publ.name);
+	}
+	kfree(bearers);
+	kfree(media_list);
+	bearers = 0;
+	media_list = 0;
+	media_count = 0;
+}
+
+
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
new file mode 100644
index 0000000..02a16f8
--- /dev/null
+++ b/net/tipc/bearer.h
@@ -0,0 +1,169 @@
+/*
+ * net/tipc/bearer.h: Include file for TIPC bearer code
+ * 
+ * Copyright (c) 2003-2005, Ericsson Research Canada
+ * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005-2006, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without 
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this 
+ * list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice, 
+ * this list of conditions and the following disclaimer in the documentation 
+ * and/or other materials provided with the distribution.
+ * Neither the names of the copyright holders nor the names of its 
+ * contributors may be used to endorse or promote products derived from this 
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_BEARER_H
+#define _TIPC_BEARER_H
+
+#include <net/tipc/tipc_bearer.h>
+#include "bcast.h"
+
+#define MAX_BEARERS 8
+#define MAX_MEDIA 4
+
+
+/**
+ * struct media - TIPC media information available to internal users
+ * @send_msg: routine which handles buffer transmission
+ * @enable_bearer: routine which enables a bearer
+ * @disable_bearer: routine which disables a bearer
+ * @addr2str: routine which converts bearer's address to string form
+ * @bcast_addr: media address used in broadcasting
+ * @bcast: non-zero if media supports broadcasting [currently mandatory]
+ * @priority: default link (and bearer) priority
+ * @tolerance: default time (in ms) before declaring link failure
+ * @window: default window (in packets) before declaring link congestion
+ * @type_id: TIPC media identifier [defined in tipc_bearer.h]
+ * @name: media name
+ */
+ 
+struct media {
+	int (*send_msg)(struct sk_buff *buf, 
+			struct tipc_bearer *b_ptr,
+			struct tipc_media_addr *dest);
+	int (*enable_bearer)(struct tipc_bearer *b_ptr);
+	void (*disable_bearer)(struct tipc_bearer *b_ptr);
+	char *(*addr2str)(struct tipc_media_addr *a, 
+			  char *str_buf, int str_size);
+	struct tipc_media_addr bcast_addr;
+	int bcast;
+	u32 priority;
+	u32 tolerance;
+	u32 window;
+	u32 type_id;
+	char name[TIPC_MAX_MEDIA_NAME];
+};
+
+/**
+ * struct bearer - TIPC bearer information available to internal users
+ * @publ: bearer information available to privileged users
+ * @media: ptr to media structure associated with bearer
+ * @priority: default link priority for bearer
+ * @detect_scope: network address mask used during automatic link creation
+ * @identity: array index of this bearer within TIPC bearer array
+ * @link_req: ptr to (optional) structure making periodic link setup requests
+ * @links: list of non-congested links associated with bearer
+ * @cong_links: list of congested links associated with bearer
+ * @continue_count: # of times bearer has resumed after congestion or blocking
+ * @active: non-zero if bearer structure represents an enabled bearer
+ * @net_plane: network plane ('A' through 'H') currently associated with bearer
+ * @nodes: indicates which nodes in cluster can be reached through bearer
+ */
+ 
+struct bearer {
+	struct tipc_bearer publ;
+	struct media *media;
+	u32 priority;
+	u32 detect_scope;
+	u32 identity;
+	struct link_req *link_req;
+	struct list_head links;
+	struct list_head cong_links;
+	u32 continue_count;
+	int active;
+	char net_plane;
+	struct node_map nodes;
+};
+
+struct bearer_name {
+	char media_name[TIPC_MAX_MEDIA_NAME];
+	char if_name[TIPC_MAX_IF_NAME];
+};
+
+struct link;
+
+extern struct bearer *bearers;
+
+void media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a);
+struct sk_buff *media_get_names(void);
+
+struct sk_buff *bearer_get_names(void);
+void bearer_add_dest(struct bearer *b_ptr, u32 dest);
+void bearer_remove_dest(struct bearer *b_ptr, u32 dest);
+void bearer_schedule(struct bearer *b_ptr, struct link *l_ptr);
+struct bearer *bearer_find_interface(const char *if_name);
+int bearer_resolve_congestion(struct bearer *b_ptr, struct link *l_ptr);
+int bearer_init(void);
+void bearer_stop(void);
+int bearer_broadcast(struct sk_buff *buf, struct tipc_bearer *b_ptr,
+		     struct tipc_media_addr *dest);
+void bearer_lock_push(struct bearer *b_ptr);
+
+
+/**
+ * bearer_send - sends buffer to destination over bearer
+ * 
+ * Returns true (1) if successful, or false (0) if unable to send
+ * 
+ * IMPORTANT:
+ * The media send routine must not alter the buffer being passed in
+ * as it may be needed for later retransmission!
+ * 
+ * If the media send routine returns a non-zero value (indicating that 
+ * it was unable to send the buffer), it must:
+ *   1) mark the bearer as blocked,
+ *   2) call tipc_continue() once the bearer is able to send again.
+ * Media types that are unable to meet these two criteria must ensure their
+ * send routine always returns success -- even if the buffer was not sent --
+ * and let TIPC's link code deal with the undelivered message. 
+ */
+
+static inline int bearer_send(struct bearer *b_ptr, struct sk_buff *buf,
+			      struct tipc_media_addr *dest)
+{
+	return !b_ptr->media->send_msg(buf, &b_ptr->publ, dest);
+}
+
+/**
+ * bearer_congested - determines if bearer is currently congested
+ */
+
+static inline int bearer_congested(struct bearer *b_ptr, struct link *l_ptr)
+{
+	if (unlikely(b_ptr->publ.blocked))
+		return 1;
+	if (likely(list_empty(&b_ptr->cong_links)))
+		return 0;
+	return !bearer_resolve_congestion(b_ptr, l_ptr);
+}
+
+#endif
diff --git a/net/tipc/cluster.c b/net/tipc/cluster.c
new file mode 100644
index 0000000..d7d0f5f
--- /dev/null
+++ b/net/tipc/cluster.c
@@ -0,0 +1,573 @@
+/*
+ * net/tipc/cluster.c: TIPC cluster management routines
+ * 
+ * Copyright (c) 2003-2005, Ericsson Research Canada
+ * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005-2006, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * Neither the names of the copyright holders nor the names of its 
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "cluster.h"
+#include "addr.h"
+#include "node_subscr.h"
+#include "link.h"
+#include "node.h"
+#include "net.h"
+#include "msg.h"
+#include "bearer.h"
+
+void cluster_multicast(struct cluster *c_ptr, struct sk_buff *buf, 
+		       u32 lower, u32 upper);
+struct sk_buff *cluster_prepare_routing_msg(u32 data_size, u32 dest);
+
+struct node **local_nodes = 0;
+struct node_map cluster_bcast_nodes = {0,{0,}};
+u32 highest_allowed_slave = 0;
+
+struct cluster *cluster_create(u32 addr)
+{
+	struct _zone *z_ptr;
+	struct cluster *c_ptr;
+	int max_nodes; 
+	int alloc;
+
+	c_ptr = (struct cluster *)kmalloc(sizeof(*c_ptr), GFP_ATOMIC);
+	if (c_ptr == NULL)
+		return 0;
+	memset(c_ptr, 0, sizeof(*c_ptr));
+
+	c_ptr->addr = tipc_addr(tipc_zone(addr), tipc_cluster(addr), 0);
+	if (in_own_cluster(addr))
+		max_nodes = LOWEST_SLAVE + tipc_max_slaves;
+	else
+		max_nodes = tipc_max_nodes + 1;
+	alloc = sizeof(void *) * (max_nodes + 1);
+	c_ptr->nodes = (struct node **)kmalloc(alloc, GFP_ATOMIC);
+	if (c_ptr->nodes == NULL) {
+		kfree(c_ptr);
+		return 0;
+	}
+	memset(c_ptr->nodes, 0, alloc);  
+	if (in_own_cluster(addr))
+		local_nodes = c_ptr->nodes;
+	c_ptr->highest_slave = LOWEST_SLAVE - 1;
+	c_ptr->highest_node = 0;
+	
+	z_ptr = zone_find(tipc_zone(addr));
+	if (z_ptr == NULL) {
+		z_ptr = zone_create(addr);
+	}
+	if (z_ptr != NULL) {
+		zone_attach_cluster(z_ptr, c_ptr);
+		c_ptr->owner = z_ptr;
+	} else {
+		kfree(c_ptr->nodes);
+		kfree(c_ptr);
+		c_ptr = 0;
+	}
+
+	return c_ptr;
+}
+
+void cluster_delete(struct cluster *c_ptr)
+{
+	u32 n_num;
+
+	if (!c_ptr)
+		return;
+	for (n_num = 1; n_num <= c_ptr->highest_node; n_num++) {
+		node_delete(c_ptr->nodes[n_num]);
+	}
+	for (n_num = LOWEST_SLAVE; n_num <= c_ptr->highest_slave; n_num++) {
+		node_delete(c_ptr->nodes[n_num]);
+	}
+	kfree(c_ptr->nodes);
+	kfree(c_ptr);
+}
+
+u32 cluster_next_node(struct cluster *c_ptr, u32 addr)
+{
+	struct node *n_ptr;
+	u32 n_num = tipc_node(addr) + 1;
+
+	if (!c_ptr)
+		return addr;
+	for (; n_num <= c_ptr->highest_node; n_num++) {
+		n_ptr = c_ptr->nodes[n_num];
+		if (n_ptr && node_has_active_links(n_ptr))
+			return n_ptr->addr;
+	}
+	for (n_num = 1; n_num < tipc_node(addr); n_num++) {
+		n_ptr = c_ptr->nodes[n_num];
+		if (n_ptr && node_has_active_links(n_ptr))
+			return n_ptr->addr;
+	}
+	return 0;
+}
+
+void cluster_attach_node(struct cluster *c_ptr, struct node *n_ptr)
+{
+	u32 n_num = tipc_node(n_ptr->addr);
+	u32 max_n_num = tipc_max_nodes;
+
+	if (in_own_cluster(n_ptr->addr))
+		max_n_num = highest_allowed_slave;
+	assert(n_num > 0);
+	assert(n_num <= max_n_num);
+	assert(c_ptr->nodes[n_num] == 0);
+	c_ptr->nodes[n_num] = n_ptr;
+	if (n_num > c_ptr->highest_node)
+		c_ptr->highest_node = n_num;
+}
+
+/**
+ * cluster_select_router - select router to a cluster
+ * 
+ * Uses deterministic and fair algorithm.
+ */
+
+u32 cluster_select_router(struct cluster *c_ptr, u32 ref)
+{
+	u32 n_num;
+	u32 ulim = c_ptr->highest_node;
+	u32 mask;
+	u32 tstart;
+
+	assert(!in_own_cluster(c_ptr->addr));
+	if (!ulim)
+		return 0;
+
+	/* Start entry must be random */
+	mask = tipc_max_nodes;
+	while (mask > ulim)
+		mask >>= 1;
+	tstart = ref & mask;
+	n_num = tstart;
+
+	/* Lookup upwards with wrap-around */
+	do {
+		if (node_is_up(c_ptr->nodes[n_num]))
+			break;
+	} while (++n_num <= ulim);
+	if (n_num > ulim) {
+		n_num = 1;
+		do {
+			if (node_is_up(c_ptr->nodes[n_num]))
+				break;
+		} while (++n_num < tstart);
+		if (n_num == tstart)
+			return 0;
+	}
+	assert(n_num <= ulim);
+	return node_select_router(c_ptr->nodes[n_num], ref);
+}
+
+/**
+ * cluster_select_node - select destination node within a remote cluster
+ * 
+ * Uses deterministic and fair algorithm.
+ */
+
+struct node *cluster_select_node(struct cluster *c_ptr, u32 selector)
+{
+	u32 n_num;
+	u32 mask = tipc_max_nodes;
+	u32 start_entry;
+
+	assert(!in_own_cluster(c_ptr->addr));
+	if (!c_ptr->highest_node)
+		return 0;
+
+	/* Start entry must be random */
+	while (mask > c_ptr->highest_node) {
+		mask >>= 1;
+	}
+	start_entry = (selector & mask) ? selector & mask : 1u;
+	assert(start_entry <= c_ptr->highest_node);
+
+	/* Lookup upwards with wrap-around */
+	for (n_num = start_entry; n_num <= c_ptr->highest_node; n_num++) {
+		if (node_has_active_links(c_ptr->nodes[n_num]))
+			return c_ptr->nodes[n_num];
+	}
+	for (n_num = 1; n_num < start_entry; n_num++) {
+		if (node_has_active_links(c_ptr->nodes[n_num]))
+			return c_ptr->nodes[n_num];
+	}
+	return 0;
+}
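+
+/*
+ * Example: with highest_node == 10 and a selector hashing to
+ * start_entry == 7, nodes 7 through 10 are probed first, then 1
+ * through 6; entry 0 is never used, since start_entry is forced to
+ * be at least 1.
+ */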
+
+/*
+ *    Routing table management: See description in node.c
+ */
+
+struct sk_buff *cluster_prepare_routing_msg(u32 data_size, u32 dest)
+{
+	u32 size = INT_H_SIZE + data_size;
+	struct sk_buff *buf = buf_acquire(size);
+	struct tipc_msg *msg;
+
+	if (buf) {
+		msg = buf_msg(buf);
+		memset((char *)msg, 0, size);
+		msg_init(msg, ROUTE_DISTRIBUTOR, 0, TIPC_OK, INT_H_SIZE, dest);
+	}
+	return buf;
+}
+
+void cluster_bcast_new_route(struct cluster *c_ptr, u32 dest,
+			     u32 lower, u32 upper)
+{
+	struct sk_buff *buf = cluster_prepare_routing_msg(0, c_ptr->addr);
+	struct tipc_msg *msg;
+
+	if (buf) {
+		msg = buf_msg(buf);
+		msg_set_remote_node(msg, dest);
+		msg_set_type(msg, ROUTE_ADDITION);
+		cluster_multicast(c_ptr, buf, lower, upper);
+	} else {
+		warn("Memory squeeze: broadcast of new route failed\n");
+	}
+}
+
+void cluster_bcast_lost_route(struct cluster *c_ptr, u32 dest,
+			      u32 lower, u32 upper)
+{
+	struct sk_buff *buf = cluster_prepare_routing_msg(0, c_ptr->addr);
+	struct tipc_msg *msg;
+
+	if (buf) {
+		msg = buf_msg(buf);
+		msg_set_remote_node(msg, dest);
+		msg_set_type(msg, ROUTE_REMOVAL);
+		cluster_multicast(c_ptr, buf, lower, upper);
+	} else {
+		warn("Memory squeeze: broadcast of lost route failed\n");
+	}
+}
+
+void cluster_send_slave_routes(struct cluster *c_ptr, u32 dest)
+{
+	struct sk_buff *buf;
+	struct tipc_msg *msg;
+	u32 highest = c_ptr->highest_slave;
+	u32 n_num;
+	int send = 0;
+
+	assert(!is_slave(dest));
+	assert(in_own_cluster(dest));
+	assert(in_own_cluster(c_ptr->addr));
+	if (highest <= LOWEST_SLAVE)
+		return;
+	buf = cluster_prepare_routing_msg(highest - LOWEST_SLAVE + 1,
+					  c_ptr->addr);
+	if (buf) {
+		msg = buf_msg(buf);
+		msg_set_remote_node(msg, c_ptr->addr);
+		msg_set_type(msg, SLAVE_ROUTING_TABLE);
+		for (n_num = LOWEST_SLAVE; n_num <= highest; n_num++) {
+			if (c_ptr->nodes[n_num] && 
+			    node_has_active_links(c_ptr->nodes[n_num])) {
+				send = 1;
+				msg_set_dataoctet(msg, n_num);
+			}
+		}
+		if (send)
+			link_send(buf, dest, dest);
+		else
+			buf_discard(buf);
+	} else {
+		warn("Memory squeeze: broadcast of slave routes failed\n");
+	}
+}
+
+void cluster_send_ext_routes(struct cluster *c_ptr, u32 dest)
+{
+	struct sk_buff *buf;
+	struct tipc_msg *msg;
+	u32 highest = c_ptr->highest_node;
+	u32 n_num;
+	int send = 0;
+
+	if (in_own_cluster(c_ptr->addr))
+		return;
+	assert(!is_slave(dest));
+	assert(in_own_cluster(dest));
+	highest = c_ptr->highest_node;
+	buf = cluster_prepare_routing_msg(highest + 1, c_ptr->addr);
+	if (buf) {
+		msg = buf_msg(buf);
+		msg_set_remote_node(msg, c_ptr->addr);
+		msg_set_type(msg, EXT_ROUTING_TABLE);
+		for (n_num = 1; n_num <= highest; n_num++) {
+			if (c_ptr->nodes[n_num] && 
+			    node_has_active_links(c_ptr->nodes[n_num])) {
+				send = 1;
+				msg_set_dataoctet(msg, n_num);
+			}
+		}
+		if (send)
+			link_send(buf, dest, dest);
+		else
+			buf_discard(buf);
+	} else {
+		warn("Memory squeeze: broadcast of external route failed\n");
+	}
+}
+
+void cluster_send_local_routes(struct cluster *c_ptr, u32 dest)
+{
+	struct sk_buff *buf;
+	struct tipc_msg *msg;
+	u32 highest = c_ptr->highest_node;
+	u32 n_num;
+	int send = 0;
+
+	assert(is_slave(dest));
+	assert(in_own_cluster(c_ptr->addr));
+	buf = cluster_prepare_routing_msg(highest, c_ptr->addr);
+	if (buf) {
+		msg = buf_msg(buf);
+		msg_set_remote_node(msg, c_ptr->addr);
+		msg_set_type(msg, LOCAL_ROUTING_TABLE);
+		for (n_num = 1; n_num <= highest; n_num++) {
+			if (c_ptr->nodes[n_num] && 
+			    node_has_active_links(c_ptr->nodes[n_num])) {
+				send = 1;
+				msg_set_dataoctet(msg, n_num);
+			}
+		}
+		if (send)
+			link_send(buf, dest, dest);
+		else
+			buf_discard(buf);
+	} else {
+		warn("Memory squeeze: broadcast of local route failed\n");
+	}
+}
+
+void cluster_recv_routing_table(struct sk_buff *buf)
+{
+	struct tipc_msg *msg = buf_msg(buf);
+	struct cluster *c_ptr;
+	struct node *n_ptr;
+	unchar *node_table;
+	u32 table_size;
+	u32 router;
+	u32 rem_node = msg_remote_node(msg);
+	u32 z_num;
+	u32 c_num;
+	u32 n_num;
+
+	c_ptr = cluster_find(rem_node);
+	if (!c_ptr) {
+		c_ptr = cluster_create(rem_node);
+		if (!c_ptr) {
+			buf_discard(buf);
+			return;
+		}
+	}
+
+	node_table = buf->data + msg_hdr_sz(msg);
+	table_size = msg_size(msg) - msg_hdr_sz(msg);
+	router = msg_prevnode(msg);
+	z_num = tipc_zone(rem_node);
+	c_num = tipc_cluster(rem_node);
+
+	switch (msg_type(msg)) {
+	case LOCAL_ROUTING_TABLE:
+		assert(is_slave(tipc_own_addr));
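+		/* fall through: handled like an external routing table */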
+	case EXT_ROUTING_TABLE:
+		for (n_num = 1; n_num < table_size; n_num++) {
+			if (node_table[n_num]) {
+				u32 addr = tipc_addr(z_num, c_num, n_num);
+				n_ptr = c_ptr->nodes[n_num];
+				if (!n_ptr) {
+					n_ptr = node_create(addr);
+				}
+				if (n_ptr)
+					node_add_router(n_ptr, router);
+			}
+		}
+		break;
+	case SLAVE_ROUTING_TABLE:
+		assert(!is_slave(tipc_own_addr));
+		assert(in_own_cluster(c_ptr->addr));
+		for (n_num = 1; n_num < table_size; n_num++) {
+			if (node_table[n_num]) {
+				u32 slave_num = n_num + LOWEST_SLAVE;
+				u32 addr = tipc_addr(z_num, c_num, slave_num);
+				n_ptr = c_ptr->nodes[slave_num];
+				if (!n_ptr) {
+					n_ptr = node_create(addr);
+				}
+				if (n_ptr)
+					node_add_router(n_ptr, router);
+			}
+		}
+		break;
+	case ROUTE_ADDITION:
+		if (!is_slave(tipc_own_addr)) {
+			assert(!in_own_cluster(c_ptr->addr)
+			       || is_slave(rem_node));
+		} else {
+			assert(in_own_cluster(c_ptr->addr)
+			       && !is_slave(rem_node));
+		}
+		n_ptr = c_ptr->nodes[tipc_node(rem_node)];
+		if (!n_ptr)
+			n_ptr = node_create(rem_node);
+		if (n_ptr)
+			node_add_router(n_ptr, router);
+		break;
+	case ROUTE_REMOVAL:
+		if (!is_slave(tipc_own_addr)) {
+			assert(!in_own_cluster(c_ptr->addr)
+			       || is_slave(rem_node));
+		} else {
+			assert(in_own_cluster(c_ptr->addr)
+			       && !is_slave(rem_node));
+		}
+		n_ptr = c_ptr->nodes[tipc_node(rem_node)];
+		if (n_ptr)
+			node_remove_router(n_ptr, router);
+		break;
+	default:
+		assert(!"Illegal routing manager message received\n");
+	}
+	buf_discard(buf);
+}
+
+void cluster_remove_as_router(struct cluster *c_ptr, u32 router)
+{
+	u32 start_entry;
+	u32 tstop;
+	u32 n_num;
+
+	if (is_slave(router))
+		return;	/* Slave nodes cannot be routers */
+
+	if (in_own_cluster(c_ptr->addr)) {
+		start_entry = LOWEST_SLAVE;
+		tstop = c_ptr->highest_slave;
+	} else {
+		start_entry = 1;
+		tstop = c_ptr->highest_node;
+	}
+
+	for (n_num = start_entry; n_num <= tstop; n_num++) {
+		if (c_ptr->nodes[n_num]) {
+			node_remove_router(c_ptr->nodes[n_num], router);
+		}
+	}
+}
+
+/**
+ * cluster_multicast - multicast message to local nodes 
+ */
+
+void cluster_multicast(struct cluster *c_ptr, struct sk_buff *buf, 
+		       u32 lower, u32 upper)
+{
+	struct sk_buff *buf_copy;
+	struct node *n_ptr;
+	u32 n_num;
+	u32 tstop;
+
+	assert(lower <= upper);
+	assert(((lower >= 1) && (lower <= tipc_max_nodes)) ||
+	       ((lower >= LOWEST_SLAVE) && (lower <= highest_allowed_slave)));
+	assert(((upper >= 1) && (upper <= tipc_max_nodes)) ||
+	       ((upper >= LOWEST_SLAVE) && (upper <= highest_allowed_slave)));
+	assert(in_own_cluster(c_ptr->addr));
+
+	tstop = is_slave(upper) ? c_ptr->highest_slave : c_ptr->highest_node;
+	if (tstop > upper)
+		tstop = upper;
+	for (n_num = lower; n_num <= tstop; n_num++) {
+		n_ptr = c_ptr->nodes[n_num];
+		if (n_ptr && node_has_active_links(n_ptr)) {
+			buf_copy = skb_copy(buf, GFP_ATOMIC);
+			if (buf_copy == NULL)
+				break;
+			msg_set_destnode(buf_msg(buf_copy), n_ptr->addr);
+			link_send(buf_copy, n_ptr->addr, n_ptr->addr);
+		}
+	}
+	buf_discard(buf);
+}
+
+/**
+ * cluster_broadcast - broadcast message to all nodes within cluster
+ */
+
+void cluster_broadcast(struct sk_buff *buf)
+{
+	struct sk_buff *buf_copy;
+	struct cluster *c_ptr;
+	struct node *n_ptr;
+	u32 n_num;
+	u32 tstart;
+	u32 tstop;
+	u32 node_type;
+
+	if (tipc_mode == TIPC_NET_MODE) {
+		c_ptr = cluster_find(tipc_own_addr);
+		assert(in_own_cluster(c_ptr->addr));	/* For now */
+
+		/* Send to standard nodes, then repeat loop sending to slaves */
+		tstart = 1;
+		tstop = c_ptr->highest_node;
+		for (node_type = 1; node_type <= 2; node_type++) {
+			for (n_num = tstart; n_num <= tstop; n_num++) {
+				n_ptr = c_ptr->nodes[n_num];
+				if (n_ptr && node_has_active_links(n_ptr)) {
+					buf_copy = skb_copy(buf, GFP_ATOMIC);
+					if (buf_copy == NULL)
+						goto exit;
+					msg_set_destnode(buf_msg(buf_copy), 
+							 n_ptr->addr);
+					link_send(buf_copy, n_ptr->addr, 
+						  n_ptr->addr);
+				}
+			}
+			tstart = LOWEST_SLAVE;
+			tstop = c_ptr->highest_slave;
+		}
+	}
+exit:
+	buf_discard(buf);
+}
+
+int cluster_init(void)
+{
+	highest_allowed_slave = LOWEST_SLAVE + tipc_max_slaves;
+	return cluster_create(tipc_own_addr) ? TIPC_OK : -ENOMEM;
+}
+
diff --git a/net/tipc/cluster.h b/net/tipc/cluster.h
new file mode 100644
index 0000000..c12875a
--- /dev/null
+++ b/net/tipc/cluster.h
@@ -0,0 +1,89 @@
+/*
+ * net/tipc/cluster.h: Include file for TIPC cluster management routines
+ * 
+ * Copyright (c) 2003-2005, Ericsson Research Canada
+ * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005-2006, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without 
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this 
+ * list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice, 
+ * this list of conditions and the following disclaimer in the documentation 
+ * and/or other materials provided with the distribution.
+ * Neither the names of the copyright holders nor the names of its 
+ * contributors may be used to endorse or promote products derived from this 
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_CLUSTER_H
+#define _TIPC_CLUSTER_H
+
+#include "addr.h"
+#include "zone.h"
+
+#define LOWEST_SLAVE  2048u
+
+/**
+ * struct cluster - TIPC cluster structure
+ * @addr: network address of cluster
+ * @owner: pointer to zone that cluster belongs to
+ * @nodes: array of pointers to all nodes within cluster
+ * @highest_node: id of highest numbered node within cluster
+ * @highest_slave: id of highest numbered slave node within cluster
+ *                 (used for secondary node support)
+ */
+ 
+struct cluster {
+	u32 addr;
+	struct _zone *owner;
+	struct node **nodes;
+	u32 highest_node;
+	u32 highest_slave;
+};
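+
+/*
+ * Illustration (sketch): nodes[] is indexed by the node field of a TIPC
+ * address, so the entry for an address built with tipc_addr(z, c, n) is
+ * nodes[n].  Normal nodes occupy slots 1..tipc_max_nodes, while slave
+ * nodes occupy slots LOWEST_SLAVE..highest_allowed_slave (see
+ * cluster_attach_node() in cluster.c).
+ */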
+
+
+extern struct node **local_nodes;
+extern u32 highest_allowed_slave;
+extern struct node_map cluster_bcast_nodes;
+
+void cluster_remove_as_router(struct cluster *c_ptr, u32 router);
+void cluster_send_ext_routes(struct cluster *c_ptr, u32 dest);
+struct node *cluster_select_node(struct cluster *c_ptr, u32 selector);
+u32 cluster_select_router(struct cluster *c_ptr, u32 ref);
+void cluster_recv_routing_table(struct sk_buff *buf);
+struct cluster *cluster_create(u32 addr);
+void cluster_delete(struct cluster *c_ptr);
+void cluster_attach_node(struct cluster *c_ptr, struct node *n_ptr);
+void cluster_send_slave_routes(struct cluster *c_ptr, u32 dest);
+void cluster_broadcast(struct sk_buff *buf);
+int cluster_init(void);
+u32 cluster_next_node(struct cluster *c_ptr, u32 addr);
+void cluster_bcast_new_route(struct cluster *c_ptr, u32 dest, u32 lo, u32 hi);
+void cluster_send_local_routes(struct cluster *c_ptr, u32 dest);
+void cluster_bcast_lost_route(struct cluster *c_ptr, u32 dest, u32 lo, u32 hi);
+
+static inline struct cluster *cluster_find(u32 addr)
+{
+	struct _zone *z_ptr = zone_find(addr);
+
+	if (z_ptr)
+		return z_ptr->clusters[1];
+	return NULL;
+}
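+
+/*
+ * Note: only one cluster per zone is supported in this release (see
+ * cfg_set_max_clusters() in config.c), so cluster_find() can always use
+ * the fixed clusters[1] slot.
+ */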
+
+#endif
diff --git a/net/tipc/config.c b/net/tipc/config.c
new file mode 100644
index 0000000..0f31c34
--- /dev/null
+++ b/net/tipc/config.c
@@ -0,0 +1,715 @@
+/*
+ * net/tipc/config.c: TIPC configuration management code
+ * 
+ * Copyright (c) 2003-2005, Ericsson Research Canada
+ * Copyright (c) 2004-2005, Wind River Systems
+ * Copyright (c) 2005-2006, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without 
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this 
+ * list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice, 
+ * this list of conditions and the following disclaimer in the documentation 
+ * and/or other materials provided with the distribution.
+ * Neither the names of the copyright holders nor the names of its 
+ * contributors may be used to endorse or promote products derived from this 
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "dbg.h"
+#include "bearer.h"
+#include "port.h"
+#include "link.h"
+#include "zone.h"
+#include "addr.h"
+#include "name_table.h"
+#include "node.h"
+#include "config.h"
+#include "discover.h"
+
+struct subscr_data {
+	char usr_handle[8];
+	u32 domain;
+	u32 port_ref;
+	struct list_head subd_list;
+};
+
+struct manager {
+	u32 user_ref;
+	u32 port_ref;
+	u32 subscr_ref;
+	u32 link_subscriptions;
+	struct list_head link_subscribers;
+};
+
+static struct manager mng = { 0 };
+
+static spinlock_t config_lock = SPIN_LOCK_UNLOCKED;
+
+static const void *req_tlv_area;	/* request message TLV area */
+static int req_tlv_space;		/* request message TLV area size */
+static int rep_headroom;		/* reply message headroom to use */
+
+
+void cfg_link_event(u32 addr, char *name, int up)
+{
+	/* TIPC DOESN'T HANDLE LINK EVENT SUBSCRIPTIONS AT THE MOMENT */
+}
+
+
+struct sk_buff *cfg_reply_alloc(int payload_size)
+{
+	struct sk_buff *buf;
+
+	buf = alloc_skb(rep_headroom + payload_size, GFP_ATOMIC);
+	if (buf)
+		skb_reserve(buf, rep_headroom);
+	return buf;
+}
+
+int cfg_append_tlv(struct sk_buff *buf, int tlv_type, 
+		   void *tlv_data, int tlv_data_size)
+{
+	struct tlv_desc *tlv = (struct tlv_desc *)buf->tail;
+	int new_tlv_space = TLV_SPACE(tlv_data_size);
+
+	if (skb_tailroom(buf) < new_tlv_space) {
+		dbg("cfg_append_tlv unable to append TLV\n");
+		return 0;
+	}
+	skb_put(buf, new_tlv_space);
+	tlv->tlv_type = htons(tlv_type);
+	tlv->tlv_len  = htons(TLV_LENGTH(tlv_data_size));
+	if (tlv_data_size && tlv_data)
+		memcpy(TLV_DATA(tlv), tlv_data, tlv_data_size);
+	return 1;
+}
+
+struct sk_buff *cfg_reply_unsigned_type(u16 tlv_type, u32 value)
+{
+	struct sk_buff *buf;
+	u32 value_net;
+
+	buf = cfg_reply_alloc(TLV_SPACE(sizeof(value)));
+	if (buf) {
+		value_net = htonl(value);
+		cfg_append_tlv(buf, tlv_type, &value_net, 
+			       sizeof(value_net));
+	}
+	return buf;
+}
+
+struct sk_buff *cfg_reply_string_type(u16 tlv_type, char *string)
+{
+	struct sk_buff *buf;
+	int string_len = strlen(string) + 1;
+
+	buf = cfg_reply_alloc(TLV_SPACE(string_len));
+	if (buf)
+		cfg_append_tlv(buf, tlv_type, string, string_len);
+	return buf;
+}
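+
+/*
+ * Example (sketch): a configuration command handler builds its reply by
+ * allocating a reply buffer and appending one or more TLVs; this is the
+ * pattern that cfg_reply_unsigned_type() above wraps up:
+ *
+ *	struct sk_buff *buf = cfg_reply_alloc(TLV_SPACE(sizeof(u32)));
+ *	u32 value_net = htonl(value);
+ *
+ *	if (buf)
+ *		cfg_append_tlv(buf, TIPC_TLV_UNSIGNED, &value_net,
+ *		               sizeof(value_net));
+ */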
+
+
+
+
+#if 0
+
+/* Now-obsolete code for handling commands not yet reimplemented the new way */
+
+int tipc_cfg_cmd(const struct tipc_cmd_msg * msg,
+		 char *data,
+		 u32 sz,
+		 u32 *ret_size,
+		 struct tipc_portid *orig)
+{
+	int rv = -EINVAL;
+	u32 cmd = msg->cmd;
+
+	*ret_size = 0;
+	switch (cmd) {
+	case TIPC_REMOVE_LINK:
+	case TIPC_CMD_BLOCK_LINK:
+	case TIPC_CMD_UNBLOCK_LINK:
+		if (!cfg_check_connection(orig))
+			rv = link_control(msg->argv.link_name, msg->cmd, 0);
+		break;
+	case TIPC_ESTABLISH:
+		{
+			int connected;
+
+			tipc_isconnected(mng.conn_port_ref, &connected);
+			if (connected || !orig) {
+				rv = TIPC_FAILURE;
+				break;
+			}
+			rv = tipc_connect2port(mng.conn_port_ref, orig);
+			if (rv == TIPC_OK)
+				orig = 0;
+			break;
+		}
+	case TIPC_GET_PEER_ADDRESS:
+		*ret_size = link_peer_addr(msg->argv.link_name, data, sz);
+		break;
+	case TIPC_GET_ROUTES:
+		rv = TIPC_OK;
+		break;
+	default: {}
+	}
+	if (*ret_size)
+		rv = TIPC_OK;
+	return rv;
+}
+
+static void cfg_cmd_event(struct tipc_cmd_msg *msg,
+			  char *data,
+			  u32 sz,        
+			  struct tipc_portid const *orig)
+{
+	int rv = -EINVAL;
+	struct tipc_cmd_result_msg rmsg;
+	struct iovec msg_sect[2];
+	int *arg;
+
+	msg->cmd = ntohl(msg->cmd);
+
+	cfg_prepare_res_msg(msg->cmd, msg->usr_handle, rv, &rmsg, msg_sect, 
+			    data, 0);
+	if (ntohl(msg->magic) != TIPC_MAGIC)
+		goto exit;
+
+	switch (msg->cmd) {
+	case TIPC_CREATE_LINK:
+		if (!cfg_check_connection(orig))
+			rv = disc_create_link(&msg->argv.create_link);
+		break;
+	case TIPC_LINK_SUBSCRIBE:
+		{
+			struct subscr_data *sub;
+
+			if (mng.link_subscriptions > 64)
+				break;
+			sub = (struct subscr_data *)kmalloc(sizeof(*sub),
+							    GFP_ATOMIC);
+			if (sub == NULL) {
+				warn("Memory squeeze; dropped remote link subscription\n");
+				break;
+			}
+			INIT_LIST_HEAD(&sub->subd_list);
+			tipc_createport(mng.user_ref,
+					(void *)sub,
+					TIPC_HIGH_IMPORTANCE,
+					0,
+					0,
+					(tipc_conn_shutdown_event)cfg_linksubscr_cancel,
+					0,
+					0,
+					(tipc_conn_msg_event)cfg_linksubscr_cancel,
+					0,
+					&sub->port_ref);
+			if (!sub->port_ref) {
+				kfree(sub);
+				break;
+			}
+			memcpy(sub->usr_handle,msg->usr_handle,
+			       sizeof(sub->usr_handle));
+			sub->domain = msg->argv.domain;
+			list_add_tail(&sub->subd_list, &mng.link_subscribers);
+			tipc_connect2port(sub->port_ref, orig);
+			rmsg.retval = TIPC_OK;
+			tipc_send(sub->port_ref, 2u, msg_sect);
+			mng.link_subscriptions++;
+			return;
+		}
+	default:
+		rv = tipc_cfg_cmd(msg, data, sz, (u32 *)&msg_sect[1].iov_len, orig);
+	}
+	exit:
+	rmsg.result_len = htonl(msg_sect[1].iov_len);
+	rmsg.retval = htonl(rv);
+	cfg_respond(msg_sect, 2u, orig);
+}
+#endif
+
+static struct sk_buff *cfg_enable_bearer(void)
+{
+	struct tipc_bearer_config *args;
+
+	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_BEARER_CONFIG))
+		return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+
+	args = (struct tipc_bearer_config *)TLV_DATA(req_tlv_area);
+	if (tipc_enable_bearer(args->name,
+			       ntohl(args->detect_scope),
+			       ntohl(args->priority)))
+		return cfg_reply_error_string("unable to enable bearer");
+
+	return cfg_reply_none();
+}
+
+static struct sk_buff *cfg_disable_bearer(void)
+{
+	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_BEARER_NAME))
+		return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+
+	if (tipc_disable_bearer((char *)TLV_DATA(req_tlv_area)))
+		return cfg_reply_error_string("unable to disable bearer");
+
+	return cfg_reply_none();
+}
+
+static struct sk_buff *cfg_set_own_addr(void)
+{
+	u32 addr;
+
+	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR))
+		return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+
+	addr = *(u32 *)TLV_DATA(req_tlv_area);
+	addr = ntohl(addr);
+	if (addr == tipc_own_addr)
+		return cfg_reply_none();
+	if (!addr_node_valid(addr))
+		return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
+					      " (node address)");
+	if (tipc_own_addr)
+		return cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
+					      " (cannot change node address once assigned)");
+
+	spin_unlock_bh(&config_lock);
+	stop_net();
+	tipc_own_addr = addr;
+	start_net();
+	spin_lock_bh(&config_lock);
+	return cfg_reply_none();
+}
+
+static struct sk_buff *cfg_set_remote_mng(void)
+{
+	u32 value;
+
+	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
+		return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+
+	value = *(u32 *)TLV_DATA(req_tlv_area);
+	value = ntohl(value);
+	tipc_remote_management = (value != 0);
+	return cfg_reply_none();
+}
+
+static struct sk_buff *cfg_set_max_publications(void)
+{
+	u32 value;
+
+	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
+		return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+
+	value = *(u32 *)TLV_DATA(req_tlv_area);
+	value = ntohl(value);
+	if (value != delimit(value, 1, 65535))
+		return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
+					      " (max publications must be 1-65535)");
+	tipc_max_publications = value;
+	return cfg_reply_none();
+}
+
+static struct sk_buff *cfg_set_max_subscriptions(void)
+{
+	u32 value;
+
+	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
+		return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+
+	value = *(u32 *)TLV_DATA(req_tlv_area);
+	value = ntohl(value);
+	if (value != delimit(value, 1, 65535))
+		return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
+					      " (max subscriptions must be 1-65535)");
+	tipc_max_subscriptions = value;
+	return cfg_reply_none();
+}
+
+static struct sk_buff *cfg_set_max_ports(void)
+{
+	int orig_mode;
+	u32 value;
+
+	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
+		return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+	value = *(u32 *)TLV_DATA(req_tlv_area);
+	value = ntohl(value);
+	if (value != delimit(value, 127, 65535))
+		return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
+					      " (max ports must be 127-65535)");
+
+	if (value == tipc_max_ports)
+		return cfg_reply_none();
+
+	if (atomic_read(&tipc_user_count) > 2)
+		return cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
+					      " (cannot change max ports while TIPC users exist)");
+
+	spin_unlock_bh(&config_lock);
+	orig_mode = tipc_get_mode();
+	if (orig_mode == TIPC_NET_MODE)
+		stop_net();
+	stop_core();
+	tipc_max_ports = value;
+	start_core();
+	if (orig_mode == TIPC_NET_MODE)
+		start_net();
+	spin_lock_bh(&config_lock);
+	return cfg_reply_none();
+}
+
+static struct sk_buff *set_net_max(int value, int *parameter)
+{
+	int orig_mode;
+
+	if (value != *parameter) {
+		orig_mode = tipc_get_mode();
+		if (orig_mode == TIPC_NET_MODE)
+			stop_net();
+		*parameter = value;
+		if (orig_mode == TIPC_NET_MODE)
+			start_net();
+	}
+
+	return cfg_reply_none();
+}
+
+static struct sk_buff *cfg_set_max_zones(void)
+{
+	u32 value;
+
+	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
+		return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+	value = *(u32 *)TLV_DATA(req_tlv_area);
+	value = ntohl(value);
+	if (value != delimit(value, 1, 255))
+		return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
+					      " (max zones must be 1-255)");
+	return set_net_max(value, &tipc_max_zones);
+}
+
+static struct sk_buff *cfg_set_max_clusters(void)
+{
+	u32 value;
+
+	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
+		return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+	value = *(u32 *)TLV_DATA(req_tlv_area);
+	value = ntohl(value);
+	if (value != 1)
+		return cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
+					      " (max clusters fixed at 1)");
+	return cfg_reply_none();
+}
+
+static struct sk_buff *cfg_set_max_nodes(void)
+{
+	u32 value;
+
+	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
+		return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+	value = *(u32 *)TLV_DATA(req_tlv_area);
+	value = ntohl(value);
+	if (value != delimit(value, 8, 2047))
+		return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
+					      " (max nodes must be 8-2047)");
+	return set_net_max(value, &tipc_max_nodes);
+}
+
+static struct sk_buff *cfg_set_max_slaves(void)
+{
+	u32 value;
+
+	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
+		return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+	value = *(u32 *)TLV_DATA(req_tlv_area);
+	value = ntohl(value);
+	if (value != 0)
+		return cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
+					      " (max secondary nodes fixed at 0)");
+	return cfg_reply_none();
+}
+
+static struct sk_buff *cfg_set_netid(void)
+{
+	u32 value;
+
+	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
+		return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+	value = *(u32 *)TLV_DATA(req_tlv_area);
+	value = ntohl(value);
+	if (value != delimit(value, 1, 9999))
+		return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
+					      " (network id must be 1-9999)");
+
+	if (tipc_own_addr)
+		return cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
+					      " (cannot change network id once part of network)");
+	
+	return set_net_max(value, &tipc_net_id);
+}
+
+struct sk_buff *cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area,
+			   int request_space, int reply_headroom)
+{
+	struct sk_buff *rep_tlv_buf;
+
+	spin_lock_bh(&config_lock);
+
+	/* Save request and reply details in a well-known location */
+
+	req_tlv_area = request_area;
+	req_tlv_space = request_space;
+	rep_headroom = reply_headroom;
+
+	/* Check command authorization */
+
+	if (likely(orig_node == tipc_own_addr)) {
+		/* command is permitted */
+	} else if (cmd >= 0x8000) {
+		rep_tlv_buf = cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
+						     " (cannot be done remotely)");
+		goto exit;
+	} else if (!tipc_remote_management) {
+		rep_tlv_buf = cfg_reply_error_string(TIPC_CFG_NO_REMOTE);
+		goto exit;
+	} else if (cmd >= 0x4000) {
+		u32 domain = 0;
+
+		if ((nametbl_translate(TIPC_ZM_SRV, 0, &domain) == 0) ||
+		    (domain != orig_node)) {
+			rep_tlv_buf = cfg_reply_error_string(TIPC_CFG_NOT_ZONE_MSTR);
+			goto exit;
+		}
+	}
+
+	/* Call appropriate processing routine */
+
+	switch (cmd) {
+	case TIPC_CMD_NOOP:
+		rep_tlv_buf = cfg_reply_none();
+		break;
+	case TIPC_CMD_GET_NODES:
+		rep_tlv_buf = node_get_nodes(req_tlv_area, req_tlv_space);
+		break;
+	case TIPC_CMD_GET_LINKS:
+		rep_tlv_buf = node_get_links(req_tlv_area, req_tlv_space);
+		break;
+	case TIPC_CMD_SHOW_LINK_STATS:
+		rep_tlv_buf = link_cmd_show_stats(req_tlv_area, req_tlv_space);
+		break;
+	case TIPC_CMD_RESET_LINK_STATS:
+		rep_tlv_buf = link_cmd_reset_stats(req_tlv_area, req_tlv_space);
+		break;
+	case TIPC_CMD_SHOW_NAME_TABLE:
+		rep_tlv_buf = nametbl_get(req_tlv_area, req_tlv_space);
+		break;
+	case TIPC_CMD_GET_BEARER_NAMES:
+		rep_tlv_buf = bearer_get_names();
+		break;
+	case TIPC_CMD_GET_MEDIA_NAMES:
+		rep_tlv_buf = media_get_names();
+		break;
+	case TIPC_CMD_SHOW_PORTS:
+		rep_tlv_buf = port_get_ports();
+		break;
+#if 0
+	case TIPC_CMD_SHOW_PORT_STATS:
+		rep_tlv_buf = port_show_stats(req_tlv_area, req_tlv_space);
+		break;
+	case TIPC_CMD_RESET_PORT_STATS:
+		rep_tlv_buf = cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED);
+		break;
+#endif
+	case TIPC_CMD_SET_LOG_SIZE:
+		rep_tlv_buf = log_resize(req_tlv_area, req_tlv_space);
+		break;
+	case TIPC_CMD_DUMP_LOG:
+		rep_tlv_buf = log_dump();
+		break;
+	case TIPC_CMD_SET_LINK_TOL:
+	case TIPC_CMD_SET_LINK_PRI:
+	case TIPC_CMD_SET_LINK_WINDOW:
+		rep_tlv_buf = link_cmd_config(req_tlv_area, req_tlv_space, cmd);
+		break;
+	case TIPC_CMD_ENABLE_BEARER:
+		rep_tlv_buf = cfg_enable_bearer();
+		break;
+	case TIPC_CMD_DISABLE_BEARER:
+		rep_tlv_buf = cfg_disable_bearer();
+		break;
+	case TIPC_CMD_SET_NODE_ADDR:
+		rep_tlv_buf = cfg_set_own_addr();
+		break;
+	case TIPC_CMD_SET_REMOTE_MNG:
+		rep_tlv_buf = cfg_set_remote_mng();
+		break;
+	case TIPC_CMD_SET_MAX_PORTS:
+		rep_tlv_buf = cfg_set_max_ports();
+		break;
+	case TIPC_CMD_SET_MAX_PUBL:
+		rep_tlv_buf = cfg_set_max_publications();
+		break;
+	case TIPC_CMD_SET_MAX_SUBSCR:
+		rep_tlv_buf = cfg_set_max_subscriptions();
+		break;
+	case TIPC_CMD_SET_MAX_ZONES:
+		rep_tlv_buf = cfg_set_max_zones();
+		break;
+	case TIPC_CMD_SET_MAX_CLUSTERS:
+		rep_tlv_buf = cfg_set_max_clusters();
+		break;
+	case TIPC_CMD_SET_MAX_NODES:
+		rep_tlv_buf = cfg_set_max_nodes();
+		break;
+	case TIPC_CMD_SET_MAX_SLAVES:
+		rep_tlv_buf = cfg_set_max_slaves();
+		break;
+	case TIPC_CMD_SET_NETID:
+		rep_tlv_buf = cfg_set_netid();
+		break;
+	case TIPC_CMD_GET_REMOTE_MNG:
+		rep_tlv_buf = cfg_reply_unsigned(tipc_remote_management);
+		break;
+	case TIPC_CMD_GET_MAX_PORTS:
+		rep_tlv_buf = cfg_reply_unsigned(tipc_max_ports);
+		break;
+	case TIPC_CMD_GET_MAX_PUBL:
+		rep_tlv_buf = cfg_reply_unsigned(tipc_max_publications);
+		break;
+	case TIPC_CMD_GET_MAX_SUBSCR:
+		rep_tlv_buf = cfg_reply_unsigned(tipc_max_subscriptions);
+		break;
+	case TIPC_CMD_GET_MAX_ZONES:
+		rep_tlv_buf = cfg_reply_unsigned(tipc_max_zones);
+		break;
+	case TIPC_CMD_GET_MAX_CLUSTERS:
+		rep_tlv_buf = cfg_reply_unsigned(tipc_max_clusters);
+		break;
+	case TIPC_CMD_GET_MAX_NODES:
+		rep_tlv_buf = cfg_reply_unsigned(tipc_max_nodes);
+		break;
+	case TIPC_CMD_GET_MAX_SLAVES:
+		rep_tlv_buf = cfg_reply_unsigned(tipc_max_slaves);
+		break;
+	case TIPC_CMD_GET_NETID:
+		rep_tlv_buf = cfg_reply_unsigned(tipc_net_id);
+		break;
+	default:
+		rep_tlv_buf = NULL;
+		break;
+	}
+
+	/* Return reply buffer */
+exit:
+	spin_unlock_bh(&config_lock);
+	return rep_tlv_buf;
+}
+
+static void cfg_named_msg_event(void *userdata,
+				u32 port_ref,
+				struct sk_buff **buf,
+				const unchar *msg,
+				u32 size,
+				u32 importance, 
+				struct tipc_portid const *orig,
+				struct tipc_name_seq const *dest)
+{
+	struct tipc_cfg_msg_hdr *req_hdr;
+	struct tipc_cfg_msg_hdr *rep_hdr;
+	struct sk_buff *rep_buf;
+
+	/* Validate configuration message header (ignore invalid message) */
+
+	req_hdr = (struct tipc_cfg_msg_hdr *)msg;
+	if ((size < sizeof(*req_hdr)) ||
+	    (size != TCM_ALIGN(ntohl(req_hdr->tcm_len))) ||
+	    (ntohs(req_hdr->tcm_flags) != TCM_F_REQUEST)) {
+		warn("discarded invalid configuration message\n");
+		return;
+	}
+
+	/* Generate reply for request (if no reply can be built, return request) */
+
+	rep_buf = cfg_do_cmd(orig->node,
+			     ntohs(req_hdr->tcm_type), 
+			     msg + sizeof(*req_hdr),
+			     size - sizeof(*req_hdr),
+			     BUF_HEADROOM + MAX_H_SIZE + sizeof(*rep_hdr));
+	if (rep_buf) {
+		skb_push(rep_buf, sizeof(*rep_hdr));
+		rep_hdr = (struct tipc_cfg_msg_hdr *)rep_buf->data;
+		memcpy(rep_hdr, req_hdr, sizeof(*rep_hdr));
+		rep_hdr->tcm_len = htonl(rep_buf->len);
+		rep_hdr->tcm_flags &= htons(~TCM_F_REQUEST);
+	} else {
+		rep_buf = *buf;
+		*buf = NULL;
+	}
+
+	/* NEED TO ADD CODE TO HANDLE FAILED SEND (SUCH AS CONGESTION) */
+	tipc_send_buf2port(port_ref, orig, rep_buf, rep_buf->len);
+}
+
+int cfg_init(void)
+{
+	struct tipc_name_seq seq;
+	int res;
+
+	memset(&mng, 0, sizeof(mng));
+	INIT_LIST_HEAD(&mng.link_subscribers);
+
+	res = tipc_attach(&mng.user_ref, 0, 0);
+	if (res)
+		goto failed;
+
+	res = tipc_createport(mng.user_ref, 0, TIPC_CRITICAL_IMPORTANCE,
+			      NULL, NULL, NULL,
+			      NULL, cfg_named_msg_event, NULL,
+			      NULL, &mng.port_ref);
+	if (res)
+		goto failed;
+
+	seq.type = TIPC_CFG_SRV;
+	seq.lower = seq.upper = tipc_own_addr;
+	res = nametbl_publish_rsv(mng.port_ref, TIPC_ZONE_SCOPE, &seq);
+	if (res)
+		goto failed;
+
+	return 0;
+
+failed:
+	err("Unable to create configuration service\n");
+	tipc_detach(mng.user_ref);
+	mng.user_ref = 0;
+	return res;
+}
+
+void cfg_stop(void)
+{
+	if (mng.user_ref) {
+		tipc_detach(mng.user_ref);
+		mng.user_ref = 0;
+	}
+}
diff --git a/net/tipc/config.h b/net/tipc/config.h
new file mode 100644
index 0000000..7ac0af5
--- /dev/null
+++ b/net/tipc/config.h
@@ -0,0 +1,76 @@
+/*
+ * net/tipc/config.h: Include file for TIPC configuration service code
+ * 
+ * Copyright (c) 2003-2005, Ericsson Research Canada
+ * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005-2006, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without 
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this 
+ * list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice, 
+ * this list of conditions and the following disclaimer in the documentation 
+ * and/or other materials provided with the distribution.
+ * Neither the names of the copyright holders nor the names of its 
+ * contributors may be used to endorse or promote products derived from this 
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_CONFIG_H
+#define _TIPC_CONFIG_H
+
+/* ---------------------------------------------------------------------- */
+
+#include <linux/tipc.h>
+#include "link.h"
+
+struct sk_buff *cfg_reply_alloc(int payload_size);
+int cfg_append_tlv(struct sk_buff *buf, int tlv_type, 
+		   void *tlv_data, int tlv_data_size);
+struct sk_buff *cfg_reply_unsigned_type(u16 tlv_type, u32 value);
+struct sk_buff *cfg_reply_string_type(u16 tlv_type, char *string);
+
+static inline struct sk_buff *cfg_reply_none(void)
+{
+	return cfg_reply_alloc(0);
+}
+
+static inline struct sk_buff *cfg_reply_unsigned(u32 value)
+{
+	return cfg_reply_unsigned_type(TIPC_TLV_UNSIGNED, value);
+}
+
+static inline struct sk_buff *cfg_reply_error_string(char *string)
+{
+	return cfg_reply_string_type(TIPC_TLV_ERROR_STRING, string);
+}
+
+static inline struct sk_buff *cfg_reply_ultra_string(char *string)
+{
+	return cfg_reply_string_type(TIPC_TLV_ULTRA_STRING, string);
+}
+
+struct sk_buff *cfg_do_cmd(u32 orig_node, u16 cmd, 
+			   const void *req_tlv_area, int req_tlv_space, 
+			   int headroom);
+
+void cfg_link_event(u32 addr, char *name, int up);
+int  cfg_init(void);
+void cfg_stop(void);
+
+#endif
diff --git a/net/tipc/core.c b/net/tipc/core.c
new file mode 100644
index 0000000..17c723f
--- /dev/null
+++ b/net/tipc/core.c
@@ -0,0 +1,282 @@
+/*
+ * net/tipc/core.c: TIPC module code
+ *
+ * Copyright (c) 2003-2005, Ericsson Research Canada
+ * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005-2006, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without 
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this 
+ * list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice, 
+ * this list of conditions and the following disclaimer in the documentation 
+ * and/or other materials provided with the distribution.
+ * Neither the names of the copyright holders nor the names of its 
+ * contributors may be used to endorse or promote products derived from this 
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <linux/random.h>
+
+#include "core.h"
+#include "dbg.h"
+#include "ref.h"
+#include "net.h"
+#include "user_reg.h"
+#include "name_table.h"
+#include "subscr.h"
+#include "config.h"
+
+int  eth_media_start(void);
+void eth_media_stop(void);
+int  handler_start(void);
+void handler_stop(void);
+int  socket_init(void);
+void socket_stop(void);
+int  netlink_start(void);
+void netlink_stop(void);
+
+#define MOD_NAME "tipc_start: "
+
+#ifndef CONFIG_TIPC_ZONES
+#define CONFIG_TIPC_ZONES 3
+#endif
+
+#ifndef CONFIG_TIPC_CLUSTERS
+#define CONFIG_TIPC_CLUSTERS 1
+#endif
+
+#ifndef CONFIG_TIPC_NODES
+#define CONFIG_TIPC_NODES 255
+#endif
+
+#ifndef CONFIG_TIPC_SLAVE_NODES
+#define CONFIG_TIPC_SLAVE_NODES 0
+#endif
+
+#ifndef CONFIG_TIPC_PORTS
+#define CONFIG_TIPC_PORTS 8191
+#endif
+
+#ifndef CONFIG_TIPC_LOG
+#define CONFIG_TIPC_LOG 0
+#endif
+
+/* global variables used by multiple sub-systems within TIPC */
+
+int tipc_mode = TIPC_NOT_RUNNING;
+int tipc_random;
+atomic_t tipc_user_count = ATOMIC_INIT(0);
+
+const char tipc_alphabet[] = 
+	"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_";
+
+/* configurable TIPC parameters */
+
+u32 tipc_own_addr;
+int tipc_max_zones;
+int tipc_max_clusters;
+int tipc_max_nodes;
+int tipc_max_slaves;
+int tipc_max_ports;
+int tipc_max_subscriptions;
+int tipc_max_publications;
+int tipc_net_id;
+int tipc_remote_management;
+
+
+int tipc_get_mode(void)
+{
+	return tipc_mode;
+}
+
+/**
+ * stop_net - shut down TIPC networking sub-systems
+ */
+
+void stop_net(void)
+{
+	eth_media_stop();
+	tipc_stop_net();
+}
+
+/**
+ * start_net - start TIPC networking sub-systems
+ */
+
+int start_net(void)
+{
+	int res;
+
+	if ((res = tipc_start_net()) ||
+	    (res = eth_media_start())) {
+		stop_net();
+	}
+	return res;
+}
+
+/**
+ * stop_core - switch TIPC from SINGLE NODE to NOT RUNNING mode
+ */
+
+void stop_core(void)
+{
+	if (tipc_mode != TIPC_NODE_MODE)
+		return;
+
+	tipc_mode = TIPC_NOT_RUNNING;
+
+	netlink_stop();
+	handler_stop();
+	cfg_stop();
+	subscr_stop();
+	reg_stop();
+	nametbl_stop();
+	ref_table_stop();
+	socket_stop();
+}
+
+/**
+ * start_core - switch TIPC from NOT RUNNING to SINGLE NODE mode
+ */
+
+int start_core(void)
+{
+	int res;
+
+	if (tipc_mode != TIPC_NOT_RUNNING)
+		return -ENOPROTOOPT;
+
+	get_random_bytes(&tipc_random, sizeof(tipc_random));
+	tipc_mode = TIPC_NODE_MODE;
+
+	if ((res = handler_start()) || 
+	    (res = ref_table_init(tipc_max_ports + tipc_max_subscriptions,
+				  tipc_random)) ||
+	    (res = reg_start()) ||
+	    (res = nametbl_init()) ||
+	    (res = k_signal((Handler)subscr_start, 0)) ||
+	    (res = k_signal((Handler)cfg_init, 0)) || 
+	    (res = netlink_start()) ||
+	    (res = socket_init())) {
+		stop_core();
+	}
+	return res;
+}
+
+
+static int __init tipc_init(void)
+{
+	int res;
+
+	log_reinit(CONFIG_TIPC_LOG);
+	info("Activated (compiled " __DATE__ " " __TIME__ ")\n");
+
+	tipc_own_addr = 0;
+	tipc_remote_management = 1;
+	tipc_max_publications = 10000;
+	tipc_max_subscriptions = 2000;
+	tipc_max_ports = delimit(CONFIG_TIPC_PORTS, 127, 65535);
+	tipc_max_zones = delimit(CONFIG_TIPC_ZONES, 1, 255);
+	tipc_max_clusters = delimit(CONFIG_TIPC_CLUSTERS, 1, 1);
+	tipc_max_nodes = delimit(CONFIG_TIPC_NODES, 8, 2047);
+	tipc_max_slaves = delimit(CONFIG_TIPC_SLAVE_NODES, 0, 2047);
+	tipc_net_id = 4711;
+
+	if ((res = start_core()))
+		err("Unable to start in single node mode\n");
+	else
+		info("Started in single node mode\n");
+	return res;
+}
+
+static void __exit tipc_exit(void)
+{
+	stop_net();
+	stop_core();
+	info("Deactivated\n");
+	log_stop();
+}
+
+module_init(tipc_init);
+module_exit(tipc_exit);
+
+MODULE_DESCRIPTION("TIPC: Transparent Inter Process Communication");
+MODULE_LICENSE("Dual BSD/GPL");
+
+/* Native TIPC API for kernel-space applications (see tipc.h) */
+
+EXPORT_SYMBOL(tipc_attach);
+EXPORT_SYMBOL(tipc_detach);
+EXPORT_SYMBOL(tipc_get_addr);
+EXPORT_SYMBOL(tipc_get_mode);
+EXPORT_SYMBOL(tipc_createport);
+EXPORT_SYMBOL(tipc_deleteport);
+EXPORT_SYMBOL(tipc_ownidentity);
+EXPORT_SYMBOL(tipc_portimportance);
+EXPORT_SYMBOL(tipc_set_portimportance);
+EXPORT_SYMBOL(tipc_portunreliable);
+EXPORT_SYMBOL(tipc_set_portunreliable);
+EXPORT_SYMBOL(tipc_portunreturnable);
+EXPORT_SYMBOL(tipc_set_portunreturnable);
+EXPORT_SYMBOL(tipc_publish);
+EXPORT_SYMBOL(tipc_withdraw);
+EXPORT_SYMBOL(tipc_connect2port);
+EXPORT_SYMBOL(tipc_disconnect);
+EXPORT_SYMBOL(tipc_shutdown);
+EXPORT_SYMBOL(tipc_isconnected);
+EXPORT_SYMBOL(tipc_peer);
+EXPORT_SYMBOL(tipc_ref_valid);
+EXPORT_SYMBOL(tipc_send);
+EXPORT_SYMBOL(tipc_send_buf);
+EXPORT_SYMBOL(tipc_send2name);
+EXPORT_SYMBOL(tipc_forward2name);
+EXPORT_SYMBOL(tipc_send_buf2name);
+EXPORT_SYMBOL(tipc_forward_buf2name);
+EXPORT_SYMBOL(tipc_send2port);
+EXPORT_SYMBOL(tipc_forward2port);
+EXPORT_SYMBOL(tipc_send_buf2port);
+EXPORT_SYMBOL(tipc_forward_buf2port);
+EXPORT_SYMBOL(tipc_multicast);
+/* EXPORT_SYMBOL(tipc_multicast_buf); not available yet */
+EXPORT_SYMBOL(tipc_ispublished);
+EXPORT_SYMBOL(tipc_available_nodes);
+
+/* TIPC API for external bearers (see tipc_bearer.h) */
+
+EXPORT_SYMBOL(tipc_block_bearer);
+EXPORT_SYMBOL(tipc_continue); 
+EXPORT_SYMBOL(tipc_disable_bearer);
+EXPORT_SYMBOL(tipc_enable_bearer);
+EXPORT_SYMBOL(tipc_recv_msg);
+EXPORT_SYMBOL(tipc_register_media); 
+
+/* TIPC API for external APIs (see tipc_port.h) */
+
+EXPORT_SYMBOL(tipc_createport_raw);
+EXPORT_SYMBOL(tipc_set_msg_option);
+EXPORT_SYMBOL(tipc_reject_msg);
+EXPORT_SYMBOL(tipc_send_buf_fast);
+EXPORT_SYMBOL(tipc_acknowledge);
+EXPORT_SYMBOL(tipc_get_port);
+EXPORT_SYMBOL(tipc_get_handle);
+
diff --git a/net/tipc/core.h b/net/tipc/core.h
new file mode 100644
index 0000000..d92898d6
--- /dev/null
+++ b/net/tipc/core.h
@@ -0,0 +1,313 @@
+/*
+ * net/tipc/core.h: Include file for TIPC global declarations
+ * 
+ * Copyright (c) 2003-2005, Ericsson Research Canada
+ * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005-2006, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without 
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this 
+ * list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice, 
+ * this list of conditions and the following disclaimer in the documentation 
+ * and/or other materials provided with the distribution.
+ * Neither the names of the copyright holders nor the names of its 
+ * contributors may be used to endorse or promote products derived from this 
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_CORE_H
+#define _TIPC_CORE_H
+
+#include <net/tipc/tipc.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/timer.h>
+#include <linux/string.h>
+#include <asm/uaccess.h>
+#include <linux/interrupt.h>
+#include <asm/atomic.h>
+#include <asm/hardirq.h>
+#include <linux/netdevice.h>
+#include <linux/in.h>	
+#include <linux/list.h>
+#include <linux/vmalloc.h>
+
+/*
+ * TIPC debugging code
+ */
+
+#define assert(i)  BUG_ON(!(i))
+
+struct tipc_msg;
+extern struct print_buf *CONS, *LOG;
+extern struct print_buf *TEE(struct print_buf *, struct print_buf *);
+void msg_print(struct print_buf *, struct tipc_msg *, const char *);
+void tipc_printf(struct print_buf *, const char *fmt, ...);
+void tipc_dump(struct print_buf *, const char *fmt, ...);
+
+#ifdef CONFIG_TIPC_DEBUG
+
+/*
+ * TIPC debug support included:
+ * - system messages are printed to TIPC_OUTPUT print buffer
+ * - debug messages are printed to DBG_OUTPUT print buffer
+ */
+
+#define err(fmt, arg...)  tipc_printf(TIPC_OUTPUT, KERN_ERR "TIPC: " fmt, ## arg)
+#define warn(fmt, arg...) tipc_printf(TIPC_OUTPUT, KERN_WARNING "TIPC: " fmt, ## arg)
+#define info(fmt, arg...) tipc_printf(TIPC_OUTPUT, KERN_NOTICE "TIPC: " fmt, ## arg)
+
+#define dbg(fmt, arg...)  do {if (DBG_OUTPUT) tipc_printf(DBG_OUTPUT, fmt, ## arg);} while(0)
+#define msg_dbg(msg, txt) do {if (DBG_OUTPUT) msg_print(DBG_OUTPUT, msg, txt);} while(0)
+#define dump(fmt, arg...) do {if (DBG_OUTPUT) tipc_dump(DBG_OUTPUT, fmt, ##arg);} while(0)
+
+
+/*	
+ * By default, TIPC_OUTPUT is defined to be system console and TIPC log buffer,
+ * while DBG_OUTPUT is the null print buffer.  These defaults can be changed
+ * here, or on a per .c file basis, by redefining these symbols.  The following
+ * print buffer options are available:
+ *
+ * NULL			: Output to null print buffer (i.e. print nowhere)
+ * CONS			: Output to system console
+ * LOG			: Output to TIPC log buffer 
+ * &buf 		: Output to user-defined buffer (struct print_buf *)
+ * TEE(&buf_a,&buf_b)	: Output to two print buffers (eg. TEE(CONS,LOG) )
+ */
+
+#ifndef TIPC_OUTPUT
+#define TIPC_OUTPUT TEE(CONS,LOG)
+#endif
+
+#ifndef DBG_OUTPUT
+#define DBG_OUTPUT NULL
+#endif
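+
+/*
+ * Example (sketch): an individual .c file can redirect its debug output,
+ * e.g. to the system console, by redefining DBG_OUTPUT before any
+ * #include directives:
+ *
+ *	#define DBG_OUTPUT CONS
+ *	#include "core.h"
+ */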
+
+#else
+
+#ifndef DBG_OUTPUT
+#define DBG_OUTPUT NULL
+#endif
+
+/*
+ * TIPC debug support not included:
+ * - system messages are printed to system console
+ * - debug messages are not printed
+ */
+
+#define err(fmt, arg...)  printk(KERN_ERR "%s: " fmt "\n" , __FILE__ , ## arg)
+#define info(fmt, arg...) printk(KERN_INFO "%s: " fmt "\n" , __FILE__ , ## arg)
+#define warn(fmt, arg...) printk(KERN_WARNING "%s: " fmt "\n" , __FILE__ , ## arg)
+
+#define dbg(fmt, arg...) do {} while (0)
+#define msg_dbg(msg,txt) do {} while (0)
+#define dump(fmt,arg...) do {} while (0)
+
+#endif			  
+
+
+/* 
+ * TIPC-specific error codes
+ */
+
+#define ELINKCONG EAGAIN	/* link congestion <=> resource unavailable */
+
+/*
+ * Global configuration variables
+ */
+
+extern u32 tipc_own_addr;
+extern int tipc_max_zones;
+extern int tipc_max_clusters;
+extern int tipc_max_nodes;
+extern int tipc_max_slaves;
+extern int tipc_max_ports;
+extern int tipc_max_subscriptions;
+extern int tipc_max_publications;
+extern int tipc_net_id;
+extern int tipc_remote_management;
+
+/*
+ * Other global variables
+ */
+
+extern int tipc_mode;
+extern int tipc_random;
+extern const char tipc_alphabet[];
+extern atomic_t tipc_user_count;
+
+
+/*
+ * Routines available to privileged subsystems
+ */
+
+extern int  start_core(void);
+extern void stop_core(void);
+extern int  start_net(void);
+extern void stop_net(void);
+
+static inline int delimit(int val, int min, int max)
+{
+	if (val > max)
+		return max;
+	if (val < min)
+		return min;
+	return val;
+}
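+
+/*
+ * Example: delimit(70000, 127, 65535) yields 65535, while
+ * delimit(100, 127, 65535) yields 127.
+ */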
+
+
+/*
+ * TIPC timer and signal code
+ */
+
+typedef void (*Handler) (unsigned long);
+
+u32 k_signal(Handler routine, unsigned long argument);
+
+/**
+ * k_init_timer - initialize a timer
+ * @timer: pointer to timer structure
+ * @routine: pointer to routine to invoke when timer expires
+ * @argument: value to pass to routine when timer expires
+ * 
+ * Timer must be initialized before use (and terminated when no longer needed).
+ */
+
+static inline void k_init_timer(struct timer_list *timer, Handler routine, 
+				unsigned long argument)
+{
+	dbg("initializing timer %p\n", timer);
+	init_timer(timer);
+	timer->function = routine;
+	timer->data = argument;
+}
+
+/**
+ * k_start_timer - start a timer
+ * @timer: pointer to timer structure
+ * @msec: time to delay (in ms)
+ * 
+ * Schedules a previously initialized timer for later execution.
+ * If timer is already running, the new timeout overrides the previous request.
+ * 
+ * To ensure the timer doesn't expire before the specified delay elapses,
+ * the amount of delay is rounded up when converting to jiffies, and an
+ * additional jiffy is added to allow for the starting time falling
+ * partway through the current jiffy.
+ */
+
+static inline void k_start_timer(struct timer_list *timer, unsigned long msec)
+{
+	dbg("starting timer %p for %lu\n", timer, msec);
+	mod_timer(timer, jiffies + msecs_to_jiffies(msec) + 1);
+}
+
+/**
+ * k_cancel_timer - cancel a timer
+ * @timer: pointer to timer structure
+ * 
+ * Cancels a previously initialized timer.  
+ * Can be called safely even if the timer is already inactive.
+ * 
+ * WARNING: Must not be called when holding locks required by the timer's
+ *          timeout routine, otherwise deadlock can occur on SMP systems!
+ */
+
+static inline void k_cancel_timer(struct timer_list *timer)
+{
+	dbg("cancelling timer %p\n", timer);
+	del_timer_sync(timer);
+}
+
+/**
+ * k_term_timer - terminate a timer
+ * @timer: pointer to timer structure
+ * 
+ * Prevents further use of a previously initialized timer.
+ * 
+ * WARNING: Caller must ensure timer isn't currently running.
+ * 
+ * (Do not "enhance" this routine to automatically cancel an active timer,
+ * otherwise deadlock can arise when a timeout routine calls k_term_timer.)
+ */
+
+static inline void k_term_timer(struct timer_list *timer)
+{
+	dbg("terminating timer %p\n", timer);
+}
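+
+/*
+ * Example (sketch), assuming a hypothetical expiry routine
+ * my_timeout(unsigned long) and argument value:
+ *
+ *	struct timer_list my_timer;
+ *
+ *	k_init_timer(&my_timer, my_timeout, (unsigned long)value);
+ *	k_start_timer(&my_timer, 500);		(expires in ~500 ms)
+ *	...
+ *	k_cancel_timer(&my_timer);		(safe even if already expired)
+ *	k_term_timer(&my_timer);
+ */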
+
+
+/*
+ * TIPC message buffer code
+ *
+ * TIPC message buffer headroom leaves room for a 14 byte Ethernet header,
+ * while ensuring the TIPC header is word aligned for quicker access
+ */
+
+#define BUF_HEADROOM 16u 
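+
+/* With a 14 byte Ethernet header, reserving 16 bytes keeps the TIPC
+ * header on a 4 byte boundary (16 % 4 == 0, whereas 14 % 4 == 2).
+ */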
+
+struct tipc_skb_cb {
+	void *handle;
+};
+
+#define TIPC_SKB_CB(__skb) ((struct tipc_skb_cb *)&((__skb)->cb[0]))
+
+
+static inline struct tipc_msg *buf_msg(struct sk_buff *skb)
+{
+	return (struct tipc_msg *)skb->data;
+}
+
+/**
+ * buf_acquire - creates a TIPC message buffer
+ * @size: message size (including TIPC header)
+ *
+ * Returns a new buffer, or NULL if one cannot be allocated.  Space is
+ * reserved for a data link header.
+ */
+
+static inline struct sk_buff *buf_acquire(u32 size)
+{
+	struct sk_buff *skb;
+	unsigned int buf_size = (BUF_HEADROOM + size + 3) & ~3u;
+
+	skb = alloc_skb(buf_size, GFP_ATOMIC);
+	if (skb) {
+		skb_reserve(skb, BUF_HEADROOM);
+		skb_put(skb, size);
+		skb->next = NULL;
+	}
+	return skb;
+}
+
+/**
+ * buf_discard - frees a TIPC message buffer
+ * @skb: message buffer
+ *
+ * Frees a message buffer.  If passed NULL, just returns.
+ */
+
+static inline void buf_discard(struct sk_buff *skb)
+{
+	if (likely(skb != NULL))
+		kfree_skb(skb);
+}
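+
+/*
+ * Example (sketch), following the pattern used by the routing code in
+ * cluster.c: acquire a buffer, initialize its TIPC header, and hand it
+ * to the link layer, which takes over ownership:
+ *
+ *	struct sk_buff *buf = buf_acquire(INT_H_SIZE);
+ *
+ *	if (buf) {
+ *		msg_init(buf_msg(buf), ROUTE_DISTRIBUTOR, 0, TIPC_OK,
+ *			 INT_H_SIZE, dest);
+ *		link_send(buf, dest, dest);
+ *	}
+ *
+ * A buffer that ends up not being sent is released with buf_discard().
+ */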
+
+#endif			
diff --git a/net/tipc/dbg.c b/net/tipc/dbg.c
new file mode 100644
index 0000000..efd6d65
--- /dev/null
+++ b/net/tipc/dbg.c
@@ -0,0 +1,392 @@
+/*
+ * net/tipc/dbg.c: TIPC print buffer routines for debugging
+ * 
+ * Copyright (c) 2003-2005, Ericsson Research Canada
+ * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005-2006, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without 
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this 
+ * list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice, 
+ * this list of conditions and the following disclaimer in the documentation 
+ * and/or other materials provided with the distribution.
+ * Neither the names of the copyright holders nor the names of its 
+ * contributors may be used to endorse or promote products derived from this 
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "config.h"
+#include "dbg.h"
+
+#define MAX_STRING 512
+
+static char print_string[MAX_STRING];
+static spinlock_t print_lock = SPIN_LOCK_UNLOCKED;
+
+static struct print_buf cons_buf = { NULL, 0, NULL, NULL };
+struct print_buf *CONS = &cons_buf;
+
+static struct print_buf log_buf = { NULL, 0, NULL, NULL };
+struct print_buf *LOG = &log_buf;
+
+
+#define FORMAT(PTR,LEN,FMT) \
+{\
+       va_list args;\
+       va_start(args, FMT);\
+       LEN = vsprintf(PTR, FMT, args);\
+       va_end(args);\
+       *(PTR + LEN) = '\0';\
+}
+
+/*
+ * Locking policy when using print buffers.
+ *
+ * 1) Routines of the form printbuf_XXX() rely on the caller to prevent
+ *    simultaneous use of the print buffer(s) being manipulated.
+ * 2) tipc_printf() uses 'print_lock' to prevent simultaneous use of
+ *    'print_string' and to protect its print buffer(s).
+ * 3) TEE() uses 'print_lock' to protect its print buffer(s).
+ * 4) Routines of the form log_XXX() uses 'print_lock' to protect LOG.
+ */
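+
+/*
+ * Example (sketch): one tipc_printf() call can feed several print buffers
+ * by chaining them with TEE():
+ *
+ *	tipc_printf(TEE(CONS, LOG), "bearer start failed (%d)\n", res);
+ *
+ * which sends the message to both the system console and the TIPC log.
+ */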
+
+/**
+ * printbuf_init - initialize print buffer to empty
+ */
+
+void printbuf_init(struct print_buf *pb, char *raw, u32 sz)
+{
+	if (!pb || !raw || (sz < (MAX_STRING + 1)))
+		return;
+
+	pb->crs = pb->buf = raw;
+	pb->size = sz;
+	pb->next = NULL;
+	pb->buf[0] = 0;
+	pb->buf[sz-1] = ~0;
+}
+
+/**
+ * printbuf_reset - reinitialize print buffer to empty state
+ */
+
+void printbuf_reset(struct print_buf *pb)
+{
+	if (pb && pb->buf)
+		printbuf_init(pb, pb->buf, pb->size);
+}
+
+/**
+ * printbuf_empty - test if print buffer is in empty state
+ */
+
+int printbuf_empty(struct print_buf *pb)
+{
+	return (!pb || !pb->buf || (pb->crs == pb->buf));
+}
+
+/**
+ * printbuf_validate - check for print buffer overflow
+ * 
+ * Verifies that a print buffer has captured all data written to it. 
+ * If data has been lost, linearize buffer and prepend an error message
+ * 
+ * Returns length of print buffer data string (including trailing NULL)
+ */
+
+int printbuf_validate(struct print_buf *pb)
+{
+	char *err = "             *** PRINT BUFFER WRAPPED AROUND ***\n";
+	char *cp_buf;
+	struct print_buf cb;
+
+	if (!pb || !pb->buf)
+		return 0;
+
+	if (pb->buf[pb->size - 1] == '\0') {
+		cp_buf = kmalloc(pb->size, GFP_ATOMIC);
+		if (cp_buf != NULL) {
+			printbuf_init(&cb, cp_buf, pb->size);
+			printbuf_move(&cb, pb);
+			printbuf_move(pb, &cb);
+			kfree(cp_buf);
+			memcpy(pb->buf, err, strlen(err));
+		} else {
+			printbuf_reset(pb);
+			tipc_printf(pb, err);
+		}
+	}
+	return (pb->crs - pb->buf + 1);
+}
+
+/**
+ * printbuf_move - move print buffer contents to another print buffer
+ * 
+ * Current contents of destination print buffer (if any) are discarded.
+ * Source print buffer becomes empty if a successful move occurs.
+ */
+
+void printbuf_move(struct print_buf *pb_to, struct print_buf *pb_from)
+{
+	int len;
+
+	/* Handle the cases where contents can't be moved */
+
+	if (!pb_to || !pb_to->buf)
+		return;
+
+	if (!pb_from || !pb_from->buf) {
+		printbuf_reset(pb_to);
+		return;
+	}
+
+	if (pb_to->size < pb_from->size) {
+		printbuf_reset(pb_to);
+		tipc_printf(pb_to, "*** PRINT BUFFER OVERFLOW ***");
+		return;
+	}
+
+	/* Copy data from char after cursor to end (if used) */
+	len = pb_from->buf + pb_from->size - pb_from->crs - 2;
+	if ((pb_from->buf[pb_from->size-1] == 0) && (len > 0)) {
+		strcpy(pb_to->buf, pb_from->crs + 1);
+		pb_to->crs = pb_to->buf + len;
+	} else
+		pb_to->crs = pb_to->buf;
+
+	/* Copy data from start to cursor (always) */
+	len = pb_from->crs - pb_from->buf;
+	strcpy(pb_to->crs, pb_from->buf);
+	pb_to->crs += len;
+
+	printbuf_reset(pb_from);
+}
+
+/**
+ * tipc_printf - append formatted output to print buffer chain
+ */
+
+void tipc_printf(struct print_buf *pb, const char *fmt, ...)
+{
+	int chars_to_add;
+	int chars_left;
+	char save_char;
+	struct print_buf *pb_next;
+
+	spin_lock_bh(&print_lock);
+	FORMAT(print_string, chars_to_add, fmt);
+	if (chars_to_add >= MAX_STRING)
+		strcpy(print_string, "*** STRING TOO LONG ***");
+
+	while (pb) {
+		if (pb == CONS)
+			printk("%s", print_string);
+		else if (pb->buf) {
+			chars_left = pb->buf + pb->size - pb->crs - 1;
+			if (chars_to_add <= chars_left) {
+				strcpy(pb->crs, print_string);
+				pb->crs += chars_to_add;
+			} else {
+				strcpy(pb->buf, print_string + chars_left);
+				save_char = print_string[chars_left];
+				print_string[chars_left] = 0;
+				strcpy(pb->crs, print_string);
+				print_string[chars_left] = save_char;
+				pb->crs = pb->buf + chars_to_add - chars_left;
+			}
+		}
+		pb_next = pb->next;
+		pb->next = NULL;
+		pb = pb_next;
+	}
+	spin_unlock_bh(&print_lock);
+}
+
+/**
+ * TEE - perform next output operation on both print buffers  
+ */
+
+struct print_buf *TEE(struct print_buf *b0, struct print_buf *b1)
+{
+	struct print_buf *pb = b0;
+
+	if (!b0 || (b0 == b1))
+		return b1;
+	if (!b1)
+		return b0;
+
+	spin_lock_bh(&print_lock);
+	while (pb->next) {
+		if ((pb->next == b1) || (pb->next == b0))
+			pb->next = pb->next->next;
+		else
+			pb = pb->next;
+	}
+	pb->next = b1;
+	spin_unlock_bh(&print_lock);
+	return b0;
+}
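+
+/*
+ * Example (illustrative): TEE() can be combined with tipc_printf() to
+ * send the next message to both the console and the system log:
+ *
+ *	tipc_printf(TEE(CONS, LOG), "bearer started\n");
+ */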
+
+/**
+ * print_to_console - write string of bytes to console in multiple chunks
+ */
+
+static void print_to_console(char *crs, int len)
+{
+	int rest = len;
+
+	while (rest > 0) {
+		int sz = rest < MAX_STRING ? rest : MAX_STRING;
+		char c = crs[sz];
+
+		crs[sz] = 0;
+		printk("%s", crs);
+		crs[sz] = c;
+		rest -= sz;
+		crs += sz;
+	}
+}
+
+/**
+ * printbuf_dump - write print buffer contents to console
+ */
+
+static void printbuf_dump(struct print_buf *pb)
+{
+	int len;
+
+	/* Dump print buffer from char after cursor to end (if used) */
+	len = pb->buf + pb->size - pb->crs - 2;
+	if ((pb->buf[pb->size - 1] == 0) && (len > 0))
+		print_to_console(pb->crs + 1, len);
+
+	/* Dump print buffer from start to cursor (always) */
+	len = pb->crs - pb->buf;
+	print_to_console(pb->buf, len);
+}
+
+/**
+ * tipc_dump - dump non-console print buffer(s) to console
+ */
+
+void tipc_dump(struct print_buf *pb, const char *fmt, ...)
+{
+	int len;
+
+	spin_lock_bh(&print_lock);
+	FORMAT(print_string, len, fmt);
+	printk("%s", print_string);
+
+	for (; pb; pb = pb->next) {
+		if (pb == CONS)
+			continue;
+		printk("\n---- Start of dump,%s log ----\n\n", 
+		       (pb == LOG) ? "global" : "local");
+		printbuf_dump(pb);
+		printbuf_reset(pb);
+		printk("\n-------- End of dump --------\n");
+	}
+	spin_unlock_bh(&print_lock);
+}
+
+/**
+ * log_stop - free up TIPC log print buffer 
+ */
+
+void log_stop(void)
+{
+	spin_lock_bh(&print_lock);
+	if (LOG->buf) {
+		kfree(LOG->buf);
+		LOG->buf = NULL;
+	}
+	spin_unlock_bh(&print_lock);
+}
+
+/**
+ * log_reinit - set TIPC log print buffer to specified size
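+ *
+ * A non-zero size of MAX_STRING or less is rounded up to MAX_STRING + 1
+ * so that a single formatted string always fits; e.g. log_reinit(100)
+ * actually allocates a 513-byte buffer.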
+ */
+
+void log_reinit(int log_size)
+{
+	log_stop();
+
+	if (log_size) {
+		if (log_size <= MAX_STRING)
+			log_size = MAX_STRING + 1;
+		spin_lock_bh(&print_lock);
+		printbuf_init(LOG, kmalloc(log_size, GFP_ATOMIC), log_size);
+		spin_unlock_bh(&print_lock);
+	}
+}
+
+/**
+ * log_resize - reconfigure size of TIPC log buffer
+ */
+
+struct sk_buff *log_resize(const void *req_tlv_area, int req_tlv_space)
+{
+	u32 value;
+
+	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
+		return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+
+	value = *(u32 *)TLV_DATA(req_tlv_area);
+	value = ntohl(value);
+	if (value != delimit(value, 0, 32768))
+		return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
+					      " (log size must be 0-32768)");
+	log_reinit(value);
+	return cfg_reply_none();
+}
+
+/**
+ * log_dump - capture TIPC log buffer contents in configuration message
+ */
+
+struct sk_buff *log_dump(void)
+{
+	struct sk_buff *reply;
+
+	spin_lock_bh(&print_lock);
+	if (!LOG->buf)
+		reply = cfg_reply_ultra_string("log not activated\n");
+	else if (printbuf_empty(LOG))
+		reply = cfg_reply_ultra_string("log is empty\n");
+	else {
+		struct tlv_desc *rep_tlv;
+		struct print_buf pb;
+		int str_len;
+
+		str_len = min(LOG->size, 32768u);
+		reply = cfg_reply_alloc(TLV_SPACE(str_len));
+		if (reply) {
+			rep_tlv = (struct tlv_desc *)reply->data;
+			printbuf_init(&pb, TLV_DATA(rep_tlv), str_len);
+			printbuf_move(&pb, LOG);
+			str_len = strlen(TLV_DATA(rep_tlv)) + 1;
+			skb_put(reply, TLV_SPACE(str_len));
+			TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
+		}
+	}
+	spin_unlock_bh(&print_lock);
+	return reply;
+}
+
diff --git a/net/tipc/dbg.h b/net/tipc/dbg.h
new file mode 100644
index 0000000..af4217a
--- /dev/null
+++ b/net/tipc/dbg.h
@@ -0,0 +1,56 @@
+/*
+ * net/tipc/dbg.h: Include file for TIPC print buffer routines
+ * 
+ * Copyright (c) 2003-2005, Ericsson Research Canada
+ * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005-2006, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without 
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this 
+ * list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice, 
+ * this list of conditions and the following disclaimer in the documentation 
+ * and/or other materials provided with the distribution.
+ * Neither the names of the copyright holders nor the names of its 
+ * contributors may be used to endorse or promote products derived from this 
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_DBG_H
+#define _TIPC_DBG_H
+
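+/**
+ * struct print_buf - TIPC print buffer structure
+ * @buf: pointer to character array containing print buffer contents
+ * @size: size of character array
+ * @crs: pointer to first unused position in character array
+ * @next: link to next print buffer in a chain (set up by TEE())
+ */
+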
+struct print_buf {
+	char *buf;
+	u32 size;
+	char *crs;
+	struct print_buf *next;
+};
+
+void printbuf_init(struct print_buf *pb, char *buf, u32 sz);
+void printbuf_reset(struct print_buf *pb);
+int  printbuf_empty(struct print_buf *pb);
+int  printbuf_validate(struct print_buf *pb);
+void printbuf_move(struct print_buf *pb_to, struct print_buf *pb_from);
+
+void log_reinit(int log_size);
+void log_stop(void);
+
+struct sk_buff *log_resize(const void *req_tlv_area, int req_tlv_space);
+struct sk_buff *log_dump(void);
+
+#endif
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
new file mode 100644
index 0000000..c83c1be
--- /dev/null
+++ b/net/tipc/discover.c
@@ -0,0 +1,339 @@
+/*
+ * net/tipc/discover.c
+ * 
+ * Copyright (c) 2003-2005, Ericsson Research Canada
+ * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005-2006, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without 
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this 
+ * list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice, 
+ * this list of conditions and the following disclaimer in the documentation 
+ * and/or other materials provided with the distribution.
+ * Neither the names of the copyright holders nor the names of its 
+ * contributors may be used to endorse or promote products derived from this 
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "dbg.h"
+#include "link.h"
+#include "zone.h"
+#include "discover.h"
+#include "port.h"
+#include "name_table.h"
+
+#define TIPC_LINK_REQ_INIT	125	/* min delay during bearer start up */
+#define TIPC_LINK_REQ_FAST	2000	/* normal delay if bearer has no links */
+#define TIPC_LINK_REQ_SLOW	600000	/* normal delay if bearer has links */
+
+#if 0
+#define  GET_NODE_INFO         300
+#define  GET_NODE_INFO_RESULT  301
+#define  FORWARD_LINK_PROBE    302
+#define  LINK_REQUEST_REJECTED 303
+#define  LINK_REQUEST_ACCEPTED 304
+#define  DROP_LINK_REQUEST     305
+#define  CHECK_LINK_COUNT      306
+#endif
+
+/* 
+ * TODO: Most of the inter-cluster setup stuff should be
+ * rewritten, and be made conformant with specification.
+ */ 
+
+
+/**
+ * struct link_req - information about an ongoing link setup request
+ * @bearer: bearer issuing requests
+ * @dest: destination address for request messages
+ * @buf: request message to be (repeatedly) sent
+ * @timer: timer governing period between requests
+ * @timer_intv: current interval between requests (in ms)
+ */
+struct link_req {
+	struct bearer *bearer;
+	struct tipc_media_addr dest;
+	struct sk_buff *buf;
+	struct timer_list timer;
+	unsigned int timer_intv;
+};
+
+
+#if 0
+int disc_create_link(const struct tipc_link_create *argv) 
+{
+	/* 
+	 * Code for inter cluster link setup here 
+	 */
+	return TIPC_OK;
+}
+#endif
+
+/*
+ * disc_link_event - handle a link up/down event (inter-cluster links only)
+ */
+
+void disc_link_event(u32 addr, char *name, int up) 
+{
+	if (in_own_cluster(addr))
+		return;
+	/* 
+	 * Code for inter cluster link setup here 
+	 */
+}
+
+/** 
+ * disc_init_msg - initialize a link setup message
+ * @type: message type (request or response)
+ * @req_links: number of links associated with message
+ * @dest_domain: network domain of node(s) which should respond to message
+ * @b_ptr: ptr to bearer issuing message
+ */
+
+struct sk_buff *disc_init_msg(u32 type,
+			      u32 req_links,
+			      u32 dest_domain,
+			      struct bearer *b_ptr)
+{
+	struct sk_buff *buf = buf_acquire(DSC_H_SIZE);
+	struct tipc_msg *msg;
+
+	if (buf) {
+		msg = buf_msg(buf);
+		msg_init(msg, LINK_CONFIG, type, TIPC_OK, DSC_H_SIZE,
+			 dest_domain);
+		msg_set_non_seq(msg);
+		msg_set_req_links(msg, req_links);
+		msg_set_dest_domain(msg, dest_domain);
+		msg_set_bc_netid(msg, tipc_net_id);
+		msg_set_media_addr(msg, &b_ptr->publ.addr);
+	}
+	return buf;
+}
+
+/**
+ * disc_recv_msg - handle incoming link setup message (request or response)
+ * @buf: buffer containing message
+ */
+
+void disc_recv_msg(struct sk_buff *buf)
+{
+	struct bearer *b_ptr = (struct bearer *)TIPC_SKB_CB(buf)->handle;
+	struct link *link;
+	struct tipc_media_addr media_addr;
+	struct tipc_msg *msg = buf_msg(buf);
+	u32 dest = msg_dest_domain(msg);
+	u32 orig = msg_prevnode(msg);
+	u32 net_id = msg_bc_netid(msg);
+	u32 type = msg_type(msg);
+
+	msg_get_media_addr(msg, &media_addr);
+	msg_dbg(msg, "RECV:");
+	buf_discard(buf);
+
+	if (net_id != tipc_net_id)
+		return;
+	if (!addr_domain_valid(dest))
+		return;
+	if (!addr_node_valid(orig))
+		return;
+	if (orig == tipc_own_addr)
+		return;
+	if (!in_scope(dest, tipc_own_addr))
+		return;
+	if (is_slave(tipc_own_addr) && is_slave(orig))
+		return;
+	if (is_slave(orig) && !in_own_cluster(orig))
+		return;
+	if (in_own_cluster(orig)) {
+		/* Always accept link here */
+		struct sk_buff *rbuf;
+		struct tipc_media_addr *addr;
+		struct node *n_ptr = node_find(orig);
+		int link_up;
+		dbg(" in own cluster\n");
+		if (n_ptr == NULL)
+			n_ptr = node_create(orig);
+		if (n_ptr == NULL) {
+			warn("Memory squeeze; failed to create node\n");
+			return;
+		}
+		spin_lock_bh(&n_ptr->lock);
+		link = n_ptr->links[b_ptr->identity];
+		if (!link) {
+			dbg("creating link\n");
+			link = link_create(b_ptr, orig, &media_addr);
+			if (!link) {
+				spin_unlock_bh(&n_ptr->lock);
+				return;
+			}
+		}
+		addr = &link->media_addr;
+		if (memcmp(addr, &media_addr, sizeof(*addr))) {
+			char addr_string[16];
+
+			warn("New bearer address for %s\n", 
+			     addr_string_fill(addr_string, orig));
+			memcpy(addr, &media_addr, sizeof(*addr));
+			link_reset(link);     
+		}
+		link_up = link_is_up(link);
+		spin_unlock_bh(&n_ptr->lock);
+		if ((type == DSC_RESP_MSG) || link_up)
+			return;
+		rbuf = disc_init_msg(DSC_RESP_MSG, 1, orig, b_ptr);
+		if (rbuf != NULL) {
+			msg_dbg(buf_msg(rbuf), "SEND:");
+			b_ptr->media->send_msg(rbuf, &b_ptr->publ, &media_addr);
+			buf_discard(rbuf);
+		}
+	}
+}
+
+/**
+ * disc_stop_link_req - stop sending periodic link setup requests
+ * @req: ptr to link request structure
+ */
+
+void disc_stop_link_req(struct link_req *req) 
+{
+	if (!req)
+		return;
+		
+	k_cancel_timer(&req->timer);
+	k_term_timer(&req->timer);
+	buf_discard(req->buf);
+	kfree(req);
+} 
+
+/**
+ * disc_update_link_req - update frequency of periodic link setup requests
+ * @req: ptr to link request structure
+ */
+
+void disc_update_link_req(struct link_req *req) 
+{
+	if (!req)
+		return;
+
+	if (req->timer_intv == TIPC_LINK_REQ_SLOW) {
+		if (!req->bearer->nodes.count) {
+			req->timer_intv = TIPC_LINK_REQ_FAST;
+			k_start_timer(&req->timer, req->timer_intv);
+		}
+	} else if (req->timer_intv == TIPC_LINK_REQ_FAST) {
+		if (req->bearer->nodes.count) {
+			req->timer_intv = TIPC_LINK_REQ_SLOW;
+			k_start_timer(&req->timer, req->timer_intv);
+		}
+	} else {
+		/* leave timer "as is" if haven't yet reached a "normal" rate */
+	}
+} 
+
+/**
+ * disc_timeout - send a periodic link setup request
+ * @req: ptr to link request structure
+ * 
+ * Called whenever a link setup request timer associated with a bearer expires.
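+ *
+ * The interval between requests starts at TIPC_LINK_REQ_INIT and doubles
+ * on each expiration (125, 250, 500, 1000, 2000 ms) until it settles at
+ * TIPC_LINK_REQ_FAST, or at TIPC_LINK_REQ_SLOW once the bearer has
+ * contact with at least one node.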
+ */
+
+static void disc_timeout(struct link_req *req) 
+{
+	struct tipc_msg *msg = buf_msg(req->buf);
+
+	spin_lock_bh(&req->bearer->publ.lock);
+
+#if 0
+	/* CURRENTLY DON'T SUPPORT INTER-ZONE LINKS */
+	u32 dest_domain = msg_dest_domain(msg);
+	int stop = 0;
+	if (!in_scope(dest_domain, tipc_own_addr)) {
+		struct _zone *z_ptr = zone_find(dest_domain);
+
+		if (z_ptr && (z_ptr->links >= msg_req_links(msg)))
+			stop = 1;
+		if (req->timer_intv >= 32000)
+			stop = 1;
+	}
+	if (stop) {
+		k_cancel_timer(&req->timer);
+		buf_discard(req->buf);
+		kfree(req);
+		spin_unlock_bh(&req->bearer->publ.lock);
+		return;
+	}
+#endif
+
+	msg_dbg(msg,"SEND:");
+	req->bearer->media->send_msg(req->buf, &req->bearer->publ, &req->dest);
+
+	if ((req->timer_intv == TIPC_LINK_REQ_SLOW) ||
+	    (req->timer_intv == TIPC_LINK_REQ_FAST)) {
+		/* leave timer interval "as is" if already at a "normal" rate */
+	} else {
+		req->timer_intv *= 2;
+		if (req->timer_intv > TIPC_LINK_REQ_FAST)
+			req->timer_intv = TIPC_LINK_REQ_FAST;
+		if ((req->timer_intv == TIPC_LINK_REQ_FAST) && 
+		    (req->bearer->nodes.count))
+			req->timer_intv = TIPC_LINK_REQ_SLOW;
+	}
+	k_start_timer(&req->timer, req->timer_intv);
+
+	spin_unlock_bh(&req->bearer->publ.lock);
+}
+
+/**
+ * disc_init_link_req - start sending periodic link setup requests
+ * @b_ptr: ptr to bearer issuing requests
+ * @dest: destination address for request messages
+ * @dest_domain: network domain of node(s) which should respond to message
+ * @req_links: max number of desired links
+ * 
+ * Returns pointer to link request structure, or NULL if unable to create.
+ */
+
+struct link_req *disc_init_link_req(struct bearer *b_ptr, 
+				    const struct tipc_media_addr *dest,
+				    u32 dest_domain,
+				    u32 req_links) 
+{
+	struct link_req *req;
+
+	req = kmalloc(sizeof(*req), GFP_ATOMIC);
+	if (!req)
+		return NULL;
+
+	req->buf = disc_init_msg(DSC_REQ_MSG, req_links, dest_domain, b_ptr);
+	if (!req->buf) {
+		kfree(req);
+		return NULL;
+	}
+
+	memcpy(&req->dest, dest, sizeof(*dest));
+	req->bearer = b_ptr;
+	req->timer_intv = TIPC_LINK_REQ_INIT;
+	k_init_timer(&req->timer, (Handler)disc_timeout, (unsigned long)req);
+	k_start_timer(&req->timer, req->timer_intv);
+	return req;
+} 
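+
+/*
+ * Usage sketch (illustrative; the surrounding bearer code and variable
+ * names are assumptions, not part of this file): a bearer typically
+ * starts discovery when it is enabled and stops it when it is disabled:
+ *
+ *	req = disc_init_link_req(b_ptr, &media->bcast_addr,
+ *				 dest_domain, max_req_links);
+ *	...
+ *	disc_stop_link_req(req);
+ */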
+
diff --git a/net/tipc/discover.h b/net/tipc/discover.h
new file mode 100644
index 0000000..90c1de9
--- /dev/null
+++ b/net/tipc/discover.h
@@ -0,0 +1,55 @@
+/*
+ * net/tipc/discover.h
+ *
+ * Copyright (c) 2003-2005, Ericsson Research Canada
+ * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005-2006, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without 
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this 
+ * list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice, 
+ * this list of conditions and the following disclaimer in the documentation 
+ * and/or other materials provided with the distribution.
+ * Neither the names of the copyright holders nor the names of its 
+ * contributors may be used to endorse or promote products derived from this 
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_DISCOVER_H
+#define _TIPC_DISCOVER_H
+
+#include <linux/tipc.h>
+
+struct link_req;
+
+struct link_req *disc_init_link_req(struct bearer *b_ptr, 
+			            const struct tipc_media_addr *dest,
+			            u32 dest_domain,
+			            u32 req_links);
+void disc_update_link_req(struct link_req *req);
+void disc_stop_link_req(struct link_req *req);
+
+void disc_recv_msg(struct sk_buff *buf);
+
+void disc_link_event(u32 addr, char *name, int up);
+#if 0
+int  disc_create_link(const struct tipc_link_create *argv);
+#endif
+
+#endif
diff --git a/net/tipc/eth_media.c b/net/tipc/eth_media.c
new file mode 100644
index 0000000..b634d7a
--- /dev/null
+++ b/net/tipc/eth_media.c
@@ -0,0 +1,296 @@
+/*
+ * net/tipc/eth_media.c: Ethernet bearer support for TIPC
+ * 
+ * Copyright (c) 2003-2005, Ericsson Research Canada
+ * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005-2006, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without 
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this 
+ * list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice, 
+ * this list of conditions and the following disclaimer in the documentation 
+ * and/or other materials provided with the distribution.
+ * Neither the names of the copyright holders nor the names of its 
+ * contributors may be used to endorse or promote products derived from this 
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <net/tipc/tipc.h>
+#include <net/tipc/tipc_bearer.h>
+#include <net/tipc/tipc_msg.h>
+#include <linux/netdevice.h>
+#include <linux/version.h>
+
+#define MAX_ETH_BEARERS		2
+#define TIPC_PROTOCOL		0x88ca
+#define ETH_LINK_PRIORITY	10
+#define ETH_LINK_TOLERANCE	TIPC_DEF_LINK_TOL
+
+
+/**
+ * struct eth_bearer - Ethernet bearer data structure
+ * @bearer: ptr to associated "generic" bearer structure
+ * @dev: ptr to associated Ethernet network device
+ * @tipc_packet_type: used in binding TIPC to Ethernet driver
+ */
+ 
+struct eth_bearer {
+	struct tipc_bearer *bearer;
+	struct net_device *dev;
+	struct packet_type tipc_packet_type;
+};
+
+static struct eth_bearer eth_bearers[MAX_ETH_BEARERS];
+static int eth_started = 0;
+static struct notifier_block notifier;
+
+/**
+ * send_msg - send a TIPC message out over an Ethernet interface 
+ */
+
+static int send_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr, 
+		    struct tipc_media_addr *dest)
+{
+	struct sk_buff *clone;
+	struct net_device *dev;
+
+	clone = skb_clone(buf, GFP_ATOMIC);
+	if (clone) {
+		clone->nh.raw = clone->data;
+		dev = ((struct eth_bearer *)(tb_ptr->usr_handle))->dev;
+		clone->dev = dev;
+		dev->hard_header(clone, dev, TIPC_PROTOCOL, 
+				 &dest->dev_addr.eth_addr,
+				 dev->dev_addr, clone->len);
+		dev_queue_xmit(clone);
+	}
+	return TIPC_OK;
+}
+
+/**
+ * recv_msg - handle incoming TIPC message from an Ethernet interface
+ * 
+ * Truncates any Ethernet padding/CRC appended to the message, and
+ * discards the buffer if the message size does not match the actual length
+ */
+
+static int recv_msg(struct sk_buff *buf, struct net_device *dev, 
+		    struct packet_type *pt, struct net_device *orig_dev)
+{
+	struct eth_bearer *eb_ptr = (struct eth_bearer *)pt->af_packet_priv;
+	u32 size;
+
+	if (likely(eb_ptr->bearer)) {
+		size = msg_size((struct tipc_msg *)buf->data);
+		skb_trim(buf, size);
+		if (likely(buf->len == size)) {
+			buf->next = NULL;
+			tipc_recv_msg(buf, eb_ptr->bearer);
+		} else {
+			kfree_skb(buf);
+		}
+	} else {
+		kfree_skb(buf);
+	}
+	return TIPC_OK;
+}
+
+/**
+ * enable_bearer - attach TIPC bearer to an Ethernet interface 
+ */
+
+static int enable_bearer(struct tipc_bearer *tb_ptr)
+{
+	struct net_device *dev = dev_base;
+	struct eth_bearer *eb_ptr = &eth_bearers[0];
+	struct eth_bearer *stop = &eth_bearers[MAX_ETH_BEARERS];
+	char *driver_name = strchr((const char *)tb_ptr->name, ':') + 1;
+
+	/* Find device with specified name */
+
+	while (dev && dev->name && strcmp(dev->name, driver_name)) {
+		dev = dev->next;
+	}
+	if (!dev)
+		return -ENODEV;
+
+	/* Find Ethernet bearer for device (or create one) */
+
+	for (; (eb_ptr != stop) && eb_ptr->dev && (eb_ptr->dev != dev); eb_ptr++);
+	if (eb_ptr == stop)
+		return -EDQUOT;
+	if (!eb_ptr->dev) {
+		eb_ptr->dev = dev;
+		eb_ptr->tipc_packet_type.type = __constant_htons(TIPC_PROTOCOL);
+		eb_ptr->tipc_packet_type.dev = dev;
+		eb_ptr->tipc_packet_type.func = recv_msg;
+		eb_ptr->tipc_packet_type.af_packet_priv = eb_ptr;
+		INIT_LIST_HEAD(&(eb_ptr->tipc_packet_type.list));
+		dev_hold(dev);
+		dev_add_pack(&eb_ptr->tipc_packet_type);
+	}
+
+	/* Associate TIPC bearer with Ethernet bearer */
+
+	eb_ptr->bearer = tb_ptr;
+	tb_ptr->usr_handle = (void *)eb_ptr;
+	tb_ptr->mtu = dev->mtu;
+	tb_ptr->blocked = 0;
+	tb_ptr->addr.type = htonl(TIPC_MEDIA_TYPE_ETH);
+	memcpy(&tb_ptr->addr.dev_addr, &dev->dev_addr, ETH_ALEN);
+	return 0;
+}
+
+/**
+ * disable_bearer - detach TIPC bearer from an Ethernet interface 
+ *
+ * We would like to call dev_remove_pack() here, but that cannot be done
+ * at tasklet level.  Instead, use eth_bearer->bearer as a flag to discard
+ * incoming buffers, and postpone dev_remove_pack() until eth_media_stop()
+ * is called on exit.
+ */
+
+static void disable_bearer(struct tipc_bearer *tb_ptr)
+{
+	((struct eth_bearer *)tb_ptr->usr_handle)->bearer = NULL;
+}
+
+/**
+ * recv_notification - handle device updates from OS
+ *
+ * Change the state of the Ethernet bearer (if any) associated with the 
+ * specified device.
+ */
+
+static int recv_notification(struct notifier_block *nb, unsigned long evt, 
+			     void *dv)
+{
+	struct net_device *dev = (struct net_device *)dv;
+	struct eth_bearer *eb_ptr = &eth_bearers[0];
+	struct eth_bearer *stop = &eth_bearers[MAX_ETH_BEARERS];
+
+	while (eb_ptr->dev != dev) {
+		if (++eb_ptr == stop)
+			return NOTIFY_DONE;	/* couldn't find device */
+	}
+	if (!eb_ptr->bearer)
+		return NOTIFY_DONE;		/* bearer had been disabled */
+
+	eb_ptr->bearer->mtu = dev->mtu;
+
+	switch (evt) {
+	case NETDEV_CHANGE:
+		if (netif_carrier_ok(dev))
+			tipc_continue(eb_ptr->bearer);
+		else
+			tipc_block_bearer(eb_ptr->bearer->name);
+		break;
+	case NETDEV_UP:
+		tipc_continue(eb_ptr->bearer);
+		break;
+	case NETDEV_DOWN:
+		tipc_block_bearer(eb_ptr->bearer->name);
+		break;
+	case NETDEV_CHANGEMTU:
+	case NETDEV_CHANGEADDR:
+		tipc_block_bearer(eb_ptr->bearer->name);
+		tipc_continue(eb_ptr->bearer);
+		break;
+	case NETDEV_UNREGISTER:
+	case NETDEV_CHANGENAME:
+		tipc_disable_bearer(eb_ptr->bearer->name);
+		break;
+	}
+	return NOTIFY_OK;
+}
+
+/**
+ * eth_addr2str - convert Ethernet address to string
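+ *
+ * Produces output of the form "00:12:34:56:78:9a" (illustrative value):
+ * 17 characters plus a terminating NUL, hence the 18-byte minimum for
+ * @str_buf.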
+ */
+
+static char *eth_addr2str(struct tipc_media_addr *a, char *str_buf, int str_size)
+{
+	unchar *addr = (unchar *)&a->dev_addr;
+
+	if (str_size < 18)
+		*str_buf = '\0';
+	else
+		sprintf(str_buf, "%02x:%02x:%02x:%02x:%02x:%02x",
+			addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
+	return str_buf;
+}
+
+/**
+ * eth_media_start - activate Ethernet bearer support
+ *
+ * Register Ethernet media type with TIPC bearer code.  Also register
+ * with OS for notifications about device state changes.
+ */
+
+int eth_media_start(void)
+{
+	struct tipc_media_addr bcast_addr;
+	int res;
+
+	if (eth_started)
+		return -EINVAL;
+
+	memset(&bcast_addr, 0xff, sizeof(bcast_addr));
+	memset(eth_bearers, 0, sizeof(eth_bearers));
+
+	res = tipc_register_media(TIPC_MEDIA_TYPE_ETH, "eth",
+				  enable_bearer, disable_bearer, send_msg, 
+				  eth_addr2str, &bcast_addr, ETH_LINK_PRIORITY, 
+				  ETH_LINK_TOLERANCE, TIPC_DEF_LINK_WIN);
+	if (res)
+		return res;
+
+	notifier.notifier_call = &recv_notification;
+	notifier.priority = 0;
+	res = register_netdevice_notifier(&notifier);
+	if (!res)
+		eth_started = 1;
+	return res;
+}
+
+/**
+ * eth_media_stop - deactivate Ethernet bearer support
+ */
+
+void eth_media_stop(void)
+{
+	int i;
+
+	if (!eth_started)
+		return;
+
+	unregister_netdevice_notifier(&notifier);
+	for (i = 0; i < MAX_ETH_BEARERS ; i++) {
+		if (eth_bearers[i].bearer) {
+			eth_bearers[i].bearer->blocked = 1;
+			eth_bearers[i].bearer = NULL;
+		}
+		if (eth_bearers[i].dev) {
+			dev_remove_pack(&eth_bearers[i].tipc_packet_type);
+			dev_put(eth_bearers[i].dev);
+		}
+	}
+	memset(&eth_bearers, 0, sizeof(eth_bearers));
+	eth_started = 0;
+}
diff --git a/net/tipc/handler.c b/net/tipc/handler.c
new file mode 100644
index 0000000..c8fbb20
--- /dev/null
+++ b/net/tipc/handler.c
@@ -0,0 +1,129 @@
+/*
+ * net/tipc/handler.c: TIPC signal handling
+ * 
+ * Copyright (c) 2003-2005, Ericsson Research Canada
+ * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005-2006, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without 
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this 
+ * list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice, 
+ * this list of conditions and the following disclaimer in the documentation 
+ * and/or other materials provided with the distribution.
+ * Neither the names of the copyright holders nor the names of its 
+ * contributors may be used to endorse or promote products derived from this 
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+
+struct queue_item {
+	struct list_head next_signal;
+	void (*handler) (unsigned long);
+	unsigned long data;
+};
+
+static kmem_cache_t *tipc_queue_item_cache;
+static struct list_head signal_queue_head;
+static spinlock_t qitem_lock = SPIN_LOCK_UNLOCKED;
+static int handler_enabled = 0;
+
+static void process_signal_queue(unsigned long dummy);
+
+static DECLARE_TASKLET_DISABLED(tipc_tasklet, process_signal_queue, 0);
+
+
+unsigned int k_signal(Handler routine, unsigned long argument)
+{
+	struct queue_item *item;
+
+	if (!handler_enabled) {
+		err("Signal request ignored by handler\n");
+		return -ENOPROTOOPT;
+	}
+
+	spin_lock_bh(&qitem_lock);
+	item = kmem_cache_alloc(tipc_queue_item_cache, GFP_ATOMIC);
+	if (!item) {
+		err("Signal queue out of memory\n");
+		spin_unlock_bh(&qitem_lock);
+		return -ENOMEM;
+	}
+	item->handler = routine;
+	item->data = argument;
+	list_add_tail(&item->next_signal, &signal_queue_head);
+	spin_unlock_bh(&qitem_lock);
+	tasklet_schedule(&tipc_tasklet);
+	return 0;
+}
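+
+/*
+ * Example (illustrative; 'my_handler' and 'my_arg' are hypothetical):
+ * defer work to the TIPC signal tasklet, as link.c does when starting
+ * a newly created link:
+ *
+ *	static void my_handler(unsigned long data)
+ *	{
+ *		...
+ *	}
+ *
+ *	k_signal((Handler)my_handler, (unsigned long)my_arg);
+ */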
+
+static void process_signal_queue(unsigned long dummy)
+{
+	struct queue_item *item;
+	struct list_head *l, *n;
+
+	spin_lock_bh(&qitem_lock);
+	list_for_each_safe(l, n, &signal_queue_head) {
+		item = list_entry(l, struct queue_item, next_signal);
+		list_del(&item->next_signal);
+		spin_unlock_bh(&qitem_lock);
+		item->handler(item->data);
+		spin_lock_bh(&qitem_lock);
+		kmem_cache_free(tipc_queue_item_cache, item);
+	}
+	spin_unlock_bh(&qitem_lock);
+}
+
+int handler_start(void)
+{
+	tipc_queue_item_cache = 
+		kmem_cache_create("tipc_queue_items", sizeof(struct queue_item),
+				  0, SLAB_HWCACHE_ALIGN, NULL, NULL);
+	if (!tipc_queue_item_cache)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&signal_queue_head);
+	tasklet_enable(&tipc_tasklet);
+	handler_enabled = 1;
+	return 0;
+}
+
+void handler_stop(void)
+{
+	struct list_head *l, *n;
+	struct queue_item *item; 
+
+	if (!handler_enabled)
+		return;
+
+	handler_enabled = 0;
+	tasklet_disable(&tipc_tasklet);
+	tasklet_kill(&tipc_tasklet);
+
+	spin_lock_bh(&qitem_lock);
+	list_for_each_safe(l, n, &signal_queue_head) {
+		item = list_entry(l, struct queue_item, next_signal);
+		list_del(&item->next_signal);
+		kmem_cache_free(tipc_queue_item_cache, item);
+	}
+	spin_unlock_bh(&qitem_lock);
+
+	kmem_cache_destroy(tipc_queue_item_cache);
+}
+
diff --git a/net/tipc/link.c b/net/tipc/link.c
new file mode 100644
index 0000000..92acb80
--- /dev/null
+++ b/net/tipc/link.c
@@ -0,0 +1,3164 @@
+/*
+ * net/tipc/link.c: TIPC link code
+ * 
+ * Copyright (c) 2003-2005, Ericsson Research Canada
+ * Copyright (c) 2004-2005, Wind River Systems
+ * Copyright (c) 2005-2006, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without 
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this 
+ * list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice, 
+ * this list of conditions and the following disclaimer in the documentation 
+ * and/or other materials provided with the distribution.
+ * Neither the names of the copyright holders nor the names of its 
+ * contributors may be used to endorse or promote products derived from this 
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "dbg.h"
+#include "link.h"
+#include "net.h"
+#include "node.h"
+#include "port.h"
+#include "addr.h"
+#include "node_subscr.h"
+#include "name_distr.h"
+#include "bearer.h"
+#include "name_table.h"
+#include "discover.h"
+#include "config.h"
+#include "bcast.h"
+
+
+/* 
+ * Limit for deferred reception queue: 
+ */
+
+#define DEF_QUEUE_LIMIT 256u
+
+/* 
+ * Link state events: 
+ */
+
+#define  STARTING_EVT    856384768	/* link processing trigger */
+#define  TRAFFIC_MSG_EVT 560815u	/* traffic message received */
+#define  TIMEOUT_EVT     560817u	/* link timer expired */
+
+/*
+ * The following two 'message types' are really just implementation
+ * data conveniently stored in the message header.
+ * They must not be considered part of the protocol.
+ */
+#define OPEN_MSG   0
+#define CLOSED_MSG 1
+
+/* 
+ * State value stored in 'exp_msg_count'
+ */
+
+#define START_CHANGEOVER 100000u
+
+/**
+ * struct link_name - deconstructed link name
+ * @addr_local: network address of node at this end
+ * @if_local: name of interface at this end
+ * @addr_peer: network address of node at far end
+ * @if_peer: name of interface at far end
+ */
+
+struct link_name {
+	u32 addr_local;
+	char if_local[TIPC_MAX_IF_NAME];
+	u32 addr_peer;
+	char if_peer[TIPC_MAX_IF_NAME];
+};
+
+#if 0
+
+/* LINK EVENT CODE IS NOT SUPPORTED AT PRESENT */
+
+/** 
+ * struct link_event - link up/down event notification
+ */
+
+struct link_event {
+	u32 addr;
+	int up;
+	void (*fcn)(u32, char *, int);
+	char name[TIPC_MAX_LINK_NAME];
+};
+
+#endif
+
+static void link_handle_out_of_seq_msg(struct link *l_ptr,
+				       struct sk_buff *buf);
+static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf);
+static int  link_recv_changeover_msg(struct link **l_ptr, struct sk_buff **buf);
+static void link_set_supervision_props(struct link *l_ptr, u32 tolerance);
+static int  link_send_sections_long(struct port *sender,
+				    struct iovec const *msg_sect,
+				    u32 num_sect, u32 destnode);
+static void link_check_defragm_bufs(struct link *l_ptr);
+static void link_state_event(struct link *l_ptr, u32 event);
+static void link_reset_statistics(struct link *l_ptr);
+static void link_print(struct link *l_ptr, struct print_buf *buf, 
+		       const char *str);
+
+/*
+ * Debugging code used by link routines only
+ *
+ * When debugging link problems on a system that has multiple links,
+ * the standard TIPC debugging routines may not be useful since they
+ * allow the output from multiple links to be intermixed.  For this reason
+ * routines of the form "dbg_link_XXX()" have been created that will capture
+ * debug info into a link's personal print buffer, which can then be dumped
+ * into the TIPC system log (LOG) upon request.
+ *
+ * To enable per-link debugging, use LINK_LOG_BUF_SIZE to specify the size
+ * of the print buffer used by each link.  If LINK_LOG_BUF_SIZE is set to 0,
+ * the dbg_link_XXX() routines simply send their output to the standard 
+ * debug print buffer (DBG_OUTPUT), if it has been defined; this can be useful
+ * when there is only a single link in the system being debugged.
+ *
+ * Notes:
+ * - When enabled, LINK_LOG_BUF_SIZE should be set to at least 1000 (bytes)
+ * - "l_ptr" must be valid when using dbg_link_XXX() macros  
+ */
+
+#define LINK_LOG_BUF_SIZE 0
+
+#define dbg_link(fmt, arg...)  do { if (LINK_LOG_BUF_SIZE) tipc_printf(&l_ptr->print_buf, fmt, ## arg); } while (0)
+#define dbg_link_msg(msg, txt) do { if (LINK_LOG_BUF_SIZE) msg_print(&l_ptr->print_buf, msg, txt); } while (0)
+#define dbg_link_state(txt) do { if (LINK_LOG_BUF_SIZE) link_print(l_ptr, &l_ptr->print_buf, txt); } while (0)
+#define dbg_link_dump() do { \
+	if (LINK_LOG_BUF_SIZE) { \
+		tipc_printf(LOG, "\n\nDumping link <%s>:\n", l_ptr->name); \
+		printbuf_move(LOG, &l_ptr->print_buf); \
+	} \
+} while (0)
+
+static inline void dbg_print_link(struct link *l_ptr, const char *str)
+{
+	if (DBG_OUTPUT)
+		link_print(l_ptr, DBG_OUTPUT, str);
+}
+
+static inline void dbg_print_buf_chain(struct sk_buff *root_buf)
+{
+	if (DBG_OUTPUT) {
+		struct sk_buff *buf = root_buf;
+
+		while (buf) {
+			msg_dbg(buf_msg(buf), "In chain: ");
+			buf = buf->next;
+		}
+	}
+}
+
+/*
+ *  Simple inlined link routines
+ */
+
+static inline unsigned int align(unsigned int i)
+{
+	return (i + 3) & ~3u;
+}
+
+static inline int link_working_working(struct link *l_ptr)
+{
+	return (l_ptr->state == WORKING_WORKING);
+}
+
+static inline int link_working_unknown(struct link *l_ptr)
+{
+	return (l_ptr->state == WORKING_UNKNOWN);
+}
+
+static inline int link_reset_unknown(struct link *l_ptr)
+{
+	return (l_ptr->state == RESET_UNKNOWN);
+}
+
+static inline int link_reset_reset(struct link *l_ptr)
+{
+	return (l_ptr->state == RESET_RESET);
+}
+
+static inline int link_blocked(struct link *l_ptr)
+{
+	return (l_ptr->exp_msg_count || l_ptr->blocked);
+}
+
+static inline int link_congested(struct link *l_ptr)
+{
+	return (l_ptr->out_queue_size >= l_ptr->queue_limit[0]);
+}
+
+static inline u32 link_max_pkt(struct link *l_ptr)
+{
+	return l_ptr->max_pkt;
+}
+
+static inline void link_init_max_pkt(struct link *l_ptr)
+{
+	u32 max_pkt;
+	
+	max_pkt = (l_ptr->b_ptr->publ.mtu & ~3);
+	if (max_pkt > MAX_MSG_SIZE)
+		max_pkt = MAX_MSG_SIZE;
+
+	l_ptr->max_pkt_target = max_pkt;
+	if (l_ptr->max_pkt_target < MAX_PKT_DEFAULT)
+		l_ptr->max_pkt = l_ptr->max_pkt_target;
+	else 
+		l_ptr->max_pkt = MAX_PKT_DEFAULT;
+
+	l_ptr->max_pkt_probes = 0;
+}
+
+static inline u32 link_next_sent(struct link *l_ptr)
+{
+	if (l_ptr->next_out)
+		return msg_seqno(buf_msg(l_ptr->next_out));
+	return mod(l_ptr->next_out_no);
+}
+
+static inline u32 link_last_sent(struct link *l_ptr)
+{
+	return mod(link_next_sent(l_ptr) - 1);
+}
+
+/*
+ *  Simple non-inlined link routines (i.e. referenced outside this file)
+ */
+
+int link_is_up(struct link *l_ptr)
+{
+	if (!l_ptr)
+		return 0;
+	return (link_working_working(l_ptr) || link_working_unknown(l_ptr));
+}
+
+int link_is_active(struct link *l_ptr)
+{
+	return ((l_ptr->owner->active_links[0] == l_ptr) ||
+		(l_ptr->owner->active_links[1] == l_ptr));
+}
+
+/**
+ * link_name_validate - validate & (optionally) deconstruct link name
+ * @name: ptr to link name string
+ * @name_parts: ptr to area for link name components (or NULL if not needed)
+ * 
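+ * A valid link name has the form <z.c.n>:<if>-<z.c.n>:<if>, e.g.
+ * "1.1.10:eth0-1.1.11:eth2" (the addresses and interface names shown
+ * here are illustrative).
+ *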
+ * Returns 1 if link name is valid, otherwise 0.
+ */
+
+static int link_name_validate(const char *name, struct link_name *name_parts)
+{
+	char name_copy[TIPC_MAX_LINK_NAME];
+	char *addr_local;
+	char *if_local;
+	char *addr_peer;
+	char *if_peer;
+	char dummy;
+	u32 z_local, c_local, n_local;
+	u32 z_peer, c_peer, n_peer;
+	u32 if_local_len;
+	u32 if_peer_len;
+
+	/* copy link name & ensure length is OK */
+
+	name_copy[TIPC_MAX_LINK_NAME - 1] = 0;
+	/* need above in case non-Posix strncpy() doesn't pad with nulls */
+	strncpy(name_copy, name, TIPC_MAX_LINK_NAME);
+	if (name_copy[TIPC_MAX_LINK_NAME - 1] != 0)
+		return 0;
+
+	/* ensure all component parts of link name are present */
+
+	addr_local = name_copy;
+	if ((if_local = strchr(addr_local, ':')) == NULL)
+		return 0;
+	*(if_local++) = 0;
+	if ((addr_peer = strchr(if_local, '-')) == NULL)
+		return 0;
+	*(addr_peer++) = 0;
+	if_local_len = addr_peer - if_local;
+	if ((if_peer = strchr(addr_peer, ':')) == NULL)
+		return 0;
+	*(if_peer++) = 0;
+	if_peer_len = strlen(if_peer) + 1;
+
+	/* validate component parts of link name */
+
+	if ((sscanf(addr_local, "%u.%u.%u%c",
+		    &z_local, &c_local, &n_local, &dummy) != 3) ||
+	    (sscanf(addr_peer, "%u.%u.%u%c",
+		    &z_peer, &c_peer, &n_peer, &dummy) != 3) ||
+	    (z_local > 255) || (c_local > 4095) || (n_local > 4095) ||
+	    (z_peer  > 255) || (c_peer  > 4095) || (n_peer  > 4095) ||
+	    (if_local_len <= 1) || (if_local_len > TIPC_MAX_IF_NAME) || 
+	    (if_peer_len  <= 1) || (if_peer_len  > TIPC_MAX_IF_NAME) || 
+	    (strspn(if_local, tipc_alphabet) != (if_local_len - 1)) ||
+	    (strspn(if_peer, tipc_alphabet) != (if_peer_len - 1)))
+		return 0;
+
+	/* return link name components, if necessary */
+
+	if (name_parts) {
+		name_parts->addr_local = tipc_addr(z_local, c_local, n_local);
+		strcpy(name_parts->if_local, if_local);
+		name_parts->addr_peer = tipc_addr(z_peer, c_peer, n_peer);
+		strcpy(name_parts->if_peer, if_peer);
+	}
+	return 1;
+}
+
+/**
+ * link_timeout - handle expiration of link timer
+ * @l_ptr: pointer to link
+ * 
+ * This routine must not grab "net_lock" to avoid a potential deadlock conflict
+ * with link_delete().  (There is no risk that the node will be deleted by
+ * another thread because link_delete() always cancels the link timer before
+ * node_delete() is called.)
+ */
+
+static void link_timeout(struct link *l_ptr)
+{
+	node_lock(l_ptr->owner);
+
+	/* update counters used in statistical profiling of send traffic */
+
+	l_ptr->stats.accu_queue_sz += l_ptr->out_queue_size;
+	l_ptr->stats.queue_sz_counts++;
+
+	if (l_ptr->out_queue_size > l_ptr->stats.max_queue_sz)
+		l_ptr->stats.max_queue_sz = l_ptr->out_queue_size;
+
+	if (l_ptr->first_out) {
+		struct tipc_msg *msg = buf_msg(l_ptr->first_out);
+		u32 length = msg_size(msg);
+
+		if ((msg_user(msg) == MSG_FRAGMENTER)
+		    && (msg_type(msg) == FIRST_FRAGMENT)) {
+			length = msg_size(msg_get_wrapped(msg));
+		}
+		if (length) {
+			l_ptr->stats.msg_lengths_total += length;
+			l_ptr->stats.msg_length_counts++;
+			if (length <= 64)
+				l_ptr->stats.msg_length_profile[0]++;
+			else if (length <= 256)
+				l_ptr->stats.msg_length_profile[1]++;
+			else if (length <= 1024)
+				l_ptr->stats.msg_length_profile[2]++;
+			else if (length <= 4096)
+				l_ptr->stats.msg_length_profile[3]++;
+			else if (length <= 16384)
+				l_ptr->stats.msg_length_profile[4]++;
+			else if (length <= 32768)
+				l_ptr->stats.msg_length_profile[5]++;
+			else
+				l_ptr->stats.msg_length_profile[6]++;
+		}
+	}
+
+	/* do all other link processing performed on a periodic basis */
+
+	link_check_defragm_bufs(l_ptr);
+
+	link_state_event(l_ptr, TIMEOUT_EVT);
+
+	if (l_ptr->next_out)
+		link_push_queue(l_ptr);
+
+	node_unlock(l_ptr->owner);
+}
+
+static inline void link_set_timer(struct link *l_ptr, u32 time)
+{
+	k_start_timer(&l_ptr->timer, time);
+}
+
+/**
+ * link_create - create a new link
+ * @b_ptr: pointer to associated bearer
+ * @peer: network address of node at other end of link
+ * @media_addr: media address to use when sending messages over link
+ * 
+ * Returns pointer to link.
+ */
+
+struct link *link_create(struct bearer *b_ptr, const u32 peer,
+			 const struct tipc_media_addr *media_addr)
+{
+	struct link *l_ptr;
+	struct tipc_msg *msg;
+	char *if_name;
+
+	l_ptr = kmalloc(sizeof(*l_ptr), GFP_ATOMIC);
+	if (!l_ptr) {
+		warn("Memory squeeze; failed to create link\n");
+		return NULL;
+	}
+	memset(l_ptr, 0, sizeof(*l_ptr));
+
+	l_ptr->addr = peer;
+	if_name = strchr(b_ptr->publ.name, ':') + 1;
+	sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:",
+		tipc_zone(tipc_own_addr), tipc_cluster(tipc_own_addr),
+		tipc_node(tipc_own_addr), 
+		if_name,
+		tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
+		/* note: peer i/f is appended to link name by reset/activate */
+	memcpy(&l_ptr->media_addr, media_addr, sizeof(*media_addr));
+	k_init_timer(&l_ptr->timer, (Handler)link_timeout, (unsigned long)l_ptr);
+	list_add_tail(&l_ptr->link_list, &b_ptr->links);
+	l_ptr->checkpoint = 1;
+	l_ptr->b_ptr = b_ptr;
+	link_set_supervision_props(l_ptr, b_ptr->media->tolerance);
+	l_ptr->state = RESET_UNKNOWN;
+
+	l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg;
+	msg = l_ptr->pmsg;
+	msg_init(msg, LINK_PROTOCOL, RESET_MSG, TIPC_OK, INT_H_SIZE, l_ptr->addr);
+	msg_set_size(msg, sizeof(l_ptr->proto_msg));
+	msg_set_session(msg, tipc_random);
+	msg_set_bearer_id(msg, b_ptr->identity);
+	strcpy((char *)msg_data(msg), if_name);
+
+	l_ptr->priority = b_ptr->priority;
+	link_set_queue_limits(l_ptr, b_ptr->media->window);
+
+	link_init_max_pkt(l_ptr);
+
+	l_ptr->next_out_no = 1;
+	INIT_LIST_HEAD(&l_ptr->waiting_ports);
+
+	link_reset_statistics(l_ptr);
+
+	l_ptr->owner = node_attach_link(l_ptr);
+	if (!l_ptr->owner) {
+		kfree(l_ptr);
+		return NULL;
+	}
+
+	if (LINK_LOG_BUF_SIZE) {
+		char *pb = kmalloc(LINK_LOG_BUF_SIZE, GFP_ATOMIC);
+
+		if (!pb) {
+			kfree(l_ptr);
+			warn("Memory squeeze; Failed to create link\n");
+			return NULL;
+		}
+		printbuf_init(&l_ptr->print_buf, pb, LINK_LOG_BUF_SIZE);
+	}
+
+	k_signal((Handler)link_start, (unsigned long)l_ptr);
+
+	dbg("link_create(): tolerance = %u,cont intv = %u, abort_limit = %u\n",
+	    l_ptr->tolerance, l_ptr->continuity_interval, l_ptr->abort_limit);
+	
+	return l_ptr;
+}
+
+/** 
+ * link_delete - delete a link
+ * @l_ptr: pointer to link
+ * 
+ * Note: 'net_lock' is write_locked, bearer is locked.
+ * This routine must not grab the node lock until after link timer cancellation
+ * to avoid a potential deadlock situation.  
+ */
+
+void link_delete(struct link *l_ptr)
+{
+	if (!l_ptr) {
+		err("Attempt to delete non-existent link\n");
+		return;
+	}
+
+	dbg("link_delete()\n");
+
+	k_cancel_timer(&l_ptr->timer);
+	
+	node_lock(l_ptr->owner);
+	link_reset(l_ptr);
+	node_detach_link(l_ptr->owner, l_ptr);
+	link_stop(l_ptr);
+	list_del_init(&l_ptr->link_list);
+	if (LINK_LOG_BUF_SIZE)
+		kfree(l_ptr->print_buf.buf);
+	node_unlock(l_ptr->owner);
+	k_term_timer(&l_ptr->timer);
+	kfree(l_ptr);
+}
+
+void link_start(struct link *l_ptr)
+{
+	dbg("link_start %x\n", l_ptr);
+	link_state_event(l_ptr, STARTING_EVT);
+}
+
+/**
+ * link_schedule_port - schedule port for deferred sending 
+ * @l_ptr: pointer to link
+ * @origport: reference to sending port
+ * @sz: amount of data to be sent
+ * 
+ * Schedules port for renewed sending of messages after link congestion 
+ * has abated.
+ */
+
+static int link_schedule_port(struct link *l_ptr, u32 origport, u32 sz)
+{
+	struct port *p_ptr;
+
+	spin_lock_bh(&port_list_lock);
+	p_ptr = port_lock(origport);
+	if (p_ptr) {
+		if (!p_ptr->wakeup)
+			goto exit;
+		if (!list_empty(&p_ptr->wait_list))
+			goto exit;
+		p_ptr->congested_link = l_ptr;
+		p_ptr->publ.congested = 1;
+		p_ptr->waiting_pkts = 1 + ((sz - 1) / link_max_pkt(l_ptr));
+		list_add_tail(&p_ptr->wait_list, &l_ptr->waiting_ports);
+		l_ptr->stats.link_congs++;
+exit:
+		port_unlock(p_ptr);
+	}
+	spin_unlock_bh(&port_list_lock);
+	return -ELINKCONG;
+}
+
+void link_wakeup_ports(struct link *l_ptr, int all)
+{
+	struct port *p_ptr;
+	struct port *temp_p_ptr;
+	int win = l_ptr->queue_limit[0] - l_ptr->out_queue_size;
+
+	if (all)
+		win = 100000;
+	if (win <= 0)
+		return;
+	if (!spin_trylock_bh(&port_list_lock))
+		return;
+	if (link_congested(l_ptr))
+		goto exit;
+	list_for_each_entry_safe(p_ptr, temp_p_ptr, &l_ptr->waiting_ports, 
+				 wait_list) {
+		if (win <= 0)
+			break;
+		list_del_init(&p_ptr->wait_list);
+		p_ptr->congested_link = NULL;
+		assert(p_ptr->wakeup);
+		spin_lock_bh(p_ptr->publ.lock);
+		p_ptr->publ.congested = 0;
+		p_ptr->wakeup(&p_ptr->publ);
+		win -= p_ptr->waiting_pkts;
+		spin_unlock_bh(p_ptr->publ.lock);
+	}
+
+exit:
+	spin_unlock_bh(&port_list_lock);
+}
+
+/** 
+ * link_release_outqueue - purge link's outbound message queue
+ * @l_ptr: pointer to link
+ */
+
+static void link_release_outqueue(struct link *l_ptr)
+{
+	struct sk_buff *buf = l_ptr->first_out;
+	struct sk_buff *next;
+
+	while (buf) {
+		next = buf->next;
+		buf_discard(buf);
+		buf = next;
+	}
+	l_ptr->first_out = NULL;
+	l_ptr->out_queue_size = 0;
+}
+
+/**
+ * link_reset_fragments - purge link's inbound message fragments queue
+ * @l_ptr: pointer to link
+ */
+
+void link_reset_fragments(struct link *l_ptr)
+{
+	struct sk_buff *buf = l_ptr->defragm_buf;
+	struct sk_buff *next;
+
+	while (buf) {
+		next = buf->next;
+		buf_discard(buf);
+		buf = next;
+	}
+	l_ptr->defragm_buf = NULL;
+}
+
+/** 
+ * link_stop - purge all inbound and outbound messages associated with link
+ * @l_ptr: pointer to link
+ */
+
+void link_stop(struct link *l_ptr)
+{
+	struct sk_buff *buf;
+	struct sk_buff *next;
+
+	buf = l_ptr->oldest_deferred_in;
+	while (buf) {
+		next = buf->next;
+		buf_discard(buf);
+		buf = next;
+	}
+
+	buf = l_ptr->first_out;
+	while (buf) {
+		next = buf->next;
+		buf_discard(buf);
+		buf = next;
+	}
+
+	link_reset_fragments(l_ptr);
+
+	buf_discard(l_ptr->proto_msg_queue);
+	l_ptr->proto_msg_queue = NULL;
+}
+
+#if 0
+
+/* LINK EVENT CODE IS NOT SUPPORTED AT PRESENT */
+
+static void link_recv_event(struct link_event *ev)
+{
+	ev->fcn(ev->addr, ev->name, ev->up);
+	kfree(ev);
+}
+
+static void link_send_event(void (*fcn)(u32 a, char *n, int up),
+			    struct link *l_ptr, int up)
+{
+	struct link_event *ev;
+	
+	ev = kmalloc(sizeof(*ev), GFP_ATOMIC);
+	if (!ev) {
+		warn("Link event allocation failure\n");
+		return;
+	}
+	ev->addr = l_ptr->addr;
+	ev->up = up;
+	ev->fcn = fcn;
+	memcpy(ev->name, l_ptr->name, TIPC_MAX_LINK_NAME);
+	k_signal((Handler)link_recv_event, (unsigned long)ev);
+}
+
+#else
+
+#define link_send_event(fcn, l_ptr, up) do { } while (0)
+
+#endif
+
+void link_reset(struct link *l_ptr)
+{
+	struct sk_buff *buf;
+	u32 prev_state = l_ptr->state;
+	u32 checkpoint = l_ptr->next_in_no;
+	
+	msg_set_session(l_ptr->pmsg, msg_session(l_ptr->pmsg) + 1);
+
+	/* Link is down, accept any session: */
+	l_ptr->peer_session = 0;
+
+	/* Prepare for max packet size negotiation */
+	link_init_max_pkt(l_ptr);
+	
+	l_ptr->state = RESET_UNKNOWN;
+	dbg_link_state("Resetting Link\n");
+
+	if ((prev_state == RESET_UNKNOWN) || (prev_state == RESET_RESET))
+		return;
+
+	node_link_down(l_ptr->owner, l_ptr);
+	bearer_remove_dest(l_ptr->b_ptr, l_ptr->addr);
+#if 0
+	tipc_printf(CONS, "\nReset link <%s>\n", l_ptr->name);
+	dbg_link_dump();
+#endif
+	if (node_has_active_links(l_ptr->owner) &&
+	    l_ptr->owner->permit_changeover) {
+		l_ptr->reset_checkpoint = checkpoint;
+		l_ptr->exp_msg_count = START_CHANGEOVER;
+	}
+
+	/* Clean up all queues: */
+
+	link_release_outqueue(l_ptr);
+	buf_discard(l_ptr->proto_msg_queue);
+	l_ptr->proto_msg_queue = NULL;
+	buf = l_ptr->oldest_deferred_in;
+	while (buf) {
+		struct sk_buff *next = buf->next;
+		buf_discard(buf);
+		buf = next;
+	}
+	if (!list_empty(&l_ptr->waiting_ports))
+		link_wakeup_ports(l_ptr, 1);
+
+	l_ptr->retransm_queue_head = 0;
+	l_ptr->retransm_queue_size = 0;
+	l_ptr->last_out = NULL;
+	l_ptr->first_out = NULL;
+	l_ptr->next_out = NULL;
+	l_ptr->unacked_window = 0;
+	l_ptr->checkpoint = 1;
+	l_ptr->next_out_no = 1;
+	l_ptr->deferred_inqueue_sz = 0;
+	l_ptr->oldest_deferred_in = NULL;
+	l_ptr->newest_deferred_in = NULL;
+	l_ptr->fsm_msg_cnt = 0;
+	l_ptr->stale_count = 0;
+	link_reset_statistics(l_ptr);
+
+	link_send_event(cfg_link_event, l_ptr, 0);
+	if (!in_own_cluster(l_ptr->addr))
+		link_send_event(disc_link_event, l_ptr, 0);
+}
+
+
+static void link_activate(struct link *l_ptr)
+{
+	l_ptr->next_in_no = 1;
+	node_link_up(l_ptr->owner, l_ptr);
+	bearer_add_dest(l_ptr->b_ptr, l_ptr->addr);
+	link_send_event(cfg_link_event, l_ptr, 1);
+	if (!in_own_cluster(l_ptr->addr))
+		link_send_event(disc_link_event, l_ptr, 1);
+}
+
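+/*
+ * Link FSM sketch (an informal summary of link_state_event() below;
+ * see the code for the complete set of transitions):
+ *
+ *	RESET_UNKNOWN    --RESET_MSG-------->  RESET_RESET
+ *	RESET_RESET      --ACTIVATE_MSG----->  WORKING_WORKING
+ *	WORKING_WORKING  --timeout, no rx--->  WORKING_UNKNOWN
+ *	WORKING_UNKNOWN  --traffic received->  WORKING_WORKING
+ *	WORKING_UNKNOWN  --probe limit hit-->  RESET_UNKNOWN
+ */
+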
+/**
+ * link_state_event - link finite state machine
+ * @l_ptr: pointer to link
+ * @event: state machine event to process
+ */
+
+static void link_state_event(struct link *l_ptr, unsigned event)
+{
+	struct link *other; 
+	u32 cont_intv = l_ptr->continuity_interval;
+
+	if (!l_ptr->started && (event != STARTING_EVT))
+		return;		/* Not yet. */
+
+	if (link_blocked(l_ptr)) {
+		if (event == TIMEOUT_EVT) {
+			link_set_timer(l_ptr, cont_intv);
+		}
+		return;	  /* Changeover going on */
+	}
+	dbg_link("STATE_EV: <%s> ", l_ptr->name);
+
+	switch (l_ptr->state) {
+	case WORKING_WORKING:
+		dbg_link("WW/");
+		switch (event) {
+		case TRAFFIC_MSG_EVT:
+			dbg_link("TRF-");
+			/* fall through */
+		case ACTIVATE_MSG:
+			dbg_link("ACT\n");
+			break;
+		case TIMEOUT_EVT:
+			dbg_link("TIM ");
+			if (l_ptr->next_in_no != l_ptr->checkpoint) {
+				l_ptr->checkpoint = l_ptr->next_in_no;
+				if (bclink_acks_missing(l_ptr->owner)) {
+					link_send_proto_msg(l_ptr, STATE_MSG, 
+							    0, 0, 0, 0, 0);
+					l_ptr->fsm_msg_cnt++;
+				} else if (l_ptr->max_pkt < l_ptr->max_pkt_target) {
+					link_send_proto_msg(l_ptr, STATE_MSG, 
+							    1, 0, 0, 0, 0);
+					l_ptr->fsm_msg_cnt++;
+				}
+				link_set_timer(l_ptr, cont_intv);
+				break;
+			}
+			dbg_link(" -> WU\n");
+			l_ptr->state = WORKING_UNKNOWN;
+			l_ptr->fsm_msg_cnt = 0;
+			link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
+			l_ptr->fsm_msg_cnt++;
+			link_set_timer(l_ptr, cont_intv / 4);
+			break;
+		case RESET_MSG:
+			dbg_link("RES -> RR\n");
+			link_reset(l_ptr);
+			l_ptr->state = RESET_RESET;
+			l_ptr->fsm_msg_cnt = 0;
+			link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
+			l_ptr->fsm_msg_cnt++;
+			link_set_timer(l_ptr, cont_intv);
+			break;
+		default:
+			err("Unknown link event %u in WW state\n", event);
+		}
+		break;
+	case WORKING_UNKNOWN:
+		dbg_link("WU/");
+		switch (event) {
+		case TRAFFIC_MSG_EVT:
+			dbg_link("TRF-");
+		case ACTIVATE_MSG:
+			dbg_link("ACT -> WW\n");
+			l_ptr->state = WORKING_WORKING;
+			l_ptr->fsm_msg_cnt = 0;
+			link_set_timer(l_ptr, cont_intv);
+			break;
+		case RESET_MSG:
+			dbg_link("RES -> RR\n");
+			link_reset(l_ptr);
+			l_ptr->state = RESET_RESET;
+			l_ptr->fsm_msg_cnt = 0;
+			link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
+			l_ptr->fsm_msg_cnt++;
+			link_set_timer(l_ptr, cont_intv);
+			break;
+		case TIMEOUT_EVT:
+			dbg_link("TIM ");
+			if (l_ptr->next_in_no != l_ptr->checkpoint) {
+				dbg_link("-> WW \n");
+				l_ptr->state = WORKING_WORKING;
+				l_ptr->fsm_msg_cnt = 0;
+				l_ptr->checkpoint = l_ptr->next_in_no;
+				if (bclink_acks_missing(l_ptr->owner)) {
+					link_send_proto_msg(l_ptr, STATE_MSG,
+							    0, 0, 0, 0, 0);
+					l_ptr->fsm_msg_cnt++;
+				}
+				link_set_timer(l_ptr, cont_intv);
+			} else if (l_ptr->fsm_msg_cnt < l_ptr->abort_limit) {
+				dbg_link("Probing %u/%u, timer = %u ms\n",
+					 l_ptr->fsm_msg_cnt, l_ptr->abort_limit,
+					 cont_intv / 4);
+				link_send_proto_msg(l_ptr, STATE_MSG, 
+						    1, 0, 0, 0, 0);
+				l_ptr->fsm_msg_cnt++;
+				link_set_timer(l_ptr, cont_intv / 4);
+			} else {	/* Link has failed */
+				dbg_link("-> RU (%u probes unanswered)\n",
+					 l_ptr->fsm_msg_cnt);
+				link_reset(l_ptr);
+				l_ptr->state = RESET_UNKNOWN;
+				l_ptr->fsm_msg_cnt = 0;
+				link_send_proto_msg(l_ptr, RESET_MSG,
+						    0, 0, 0, 0, 0);
+				l_ptr->fsm_msg_cnt++;
+				link_set_timer(l_ptr, cont_intv);
+			}
+			break;
+		default:
+			err("Unknown link event %u in WU state\n", event);
+		}
+		break;
+	case RESET_UNKNOWN:
+		dbg_link("RU/");
+		switch (event) {
+		case TRAFFIC_MSG_EVT:
+			dbg_link("TRF-\n");
+			break;
+		case ACTIVATE_MSG:
+			other = l_ptr->owner->active_links[0];
+			if (other && link_working_unknown(other)) {
+				dbg_link("ACT\n");
+				break;
+			}
+			dbg_link("ACT -> WW\n");
+			l_ptr->state = WORKING_WORKING;
+			l_ptr->fsm_msg_cnt = 0;
+			link_activate(l_ptr);
+			link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
+			l_ptr->fsm_msg_cnt++;
+			link_set_timer(l_ptr, cont_intv);
+			break;
+		case RESET_MSG:
+			dbg_link("RES -> RR\n");
+			l_ptr->state = RESET_RESET;
+			l_ptr->fsm_msg_cnt = 0;
+			link_send_proto_msg(l_ptr, ACTIVATE_MSG, 1, 0, 0, 0, 0);
+			l_ptr->fsm_msg_cnt++;
+			link_set_timer(l_ptr, cont_intv);
+			break;
+		case STARTING_EVT:
+			dbg_link("START-");
+			l_ptr->started = 1;
+			/* fall through */
+		case TIMEOUT_EVT:
+			dbg_link("TIM \n");
+			link_send_proto_msg(l_ptr, RESET_MSG, 0, 0, 0, 0, 0);
+			l_ptr->fsm_msg_cnt++;
+			link_set_timer(l_ptr, cont_intv);
+			break;
+		default:
+			err("Unknown link event %u in RU state\n", event);
+		}
+		break;
+	case RESET_RESET:
+		dbg_link("RR/ ");
+		switch (event) {
+		case TRAFFIC_MSG_EVT:
+			dbg_link("TRF-");
+			/* fall through */
+		case ACTIVATE_MSG:
+			other = l_ptr->owner->active_links[0];
+			if (other && link_working_unknown(other)) {
+				dbg_link("ACT\n");
+				break;
+			}
+			dbg_link("ACT -> WW\n");
+			l_ptr->state = WORKING_WORKING;
+			l_ptr->fsm_msg_cnt = 0;
+			link_activate(l_ptr);
+			link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
+			l_ptr->fsm_msg_cnt++;
+			link_set_timer(l_ptr, cont_intv);
+			break;
+		case RESET_MSG:
+			dbg_link("RES\n");
+			break;
+		case TIMEOUT_EVT:
+			dbg_link("TIM\n");
+			link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
+			l_ptr->fsm_msg_cnt++;
+			link_set_timer(l_ptr, cont_intv);
+			dbg_link("fsm_msg_cnt %u\n", l_ptr->fsm_msg_cnt);
+			break;
+		default:
+			err("Unknown link event %u in RR state\n", event);
+		}
+		break;
+	default:
+		err("Unknown link state %u/%u\n", l_ptr->state, event);
+	}
+}
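+
+/*
+ * Editorial summary of the state machine above, derived from the
+ * switch statement: an ACTIVATE_MSG (or, in the two working states,
+ * ordinary traffic) moves the link towards WORKING_WORKING; a
+ * RESET_MSG forces it into RESET_RESET; repeated TIMEOUT_EVTs in
+ * WORKING_UNKNOWN send up to abort_limit probes at cont_intv/4
+ * before the link is declared failed and falls back to RESET_UNKNOWN,
+ * from where a RESET_MSG is emitted every cont_intv until the peer
+ * answers.
+ */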
+
+/*
+ * link_bundle_buf(): Append contents of a buffer to
+ * the tail of an existing one. 
+ */
+
+static int link_bundle_buf(struct link *l_ptr,
+			   struct sk_buff *bundler, 
+			   struct sk_buff *buf)
+{
+	struct tipc_msg *bundler_msg = buf_msg(bundler);
+	struct tipc_msg *msg = buf_msg(buf);
+	u32 size = msg_size(msg);
+	u32 to_pos = align(msg_size(bundler_msg));
+	u32 rest = link_max_pkt(l_ptr) - to_pos;
+
+	if (msg_user(bundler_msg) != MSG_BUNDLER)
+		return 0;
+	if (msg_type(bundler_msg) != OPEN_MSG)
+		return 0;
+	if (rest < align(size))
+		return 0;
+
+	skb_put(bundler, (to_pos - msg_size(bundler_msg)) + size);
+	memcpy(bundler->data + to_pos, buf->data, size);
+	msg_set_size(bundler_msg, to_pos + size);
+	msg_set_msgcnt(bundler_msg, msg_msgcnt(bundler_msg) + 1);
+	dbg("Packed msg # %u(%u octets) into pos %u in buf(#%u)\n",
+	    msg_msgcnt(bundler_msg), size, to_pos, msg_seqno(bundler_msg));
+	msg_dbg(msg, "PACKD:");
+	buf_discard(buf);
+	l_ptr->stats.sent_bundled++;
+	return 1;
+}
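+
+/*
+ * Worked example for the bundling check above (illustrative only;
+ * assumes align() rounds up to a 4-byte boundary and a 1500-octet
+ * link_max_pkt()): a bundle currently holding 180 octets and an
+ * incoming 100-octet message give to_pos = 180 and rest = 1320, so
+ * the message is copied in at offset 180, the bundle grows to 280
+ * octets, and its message counter is incremented.
+ */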
+
+static inline void link_add_to_outqueue(struct link *l_ptr, 
+					struct sk_buff *buf, 
+					struct tipc_msg *msg)
+{
+	u32 ack = mod(l_ptr->next_in_no - 1);
+	u32 seqno = mod(l_ptr->next_out_no++);
+
+	msg_set_word(msg, 2, ((ack << 16) | seqno));
+	msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
+	buf->next = NULL;
+	if (l_ptr->first_out) {
+		l_ptr->last_out->next = buf;
+		l_ptr->last_out = buf;
+	} else
+		l_ptr->first_out = l_ptr->last_out = buf;
+	l_ptr->out_queue_size++;
+}
+
+/* 
+ * link_send_buf() is the 'full path' for messages, called from 
+ * inside TIPC when the 'fast path' in tipc_send_buf
+ * has failed, and from link_send()
+ */
+
+int link_send_buf(struct link *l_ptr, struct sk_buff *buf)
+{
+	struct tipc_msg *msg = buf_msg(buf);
+	u32 size = msg_size(msg);
+	u32 dsz = msg_data_sz(msg);
+	u32 queue_size = l_ptr->out_queue_size;
+	u32 imp = msg_tot_importance(msg);
+	u32 queue_limit = l_ptr->queue_limit[imp];
+	u32 max_packet = link_max_pkt(l_ptr);
+
+	msg_set_prevnode(msg, tipc_own_addr);	/* If routed message */
+
+	/* Match msg importance against queue limits: */
+
+	if (unlikely(queue_size >= queue_limit)) {
+		if (imp <= TIPC_CRITICAL_IMPORTANCE) {
+			return link_schedule_port(l_ptr, msg_origport(msg),
+						  size);
+		}
+		msg_dbg(msg, "TIPC: Congestion, throwing away\n");
+		buf_discard(buf);
+		if (imp > CONN_MANAGER) {
+			warn("Resetting <%s>, send queue full\n", l_ptr->name);
+			link_reset(l_ptr);
+		}
+		return dsz;
+	}
+
+	/* Fragmentation needed ? */
+
+	if (size > max_packet)
+		return link_send_long_buf(l_ptr, buf);
+
+	/* Packet can be queued or sent: */
+
+	if (queue_size > l_ptr->stats.max_queue_sz)
+		l_ptr->stats.max_queue_sz = queue_size;
+
+	if (likely(!bearer_congested(l_ptr->b_ptr, l_ptr) && 
+		   !link_congested(l_ptr))) {
+		link_add_to_outqueue(l_ptr, buf, msg);
+
+		if (likely(bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr))) {
+			l_ptr->unacked_window = 0;
+		} else {
+			bearer_schedule(l_ptr->b_ptr, l_ptr);
+			l_ptr->stats.bearer_congs++;
+			l_ptr->next_out = buf;
+		}
+		return dsz;
+	}
+	/* Congestion: can message be bundled? */
+
+	if ((msg_user(msg) != CHANGEOVER_PROTOCOL) &&
+	    (msg_user(msg) != MSG_FRAGMENTER)) {
+
+		/* Try adding message to an existing bundle */
+
+		if (l_ptr->next_out && 
+		    link_bundle_buf(l_ptr, l_ptr->last_out, buf)) {
+			bearer_resolve_congestion(l_ptr->b_ptr, l_ptr);
+			return dsz;
+		}
+
+		/* Try creating a new bundle */
+
+		if (size <= max_packet * 2 / 3) {
+			struct sk_buff *bundler = buf_acquire(max_packet);
+			struct tipc_msg bundler_hdr;
+
+			if (bundler) {
+				msg_init(&bundler_hdr, MSG_BUNDLER, OPEN_MSG,
+					 TIPC_OK, INT_H_SIZE, l_ptr->addr);
+				memcpy(bundler->data, (unchar *)&bundler_hdr, 
+				       INT_H_SIZE);
+				skb_trim(bundler, INT_H_SIZE);
+				link_bundle_buf(l_ptr, bundler, buf);
+				buf = bundler;
+				msg = buf_msg(buf);
+				l_ptr->stats.sent_bundles++;
+			}
+		}
+	}
+	if (!l_ptr->next_out)
+		l_ptr->next_out = buf;
+	link_add_to_outqueue(l_ptr, buf, msg);
+	bearer_resolve_congestion(l_ptr->b_ptr, l_ptr);
+	return dsz;
+}
+
+/* 
+ * link_send(): same as link_send_buf(), but the link to use has
+ * not been selected yet, and the owner node is not locked.
+ * Called by TIPC internal users, e.g. the name distributor.
+ */
+
+int link_send(struct sk_buff *buf, u32 dest, u32 selector)
+{
+	struct link *l_ptr;
+	struct node *n_ptr;
+	int res = -ELINKCONG;
+
+	read_lock_bh(&net_lock);
+	n_ptr = node_select(dest, selector);
+	if (n_ptr) {
+		node_lock(n_ptr);
+		l_ptr = n_ptr->active_links[selector & 1];
+		dbg("link_send: found link %p for dest %x\n", l_ptr, dest);
+		if (l_ptr) {
+			res = link_send_buf(l_ptr, buf);
+		}
+		node_unlock(n_ptr);
+	} else {
+		dbg("Attempt to send msg to unknown node:\n");
+		msg_dbg(buf_msg(buf), ">>>");
+		buf_discard(buf);
+	}
+	read_unlock_bh(&net_lock);
+	return res;
+}
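+
+/*
+ * Minimal calling sketch for link_send() (illustrative only, and
+ * disabled like the other #if 0 blocks in this file):
+ */
+#if 0
+static int link_send_example(struct sk_buff *buf, u32 destnode)
+{
+	/*
+	 * 'buf' must already carry a complete TIPC message; only the
+	 * low bit of the selector is used to pick one of the (at most
+	 * two) active links towards the destination node.
+	 */
+	return link_send(buf, destnode, destnode);
+}
+#endif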
+
+/* 
+ * link_send_buf_fast: Entry for data messages where the 
+ * destination link is known and the header is complete,
+ * including the total message length. Very time critical.
+ * Link is locked. Returns user data length.
+ */
+
+static inline int link_send_buf_fast(struct link *l_ptr, struct sk_buff *buf,
+				     u32 *used_max_pkt)
+{
+	struct tipc_msg *msg = buf_msg(buf);
+	int res = msg_data_sz(msg);
+
+	if (likely(!link_congested(l_ptr))) {
+		if (likely(msg_size(msg) <= link_max_pkt(l_ptr))) {
+			if (likely(list_empty(&l_ptr->b_ptr->cong_links))) {
+				link_add_to_outqueue(l_ptr, buf, msg);
+				if (likely(bearer_send(l_ptr->b_ptr, buf,
+						       &l_ptr->media_addr))) {
+					l_ptr->unacked_window = 0;
+					msg_dbg(msg, "SENT_FAST:");
+					return res;
+				}
+				dbg("failed sent fast...\n");
+				bearer_schedule(l_ptr->b_ptr, l_ptr);
+				l_ptr->stats.bearer_congs++;
+				l_ptr->next_out = buf;
+				return res;
+			}
+		} else
+			*used_max_pkt = link_max_pkt(l_ptr);
+	}
+	return link_send_buf(l_ptr, buf);  /* All other cases */
+}
+
+/* 
+ * tipc_send_buf_fast: Entry for data messages where the 
+ * destination node is known and the header is complete,
+ * including the total message length.
+ * Returns user data length.
+ */
+int tipc_send_buf_fast(struct sk_buff *buf, u32 destnode)
+{
+	struct link *l_ptr;
+	struct node *n_ptr;
+	int res;
+	u32 selector = msg_origport(buf_msg(buf)) & 1;
+	u32 dummy;
+
+	if (destnode == tipc_own_addr)
+		return port_recv_msg(buf);
+
+	read_lock_bh(&net_lock);
+	n_ptr = node_select(destnode, selector);
+	if (likely(n_ptr)) {
+		node_lock(n_ptr);
+		l_ptr = n_ptr->active_links[selector];
+		dbg("send_fast: buf %p selected %p, destnode = %x\n",
+		    buf, l_ptr, destnode);
+		if (likely(l_ptr)) {
+			res = link_send_buf_fast(l_ptr, buf, &dummy);
+			node_unlock(n_ptr);
+			read_unlock_bh(&net_lock);
+			return res;
+		}
+		node_unlock(n_ptr);
+	}
+	read_unlock_bh(&net_lock);
+	res = msg_data_sz(buf_msg(buf));
+	tipc_reject_msg(buf, TIPC_ERR_NO_NODE);
+	return res;
+}
+
+
+/* 
+ * link_send_sections_fast: Entry for messages where the 
+ * destination processor is known and the header is complete,
+ * except for total message length. 
+ * Returns user data length or errno.
+ */
+int link_send_sections_fast(struct port *sender, 
+			    struct iovec const *msg_sect,
+			    const u32 num_sect, 
+			    u32 destaddr)
+{
+	struct tipc_msg *hdr = &sender->publ.phdr;
+	struct link *l_ptr;
+	struct sk_buff *buf;
+	struct node *node;
+	int res;
+	u32 selector = msg_origport(hdr) & 1;
+
+	assert(destaddr != tipc_own_addr);
+
+again:
+	/*
+	 * Try building message using port's max_pkt hint.
+	 * (Must not hold any locks while building message.)
+	 */
+
+	res = msg_build(hdr, msg_sect, num_sect, sender->max_pkt,
+			!sender->user_port, &buf);
+
+	read_lock_bh(&net_lock);
+	node = node_select(destaddr, selector);
+	if (likely(node)) {
+		node_lock(node);
+		l_ptr = node->active_links[selector];
+		if (likely(l_ptr)) {
+			if (likely(buf)) {
+				res = link_send_buf_fast(l_ptr, buf,
+							 &sender->max_pkt);
+				if (unlikely(res < 0))
+					buf_discard(buf);
+exit:
+				node_unlock(node);
+				read_unlock_bh(&net_lock);
+				return res;
+			}
+
+			/* Exit if build request was invalid */
+
+			if (unlikely(res < 0))
+				goto exit;
+
+			/* Exit if link (or bearer) is congested */
+
+			if (link_congested(l_ptr) || 
+			    !list_empty(&l_ptr->b_ptr->cong_links)) {
+				res = link_schedule_port(l_ptr,
+							 sender->publ.ref, res);
+				goto exit;
+			}
+
+			/* 
+			 * Message size exceeds max_pkt hint; update hint,
+			 * then re-try fast path or fragment the message
+			 */
+
+			sender->max_pkt = link_max_pkt(l_ptr);
+			node_unlock(node);
+			read_unlock_bh(&net_lock);
+
+			if ((msg_hdr_sz(hdr) + res) <= sender->max_pkt)
+				goto again;
+
+			return link_send_sections_long(sender, msg_sect,
+						       num_sect, destaddr);
+		}
+		node_unlock(node);
+	}
+	read_unlock_bh(&net_lock);
+
+	/* Couldn't find a link to the destination node */
+
+	if (buf)
+		return tipc_reject_msg(buf, TIPC_ERR_NO_NODE);
+	if (res >= 0)
+		return port_reject_sections(sender, hdr, msg_sect, num_sect,
+					    TIPC_ERR_NO_NODE);
+	return res;
+}
+
+/* 
+ * link_send_sections_long(): Entry for long messages where the 
+ * destination node is known and the header is complete,
+ * including the total message length.
+ * Link and bearer congestion status have been checked to be ok,
+ * and are ignored if they change.
+ *
+ * Note that fragments do not use the full link MTU so that they won't have
+ * to undergo refragmentation if link changeover causes them to be sent
+ * over another link with an additional tunnel header added as prefix.
+ * (Refragmentation will still occur if the other link has a smaller MTU.)
+ *
+ * Returns user data length or errno.
+ */
+static int link_send_sections_long(struct port *sender,
+				   struct iovec const *msg_sect,
+				   u32 num_sect,
+				   u32 destaddr)
+{
+	struct link *l_ptr;
+	struct node *node;
+	struct tipc_msg *hdr = &sender->publ.phdr;
+	u32 dsz = msg_data_sz(hdr);
+	u32 max_pkt, fragm_sz, rest;
+	struct tipc_msg fragm_hdr;
+	struct sk_buff *buf, *buf_chain, *prev;
+	u32 fragm_crs, fragm_rest, hsz, sect_rest;
+	const unchar *sect_crs;
+	int curr_sect;
+	u32 fragm_no;
+
+again:
+	fragm_no = 1;
+	max_pkt = sender->max_pkt - INT_H_SIZE;  
+		/* leave room for tunnel header in case of link changeover */
+	fragm_sz = max_pkt - INT_H_SIZE; 
+		/* leave room for fragmentation header in each fragment */
+	rest = dsz;
+	fragm_crs = 0;
+	fragm_rest = 0;
+	sect_rest = 0;
+	sect_crs = 0;
+	curr_sect = -1;
+
+	/* Prepare reusable fragment header: */
+
+	msg_dbg(hdr, ">FRAGMENTING>");
+	msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
+		 TIPC_OK, INT_H_SIZE, msg_destnode(hdr));
+	msg_set_link_selector(&fragm_hdr, sender->publ.ref);
+	msg_set_size(&fragm_hdr, max_pkt);
+	msg_set_fragm_no(&fragm_hdr, 1);
+
+	/* Prepare header of first fragment: */
+
+	buf_chain = buf = buf_acquire(max_pkt);
+	if (!buf)
+		return -ENOMEM;
+	buf->next = NULL;
+	memcpy(buf->data, (unchar *)&fragm_hdr, INT_H_SIZE);
+	hsz = msg_hdr_sz(hdr);
+	memcpy(buf->data + INT_H_SIZE, (unchar *)hdr, hsz);
+	msg_dbg(buf_msg(buf), ">BUILD>");
+
+	/* Chop up message: */
+
+	fragm_crs = INT_H_SIZE + hsz;
+	fragm_rest = fragm_sz - hsz;
+
+	do {		/* For all sections */
+		u32 sz;
+
+		if (!sect_rest) {
+			sect_rest = msg_sect[++curr_sect].iov_len;
+			sect_crs = (const unchar *)msg_sect[curr_sect].iov_base;
+		}
+
+		if (sect_rest < fragm_rest)
+			sz = sect_rest;
+		else
+			sz = fragm_rest;
+
+		if (likely(!sender->user_port)) {
+			if (copy_from_user(buf->data + fragm_crs, sect_crs, sz)) {
+error:
+				for (; buf_chain; buf_chain = buf) {
+					buf = buf_chain->next;
+					buf_discard(buf_chain);
+				}
+				return -EFAULT;
+			}
+		} else
+			memcpy(buf->data + fragm_crs, sect_crs, sz);
+
+		sect_crs += sz;
+		sect_rest -= sz;
+		fragm_crs += sz;
+		fragm_rest -= sz;
+		rest -= sz;
+
+		if (!fragm_rest && rest) {
+
+			/* Initiate new fragment: */
+			if (rest <= fragm_sz) {
+				fragm_sz = rest;
+				msg_set_type(&fragm_hdr, LAST_FRAGMENT);
+			} else {
+				msg_set_type(&fragm_hdr, FRAGMENT);
+			}
+			msg_set_size(&fragm_hdr, fragm_sz + INT_H_SIZE);
+			msg_set_fragm_no(&fragm_hdr, ++fragm_no);
+			prev = buf;
+			buf = buf_acquire(fragm_sz + INT_H_SIZE);
+			if (!buf)
+				goto error;
+
+			buf->next = NULL;                                
+			prev->next = buf;
+			memcpy(buf->data, (unchar *)&fragm_hdr, INT_H_SIZE);
+			fragm_crs = INT_H_SIZE;
+			fragm_rest = fragm_sz;
+			msg_dbg(buf_msg(buf), "  >BUILD>");
+		}
+	}
+	while (rest > 0);
+
+	/* 
+	 * Now we have a buffer chain. Select a link and check
+	 * that packet size is still OK
+	 */
+	node = node_select(destaddr, sender->publ.ref & 1);
+	if (likely(node)) {
+		node_lock(node);
+		l_ptr = node->active_links[sender->publ.ref & 1];
+		if (!l_ptr) {
+			node_unlock(node);
+			goto reject;
+		}
+		if (link_max_pkt(l_ptr) < max_pkt) {
+			sender->max_pkt = link_max_pkt(l_ptr);
+			node_unlock(node);
+			for (; buf_chain; buf_chain = buf) {
+				buf = buf_chain->next;
+				buf_discard(buf_chain);
+			}
+			goto again;
+		}
+	} else {
+reject:
+		for (; buf_chain; buf_chain = buf) {
+			buf = buf_chain->next;
+			buf_discard(buf_chain);
+		}
+		return port_reject_sections(sender, hdr, msg_sect, num_sect,
+					    TIPC_ERR_NO_NODE);
+	}
+
+	/* Append whole chain to send queue: */
+
+	buf = buf_chain;
+	l_ptr->long_msg_seq_no = mod(l_ptr->long_msg_seq_no + 1);
+	if (!l_ptr->next_out)
+		l_ptr->next_out = buf_chain;
+	l_ptr->stats.sent_fragmented++;
+	while (buf) {
+		struct sk_buff *next = buf->next;
+		struct tipc_msg *msg = buf_msg(buf);
+
+		l_ptr->stats.sent_fragments++;
+		msg_set_long_msgno(msg, l_ptr->long_msg_seq_no);
+		link_add_to_outqueue(l_ptr, buf, msg);
+		msg_dbg(msg, ">ADD>");
+		buf = next;
+	}
+
+	/* Send it, if possible: */
+
+	link_push_queue(l_ptr);
+	node_unlock(node);
+	return dsz;
+}
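+
+/*
+ * Sizing example for the function above (illustrative; assumes a
+ * 1500-octet max_pkt hint, INT_H_SIZE of 40 and a 24-octet user
+ * header): max_pkt becomes 1460 and fragm_sz 1420, so a 10000-octet
+ * payload goes out as a first fragment carrying 1396 data octets,
+ * six full 1420-octet fragments, and one 84-octet last fragment,
+ * i.e. eight fragments in all.
+ */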
+
+/* 
+ * link_push_packet: Push one unsent packet to the media
+ */
+u32 link_push_packet(struct link *l_ptr)
+{
+	struct sk_buff *buf = l_ptr->first_out;
+	u32 r_q_size = l_ptr->retransm_queue_size;
+	u32 r_q_head = l_ptr->retransm_queue_head;
+
+	/* Step to position where retransmission failed, if any,    */
+	/* consider that buffers may have been released in meantime */
+
+	if (r_q_size && buf) {
+		u32 last = lesser(mod(r_q_head + r_q_size), 
+				  link_last_sent(l_ptr));
+		u32 first = msg_seqno(buf_msg(buf));
+
+		while (buf && less(first, r_q_head)) {
+			first = mod(first + 1);
+			buf = buf->next;
+		}
+		l_ptr->retransm_queue_head = r_q_head = first;
+		l_ptr->retransm_queue_size = r_q_size = mod(last - first);
+	}
+
+	/* Continue retransmission now, if there is anything: */
+
+	if (r_q_size && buf && !skb_cloned(buf)) {
+		msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
+		msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in); 
+		if (bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
+			msg_dbg(buf_msg(buf), ">DEF-RETR>");
+			l_ptr->retransm_queue_head = mod(++r_q_head);
+			l_ptr->retransm_queue_size = --r_q_size;
+			l_ptr->stats.retransmitted++;
+			return TIPC_OK;
+		} else {
+			l_ptr->stats.bearer_congs++;
+			msg_dbg(buf_msg(buf), "|>DEF-RETR>");
+			return PUSH_FAILED;
+		}
+	}
+
+	/* Send deferred protocol message, if any: */
+
+	buf = l_ptr->proto_msg_queue;
+	if (buf) {
+		msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
+		msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
+		if (bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
+			msg_dbg(buf_msg(buf), ">DEF-PROT>");
+			l_ptr->unacked_window = 0;
+			buf_discard(buf);
+			l_ptr->proto_msg_queue = NULL;
+			return TIPC_OK;
+		} else {
+			msg_dbg(buf_msg(buf), "|>DEF-PROT>");
+			l_ptr->stats.bearer_congs++;
+			return PUSH_FAILED;
+		}
+	}
+
+	/* Send one deferred data message, if send window not full: */
+
+	buf = l_ptr->next_out;
+	if (buf) {
+		struct tipc_msg *msg = buf_msg(buf);
+		u32 next = msg_seqno(msg);
+		u32 first = msg_seqno(buf_msg(l_ptr->first_out));
+
+		if (mod(next - first) < l_ptr->queue_limit[0]) {
+			msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
+			msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); 
+			if (bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
+				if (msg_user(msg) == MSG_BUNDLER)
+					msg_set_type(msg, CLOSED_MSG);
+				msg_dbg(msg, ">PUSH-DATA>");
+				l_ptr->next_out = buf->next;
+				return TIPC_OK;
+			} else {
+				msg_dbg(msg, "|PUSH-DATA|");
+				l_ptr->stats.bearer_congs++;
+				return PUSH_FAILED;
+			}
+		}
+	}
+	return PUSH_FINISHED;
+}
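+
+/*
+ * Note on the return values above: TIPC_OK means one packet was sent
+ * and the caller should try again, PUSH_FAILED means the bearer is
+ * congested, and PUSH_FINISHED means there is nothing left to push.
+ * Retransmissions drain first, then a deferred protocol message,
+ * then at most one deferred data message per call.
+ */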
+
+/*
+ * link_push_queue(): push out the unsent messages of a link where
+ *                    congestion has abated. Node is locked.
+ */
+void link_push_queue(struct link *l_ptr)
+{
+	u32 res;
+
+	if (bearer_congested(l_ptr->b_ptr, l_ptr))
+		return;
+
+	do {
+		res = link_push_packet(l_ptr);
+	}
+	while (res == TIPC_OK);
+	if (res == PUSH_FAILED)
+		bearer_schedule(l_ptr->b_ptr, l_ptr);
+}
+
+void link_retransmit(struct link *l_ptr, struct sk_buff *buf, 
+		     u32 retransmits)
+{
+	struct tipc_msg *msg;
+
+	dbg("Retransmitting %u in link %p\n", retransmits, l_ptr);
+
+	if (bearer_congested(l_ptr->b_ptr, l_ptr) && buf && !skb_cloned(buf)) {
+		msg_dbg(buf_msg(buf), ">NO_RETR->BCONG>");
+		dbg_print_link(l_ptr, "   ");
+		l_ptr->retransm_queue_head = msg_seqno(buf_msg(buf));
+		l_ptr->retransm_queue_size = retransmits;
+		return;
+	}
+	while (retransmits && (buf != l_ptr->next_out) && buf && !skb_cloned(buf)) {
+		msg = buf_msg(buf);
+		msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
+		msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); 
+		if (bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
+			/* Catch if retransmissions fail repeatedly: */
+			if (l_ptr->last_retransmitted == msg_seqno(msg)) {
+				if (++l_ptr->stale_count > 100) {
+					msg_print(CONS, buf_msg(buf), ">RETR>");
+					info("...Retransmitted %u times\n",
+					     l_ptr->stale_count);
+					link_print(l_ptr, CONS, "Resetting Link\n");
+					link_reset(l_ptr);
+					break;
+				}
+			} else {
+				l_ptr->stale_count = 0;
+			}
+			l_ptr->last_retransmitted = msg_seqno(msg);
+
+			msg_dbg(buf_msg(buf), ">RETR>");
+			buf = buf->next;
+			retransmits--;
+			l_ptr->stats.retransmitted++;
+		} else {
+			bearer_schedule(l_ptr->b_ptr, l_ptr);
+			l_ptr->stats.bearer_congs++;
+			l_ptr->retransm_queue_head = msg_seqno(buf_msg(buf));
+			l_ptr->retransm_queue_size = retransmits;
+			return;
+		}
+	}
+	l_ptr->retransm_queue_head = l_ptr->retransm_queue_size = 0;
+}
+
+/* 
+ * link_recv_non_seq: Receive packets which are outside
+ *                    the link sequence flow
+ */
+
+static void link_recv_non_seq(struct sk_buff *buf)
+{
+	struct tipc_msg *msg = buf_msg(buf);
+
+	if (msg_user(msg) ==  LINK_CONFIG)
+		disc_recv_msg(buf);
+	else
+		bclink_recv_pkt(buf);
+}
+
+/** 
+ * link_insert_deferred_queue - insert deferred messages back into receive chain
+ */
+
+static struct sk_buff *link_insert_deferred_queue(struct link *l_ptr, 
+						  struct sk_buff *buf)
+{
+	u32 seq_no;
+
+	if (l_ptr->oldest_deferred_in == NULL)
+		return buf;
+
+	seq_no = msg_seqno(buf_msg(l_ptr->oldest_deferred_in));
+	if (seq_no == mod(l_ptr->next_in_no)) {
+		l_ptr->newest_deferred_in->next = buf;
+		buf = l_ptr->oldest_deferred_in;
+		l_ptr->oldest_deferred_in = NULL;
+		l_ptr->deferred_inqueue_sz = 0;
+	}
+	return buf;
+}
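+
+/*
+ * Example of the splice above (illustrative): with next_in_no == 7
+ * and packets 7, 8 and 9 parked in the deferred queue, the queue is
+ * linked in ahead of the newly received buffer, so the caller sees
+ * 7, 8, 9 and then the new arrival, in order.
+ */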
+
+void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *tb_ptr)
+{
+	read_lock_bh(&net_lock);
+	while (head) {
+		struct bearer *b_ptr;
+		struct node *n_ptr;
+		struct link *l_ptr;
+		struct sk_buff *crs;
+		struct sk_buff *buf = head;
+		struct tipc_msg *msg = buf_msg(buf);
+		u32 seq_no = msg_seqno(msg);
+		u32 ackd = msg_ack(msg);
+		u32 released = 0;
+		int type;
+
+		b_ptr = (struct bearer *)tb_ptr;
+		TIPC_SKB_CB(buf)->handle = b_ptr;
+
+		head = head->next;
+		if (unlikely(msg_version(msg) != TIPC_VERSION))
+			goto cont;
+#if 0
+		if (msg_user(msg) != LINK_PROTOCOL)
+#endif
+			msg_dbg(msg, "<REC<");
+
+		if (unlikely(msg_non_seq(msg))) {
+			link_recv_non_seq(buf);
+			continue;
+		}
+		n_ptr = node_find(msg_prevnode(msg));
+		if (unlikely(!n_ptr))
+			goto cont;
+
+		node_lock(n_ptr);
+		l_ptr = n_ptr->links[b_ptr->identity];
+		if (unlikely(!l_ptr)) {
+			node_unlock(n_ptr);
+			goto cont;
+		}
+		/* 
+		 * Release acked messages 
+		 */
+		if (less(n_ptr->bclink.acked, msg_bcast_ack(msg))) {
+			if (node_is_up(n_ptr) && n_ptr->bclink.supported)
+				bclink_acknowledge(n_ptr, msg_bcast_ack(msg));
+		}
+
+		crs = l_ptr->first_out;
+		while ((crs != l_ptr->next_out) && 
+		       less_eq(msg_seqno(buf_msg(crs)), ackd)) {
+			struct sk_buff *next = crs->next;
+
+			buf_discard(crs);
+			crs = next;
+			released++;
+		}
+		if (released) {
+			l_ptr->first_out = crs;
+			l_ptr->out_queue_size -= released;
+		}
+		if (unlikely(l_ptr->next_out))
+			link_push_queue(l_ptr);
+		if (unlikely(!list_empty(&l_ptr->waiting_ports)))
+			link_wakeup_ports(l_ptr, 0);
+		if (unlikely(++l_ptr->unacked_window >= TIPC_MIN_LINK_WIN)) {
+			l_ptr->stats.sent_acks++;
+			link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
+		}
+
+protocol_check:
+		if (likely(link_working_working(l_ptr))) {
+			if (likely(seq_no == mod(l_ptr->next_in_no))) {
+				l_ptr->next_in_no++;
+				if (unlikely(l_ptr->oldest_deferred_in))
+					head = link_insert_deferred_queue(l_ptr,
+									  head);
+				if (likely(msg_is_dest(msg, tipc_own_addr))) {
+deliver:
+					if (likely(msg_isdata(msg))) {
+						node_unlock(n_ptr);
+						port_recv_msg(buf);
+						continue;
+					}
+					switch (msg_user(msg)) {
+					case MSG_BUNDLER:
+						l_ptr->stats.recv_bundles++;
+						l_ptr->stats.recv_bundled += 
+							msg_msgcnt(msg);
+						node_unlock(n_ptr);
+						link_recv_bundle(buf);
+						continue;
+					case ROUTE_DISTRIBUTOR:
+						node_unlock(n_ptr);
+						cluster_recv_routing_table(buf);
+						continue;
+					case NAME_DISTRIBUTOR:
+						node_unlock(n_ptr);
+						named_recv(buf);
+						continue;
+					case CONN_MANAGER:
+						node_unlock(n_ptr);
+						port_recv_proto_msg(buf);
+						continue;
+					case MSG_FRAGMENTER:
+						l_ptr->stats.recv_fragments++;
+						if (link_recv_fragment(
+							&l_ptr->defragm_buf, 
+							&buf, &msg)) {
+							l_ptr->stats.recv_fragmented++;
+							goto deliver;
+						}
+						break;
+					case CHANGEOVER_PROTOCOL:
+						type = msg_type(msg);
+						if (link_recv_changeover_msg(
+							&l_ptr, &buf)) {
+							msg = buf_msg(buf);
+							seq_no = msg_seqno(msg);
+							TIPC_SKB_CB(buf)->handle 
+								= b_ptr;
+							if (type == ORIGINAL_MSG)
+								goto deliver;
+							goto protocol_check;
+						}
+						break;
+					}
+				}
+				node_unlock(n_ptr);
+				net_route_msg(buf);
+				continue;
+			}
+			link_handle_out_of_seq_msg(l_ptr, buf);
+			head = link_insert_deferred_queue(l_ptr, head);
+			node_unlock(n_ptr);
+			continue;
+		}
+
+		if (msg_user(msg) == LINK_PROTOCOL) {
+			link_recv_proto_msg(l_ptr, buf);
+			head = link_insert_deferred_queue(l_ptr, head);
+			node_unlock(n_ptr);
+			continue;
+		}
+		msg_dbg(msg, "NSEQ<REC<");
+		link_state_event(l_ptr, TRAFFIC_MSG_EVT);
+
+		if (link_working_working(l_ptr)) {
+			/* Re-insert in front of queue */
+			msg_dbg(msg, "RECV-REINS:");
+			buf->next = head;
+			head = buf;
+			node_unlock(n_ptr);
+			continue;
+		}
+		node_unlock(n_ptr);
+cont:
+		buf_discard(buf);
+	}
+	read_unlock_bh(&net_lock);
+}
+
+/* 
+ * link_defer_pkt(): Sort a received out-of-sequence packet
+ *                   into the deferred reception queue.
+ * Returns the increase of the queue length, i.e. 0 or 1
+ */
+
+u32 link_defer_pkt(struct sk_buff **head,
+		   struct sk_buff **tail,
+		   struct sk_buff *buf)
+{
+	struct sk_buff *prev = NULL;
+	struct sk_buff *crs = *head;
+	u32 seq_no = msg_seqno(buf_msg(buf));
+
+	buf->next = NULL;
+
+	/* Empty queue ? */
+	if (*head == NULL) {
+		*head = *tail = buf;
+		return 1;
+	}
+
+	/* Last ? */
+	if (less(msg_seqno(buf_msg(*tail)), seq_no)) {
+		(*tail)->next = buf;
+		*tail = buf;
+		return 1;
+	}
+
+	/* Scan through queue and sort it in */
+	do {
+		struct tipc_msg *msg = buf_msg(crs);
+
+		if (less(seq_no, msg_seqno(msg))) {
+			buf->next = crs;
+			if (prev)
+				prev->next = buf;
+			else
+				*head = buf;   
+			return 1;
+		}
+		if (seq_no == msg_seqno(msg)) {
+			break;
+		}
+		prev = crs;
+		crs = crs->next;
+	}
+	while (crs);
+
+	/* Message is a duplicate of an existing message */
+
+	buf_discard(buf);
+	return 0;
+}
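+
+/*
+ * Worked example (illustrative): with a deferred queue holding
+ * sequence numbers 5, 6 and 9, an arriving 7 is sorted in to give
+ * 5, 6, 7, 9 and the function returns 1; a second copy of 6 is
+ * detected as a duplicate, discarded, and 0 is returned.
+ */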
+
+/** 
+ * link_handle_out_of_seq_msg - handle arrival of out-of-sequence packet
+ */
+
+static void link_handle_out_of_seq_msg(struct link *l_ptr, 
+				       struct sk_buff *buf)
+{
+	u32 seq_no = msg_seqno(buf_msg(buf));
+
+	if (likely(msg_user(buf_msg(buf)) == LINK_PROTOCOL)) {
+		link_recv_proto_msg(l_ptr, buf);
+		return;
+	}
+
+	dbg("rx OOS msg: seq_no %u, expecting %u (%u)\n", 
+	    seq_no, mod(l_ptr->next_in_no), l_ptr->next_in_no);
+
+	/* Record OOS packet arrival (force mismatch on next timeout) */
+
+	l_ptr->checkpoint--;
+
+	/* 
+	 * Discard packet if a duplicate; otherwise add it to deferred queue
+	 * and notify peer of gap as per protocol specification
+	 */
+
+	if (less(seq_no, mod(l_ptr->next_in_no))) {
+		l_ptr->stats.duplicates++;
+		buf_discard(buf);
+		return;
+	}
+
+	if (link_defer_pkt(&l_ptr->oldest_deferred_in,
+			   &l_ptr->newest_deferred_in, buf)) {
+		l_ptr->deferred_inqueue_sz++;
+		l_ptr->stats.deferred_recv++;
+		if ((l_ptr->deferred_inqueue_sz % 16) == 1)
+			link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
+	} else
+		l_ptr->stats.duplicates++;
+}
+
+/*
+ * Send protocol message to the other endpoint.
+ */
+void link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int probe_msg,
+			 u32 gap, u32 tolerance, u32 priority, u32 ack_mtu)
+{
+	struct sk_buff *buf = NULL;
+	struct tipc_msg *msg = l_ptr->pmsg;
+	u32 msg_size = sizeof(l_ptr->proto_msg);
+
+	if (link_blocked(l_ptr))
+		return;
+	msg_set_type(msg, msg_typ);
+	msg_set_net_plane(msg, l_ptr->b_ptr->net_plane);
+	msg_set_bcast_ack(msg, mod(l_ptr->owner->bclink.last_in)); 
+	msg_set_last_bcast(msg, bclink_get_last_sent());
+
+	if (msg_typ == STATE_MSG) {
+		u32 next_sent = mod(l_ptr->next_out_no);
+
+		if (!link_is_up(l_ptr))
+			return;
+		if (l_ptr->next_out)
+			next_sent = msg_seqno(buf_msg(l_ptr->next_out));
+		msg_set_next_sent(msg, next_sent);
+		if (l_ptr->oldest_deferred_in) {
+			u32 rec = msg_seqno(buf_msg(l_ptr->oldest_deferred_in));
+			gap = mod(rec - mod(l_ptr->next_in_no));
+		}
+		msg_set_seq_gap(msg, gap);
+		if (gap)
+			l_ptr->stats.sent_nacks++;
+		msg_set_link_tolerance(msg, tolerance);
+		msg_set_linkprio(msg, priority);
+		msg_set_max_pkt(msg, ack_mtu);
+		msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
+		msg_set_probe(msg, probe_msg != 0);
+		if (probe_msg) {
+			u32 mtu = l_ptr->max_pkt;
+
+			if ((mtu < l_ptr->max_pkt_target) &&
+			    link_working_working(l_ptr) &&
+			    l_ptr->fsm_msg_cnt) {
+				msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
+				if (l_ptr->max_pkt_probes == 10) {
+					l_ptr->max_pkt_target = (msg_size - 4);
+					l_ptr->max_pkt_probes = 0;
+					msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
+				}
+				l_ptr->max_pkt_probes++;
+			}
+
+			l_ptr->stats.sent_probes++;
+		}
+		l_ptr->stats.sent_states++;
+	} else {		/* RESET_MSG or ACTIVATE_MSG */
+		msg_set_ack(msg, mod(l_ptr->reset_checkpoint - 1));
+		msg_set_seq_gap(msg, 0);
+		msg_set_next_sent(msg, 1);
+		msg_set_link_tolerance(msg, l_ptr->tolerance);
+		msg_set_linkprio(msg, l_ptr->priority);
+		msg_set_max_pkt(msg, l_ptr->max_pkt_target);
+	}
+
+	if (node_has_redundant_links(l_ptr->owner)) {
+		msg_set_redundant_link(msg);
+	} else {
+		msg_clear_redundant_link(msg);
+	}
+	msg_set_linkprio(msg, l_ptr->priority);
+
+	/* Ensure sequence number will not fit into the receive window: */
+
+	msg_set_seqno(msg, mod(l_ptr->next_out_no + (0xffff/2)));
+
+	/* Congestion? */
+
+	if (bearer_congested(l_ptr->b_ptr, l_ptr)) {
+		if (!l_ptr->proto_msg_queue) {
+			l_ptr->proto_msg_queue =
+				buf_acquire(sizeof(l_ptr->proto_msg));
+		}
+		buf = l_ptr->proto_msg_queue;
+		if (!buf)
+			return;
+		memcpy(buf->data, (unchar *)msg, sizeof(l_ptr->proto_msg));
+		return;
+	}
+	msg_set_timestamp(msg, jiffies_to_msecs(jiffies));
+
+	/* Message can be sent */
+
+	msg_dbg(msg, ">>");
+
+	buf = buf_acquire(msg_size);
+	if (!buf)
+		return;
+
+	memcpy(buf->data, (unchar *)msg, sizeof(l_ptr->proto_msg));
+	msg_set_size(buf_msg(buf), msg_size);
+
+	if (bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
+		l_ptr->unacked_window = 0;
+		buf_discard(buf);
+		return;
+	}
+
+	/* New congestion */
+	bearer_schedule(l_ptr->b_ptr, l_ptr);
+	l_ptr->proto_msg_queue = buf;
+	l_ptr->stats.bearer_congs++;
+}
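+
+/*
+ * Probe sizing example for the STATE_MSG branch above (illustrative):
+ * with max_pkt == 1500 and max_pkt_target == 9000 the probe goes out
+ * as a (1500 + 3750 + 2) & ~3 == 5252-octet message, i.e. roughly
+ * halfway between the confirmed and the target MTU, aligned to a
+ * 4-octet boundary; after ten probes without a larger MTU being
+ * confirmed, the target itself is lowered to just below the probe
+ * size and the search continues from there.
+ */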
+
+/*
+ * Receive protocol message.
+ * Note that the network plane id propagates through the network, and may
+ * change at any time. The node with the lowest address determines which
+ * value is used.
+ */
+
+static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf)
+{
+	u32 rec_gap = 0;
+	u32 max_pkt_info;
+	u32 max_pkt_ack;
+	u32 msg_tol;
+	struct tipc_msg *msg = buf_msg(buf);
+
+	dbg("AT(%u):", jiffies_to_msecs(jiffies));
+	msg_dbg(msg, "<<");
+	if (link_blocked(l_ptr))
+		goto exit;
+
+	/* record unnumbered packet arrival (force mismatch on next timeout) */
+
+	l_ptr->checkpoint--;
+
+	if (l_ptr->b_ptr->net_plane != msg_net_plane(msg))
+		if (tipc_own_addr > msg_prevnode(msg))
+			l_ptr->b_ptr->net_plane = msg_net_plane(msg);
+
+	l_ptr->owner->permit_changeover = msg_redundant_link(msg);
+
+	switch (msg_type(msg)) {
+	
+	case RESET_MSG:
+		if (!link_working_unknown(l_ptr) && l_ptr->peer_session) {
+			if (msg_session(msg) == l_ptr->peer_session) {
+				dbg("Duplicate RESET: %u<->%u\n",
+				    msg_session(msg), l_ptr->peer_session);                                     
+				break; /* duplicate: ignore */
+			}
+		}
+		/* fall through */
+	case ACTIVATE_MSG:
+		/* Update link settings according to the other endpoint's values */
+
+		strcpy((strrchr(l_ptr->name, ':') + 1), (char *)msg_data(msg));
+
+		if ((msg_tol = msg_link_tolerance(msg)) &&
+		    (msg_tol > l_ptr->tolerance))
+			link_set_supervision_props(l_ptr, msg_tol);
+
+		if (msg_linkprio(msg) > l_ptr->priority)
+			l_ptr->priority = msg_linkprio(msg);
+
+		max_pkt_info = msg_max_pkt(msg);
+		if (max_pkt_info) {
+			if (max_pkt_info < l_ptr->max_pkt_target)
+				l_ptr->max_pkt_target = max_pkt_info;
+			if (l_ptr->max_pkt > l_ptr->max_pkt_target)
+				l_ptr->max_pkt = l_ptr->max_pkt_target;
+		} else {
+			l_ptr->max_pkt = l_ptr->max_pkt_target;
+		}
+		l_ptr->owner->bclink.supported = (max_pkt_info != 0);
+
+		link_state_event(l_ptr, msg_type(msg));
+
+		l_ptr->peer_session = msg_session(msg);
+		l_ptr->peer_bearer_id = msg_bearer_id(msg);
+
+		/* Synchronize broadcast sequence numbers */
+		if (!node_has_redundant_links(l_ptr->owner)) {
+			l_ptr->owner->bclink.last_in = mod(msg_last_bcast(msg));
+		}
+		break;
+	case STATE_MSG:
+
+		if ((msg_tol = msg_link_tolerance(msg)))
+			link_set_supervision_props(l_ptr, msg_tol);
+		
+		if (msg_linkprio(msg) && 
+		    (msg_linkprio(msg) != l_ptr->priority)) {
+			warn("Changing prio <%s>: %u->%u\n",
+			     l_ptr->name, l_ptr->priority, msg_linkprio(msg));
+			l_ptr->priority = msg_linkprio(msg);
+			link_reset(l_ptr); /* Enforce change to take effect */
+			break;
+		}
+		link_state_event(l_ptr, TRAFFIC_MSG_EVT);
+		l_ptr->stats.recv_states++;
+		if (link_reset_unknown(l_ptr))
+			break;
+
+		if (less_eq(mod(l_ptr->next_in_no), msg_next_sent(msg))) {
+			rec_gap = mod(msg_next_sent(msg) - 
+				      mod(l_ptr->next_in_no));
+		}
+
+		max_pkt_ack = msg_max_pkt(msg);
+		if (max_pkt_ack > l_ptr->max_pkt) {
+			dbg("Link <%s> updated MTU %u -> %u\n",
+			    l_ptr->name, l_ptr->max_pkt, max_pkt_ack);
+			l_ptr->max_pkt = max_pkt_ack;
+			l_ptr->max_pkt_probes = 0;
+		}
+
+		max_pkt_ack = 0;
+		if (msg_probe(msg)) {
+			l_ptr->stats.recv_probes++;
+			if (msg_size(msg) > sizeof(l_ptr->proto_msg)) {
+				max_pkt_ack = msg_size(msg);
+			}
+		}
+
+		/* Send protocol message before any retransmits, to reduce loss risk */
+
+		bclink_check_gap(l_ptr->owner, msg_last_bcast(msg));
+
+		if (rec_gap || (msg_probe(msg))) {
+			link_send_proto_msg(l_ptr, STATE_MSG,
+					    0, rec_gap, 0, 0, max_pkt_ack);
+		}
+		if (msg_seq_gap(msg)) {
+			msg_dbg(msg, "With Gap:");
+			l_ptr->stats.recv_nacks++;
+			link_retransmit(l_ptr, l_ptr->first_out,
+					msg_seq_gap(msg));
+		}
+		break;
+	default:
+		msg_dbg(buf_msg(buf), "<DISCARDING UNKNOWN<");
+	}
+exit:
+	buf_discard(buf);
+}
+
+
+/*
+ * link_tunnel(): Send one message via a link belonging to 
+ * another bearer. Owner node is locked.
+ */
+void link_tunnel(struct link *l_ptr,
+		 struct tipc_msg *tunnel_hdr,
+		 struct tipc_msg *msg,
+		 u32 selector)
+{
+	struct link *tunnel;
+	struct sk_buff *buf;
+	u32 length = msg_size(msg);
+
+	tunnel = l_ptr->owner->active_links[selector & 1];
+	if (!tunnel || !link_is_up(tunnel))
+		return;
+	msg_set_size(tunnel_hdr, length + INT_H_SIZE);
+	buf = buf_acquire(length + INT_H_SIZE);
+	if (!buf)
+		return;
+	memcpy(buf->data, (unchar *)tunnel_hdr, INT_H_SIZE);
+	memcpy(buf->data + INT_H_SIZE, (unchar *)msg, length);
+	dbg("%c->%c:", l_ptr->b_ptr->net_plane, tunnel->b_ptr->net_plane);
+	msg_dbg(buf_msg(buf), ">SEND>");
+	link_send_buf(tunnel, buf);
+}
+
+
+
+/*
+ * changeover(): Send whole message queue via the remaining link
+ *               Owner node is locked.
+ */
+
+void link_changeover(struct link *l_ptr)
+{
+	u32 msgcount = l_ptr->out_queue_size;
+	struct sk_buff *crs = l_ptr->first_out;
+	struct link *tunnel = l_ptr->owner->active_links[0];
+	int split_bundles = node_has_redundant_links(l_ptr->owner);
+	struct tipc_msg tunnel_hdr;
+
+	if (!tunnel)
+		return;
+
+	if (!l_ptr->owner->permit_changeover)
+		return;
+
+	msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
+		 ORIGINAL_MSG, TIPC_OK, INT_H_SIZE, l_ptr->addr);
+	msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
+	msg_set_msgcnt(&tunnel_hdr, msgcount);
+	if (!l_ptr->first_out) {
+		struct sk_buff *buf;
+
+		assert(!msgcount);
+		buf = buf_acquire(INT_H_SIZE);
+		if (buf) {
+			memcpy(buf->data, (unchar *)&tunnel_hdr, INT_H_SIZE);
+			msg_set_size(&tunnel_hdr, INT_H_SIZE);
+			dbg("%c->%c:", l_ptr->b_ptr->net_plane,
+			    tunnel->b_ptr->net_plane);
+			msg_dbg(&tunnel_hdr, "EMPTY>SEND>");
+			link_send_buf(tunnel, buf);
+		} else {
+			warn("Memory squeeze; link changeover failed\n");
+		}
+		return;
+	}
+	while (crs) {
+		struct tipc_msg *msg = buf_msg(crs);
+
+		if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) {
+			u32 inner_count = msg_msgcnt(msg);
+			struct tipc_msg *m = msg_get_wrapped(msg);
+			unchar *pos = (unchar *)m;
+
+			while (inner_count--) {
+				msg_set_seqno(m, msg_seqno(msg));
+				link_tunnel(l_ptr, &tunnel_hdr, m,
+					    msg_link_selector(m));
+				pos += align(msg_size(m));
+				m = (struct tipc_msg *)pos;
+			}
+		} else {
+			link_tunnel(l_ptr, &tunnel_hdr, msg,
+				    msg_link_selector(msg));
+		}
+		crs = crs->next;
+	}
+}
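+
+/*
+ * Note on split_bundles above: while the peer still has a redundant
+ * link, each message wrapped inside a bundle is tunnelled separately,
+ * so that it keeps its own link selector; once only one link remains,
+ * the bundle is tunnelled as a single unit.
+ */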
+
+void link_send_duplicate(struct link *l_ptr, struct link *tunnel)
+{
+	struct sk_buff *iter;
+	struct tipc_msg tunnel_hdr;
+
+	msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
+		 DUPLICATE_MSG, TIPC_OK, INT_H_SIZE, l_ptr->addr);
+	msg_set_msgcnt(&tunnel_hdr, l_ptr->out_queue_size);
+	msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
+	iter = l_ptr->first_out;
+	while (iter) {
+		struct sk_buff *outbuf;
+		struct tipc_msg *msg = buf_msg(iter);
+		u32 length = msg_size(msg);
+
+		if (msg_user(msg) == MSG_BUNDLER)
+			msg_set_type(msg, CLOSED_MSG);
+		msg_set_ack(msg, mod(l_ptr->next_in_no - 1));	/* Update */
+		msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); 
+		msg_set_size(&tunnel_hdr, length + INT_H_SIZE);
+		outbuf = buf_acquire(length + INT_H_SIZE);
+		if (outbuf == NULL) {
+			warn("Memory squeeze; buffer duplication failed\n");
+			return;
+		}
+		memcpy(outbuf->data, (unchar *)&tunnel_hdr, INT_H_SIZE);
+		memcpy(outbuf->data + INT_H_SIZE, iter->data, length);
+		dbg("%c->%c:", l_ptr->b_ptr->net_plane,
+		    tunnel->b_ptr->net_plane);
+		msg_dbg(buf_msg(outbuf), ">SEND>");
+		link_send_buf(tunnel, outbuf);
+		if (!link_is_up(l_ptr))
+			return;
+		iter = iter->next;
+	}
+}
+
+
+
+/**
+ * buf_extract - extracts embedded TIPC message from another message
+ * @skb: encapsulating message buffer
+ * @from_pos: offset to extract from
+ *
+ * Returns a new message buffer containing an embedded message.  The 
+ * encapsulating message itself is left unchanged.
+ */
+
+static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos)
+{
+	struct tipc_msg *msg = (struct tipc_msg *)(skb->data + from_pos);
+	u32 size = msg_size(msg);
+	struct sk_buff *eb;
+
+	eb = buf_acquire(size);
+	if (eb)
+		memcpy(eb->data, (unchar *)msg, size);
+	return eb;
+}
+
+/* 
+ *  link_recv_changeover_msg(): Receive tunneled packet sent
+ *  via other link. Node is locked. Return extracted buffer.
+ */
+
+static int link_recv_changeover_msg(struct link **l_ptr,
+				    struct sk_buff **buf)
+{
+	struct sk_buff *tunnel_buf = *buf;
+	struct link *dest_link;
+	struct tipc_msg *msg;
+	struct tipc_msg *tunnel_msg = buf_msg(tunnel_buf);
+	u32 msg_typ = msg_type(tunnel_msg);
+	u32 msg_count = msg_msgcnt(tunnel_msg);
+
+	dest_link = (*l_ptr)->owner->links[msg_bearer_id(tunnel_msg)];
+	assert(dest_link != *l_ptr);
+	if (!dest_link) {
+		msg_dbg(tunnel_msg, "NOLINK/<REC<");
+		goto exit;
+	}
+	dbg("%c<-%c:", dest_link->b_ptr->net_plane,
+	    (*l_ptr)->b_ptr->net_plane);
+	*l_ptr = dest_link;
+	msg = msg_get_wrapped(tunnel_msg);
+
+	if (msg_typ == DUPLICATE_MSG) {
+		if (less(msg_seqno(msg), mod(dest_link->next_in_no))) {
+			msg_dbg(tunnel_msg, "DROP/<REC<");
+			goto exit;
+		}
+		*buf = buf_extract(tunnel_buf, INT_H_SIZE);
+		if (*buf == NULL) {
+			warn("Memory squeeze; failed to extract msg\n");
+			goto exit;
+		}
+		msg_dbg(tunnel_msg, "TNL<REC<");
+		buf_discard(tunnel_buf);
+		return 1;
+	}
+
+	/* First original message ?: */
+
+	if (link_is_up(dest_link)) {
+		msg_dbg(tunnel_msg, "UP/FIRST/<REC<");
+		link_reset(dest_link);
+		dest_link->exp_msg_count = msg_count;
+		if (!msg_count)
+			goto exit;
+	} else if (dest_link->exp_msg_count == START_CHANGEOVER) {
+		msg_dbg(tunnel_msg, "BLK/FIRST/<REC<");
+		dest_link->exp_msg_count = msg_count;
+		if (!msg_count)
+			goto exit;
+	}
+
+	/* Receive original message */
+
+	if (dest_link->exp_msg_count == 0) {
+		msg_dbg(tunnel_msg, "OVERDUE/DROP/<REC<");
+		dbg_print_link(dest_link, "LINK:");
+		goto exit;
+	}
+	dest_link->exp_msg_count--;
+	if (less(msg_seqno(msg), dest_link->reset_checkpoint)) {
+		msg_dbg(tunnel_msg, "DROP/DUPL/<REC<");
+		goto exit;
+	} else {
+		*buf = buf_extract(tunnel_buf, INT_H_SIZE);
+		if (*buf != NULL) {
+			msg_dbg(tunnel_msg, "TNL<REC<");
+			buf_discard(tunnel_buf);
+			return 1;
+		} else {
+			warn("Memory squeeze; dropped incoming msg\n");
+		}
+	}
+exit:
+	*buf = NULL;
+	buf_discard(tunnel_buf);
+	return 0;
+}
+
+/*
+ *  Bundler functionality:
+ */
+void link_recv_bundle(struct sk_buff *buf)
+{
+	u32 msgcount = msg_msgcnt(buf_msg(buf));
+	u32 pos = INT_H_SIZE;
+	struct sk_buff *obuf;
+
+	msg_dbg(buf_msg(buf), "<BNDL<: ");
+	while (msgcount--) {
+		obuf = buf_extract(buf, pos);
+		if (obuf == NULL) {
+			char addr_string[16];
+
+			warn("Buffer allocation failure;\n");
+			warn("  incoming message(s) from %s lost\n",
+			     addr_string_fill(addr_string, 
+					      msg_orignode(buf_msg(buf))));
+			return;
+		}
+		pos += align(msg_size(buf_msg(obuf)));
+		msg_dbg(buf_msg(obuf), "     /");
+		net_route_msg(obuf);
+	}
+	buf_discard(buf);
+}
+
+/*
+ *  Fragmentation/defragmentation:
+ */
+
+
+/* 
+ * link_send_long_buf: Entry for buffers needing fragmentation.
+ * The buffer is complete, including the total message length.
+ * Returns user data length.
+ */
+int link_send_long_buf(struct link *l_ptr, struct sk_buff *buf)
+{
+	struct tipc_msg *inmsg = buf_msg(buf);
+	struct tipc_msg fragm_hdr;
+	u32 insize = msg_size(inmsg);
+	u32 dsz = msg_data_sz(inmsg);
+	unchar *crs = buf->data;
+	u32 rest = insize;
+	u32 pack_sz = link_max_pkt(l_ptr);
+	u32 fragm_sz = pack_sz - INT_H_SIZE;
+	u32 fragm_no = 1;
+	u32 destaddr = msg_destnode(inmsg);
+
+	if (msg_short(inmsg))
+		destaddr = l_ptr->addr;
+
+	if (msg_routed(inmsg))
+		msg_set_prevnode(inmsg, tipc_own_addr);
+
+	/* Prepare reusable fragment header: */
+
+	msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
+		 TIPC_OK, INT_H_SIZE, destaddr);
+	msg_set_link_selector(&fragm_hdr, msg_link_selector(inmsg));
+	msg_set_long_msgno(&fragm_hdr, mod(l_ptr->long_msg_seq_no++));
+	msg_set_fragm_no(&fragm_hdr, fragm_no);
+	l_ptr->stats.sent_fragmented++;
+
+	/* Chop up message: */
+
+	while (rest > 0) {
+		struct sk_buff *fragm;
+
+		if (rest <= fragm_sz) {
+			fragm_sz = rest;
+			msg_set_type(&fragm_hdr, LAST_FRAGMENT);
+		}
+		fragm = buf_acquire(fragm_sz + INT_H_SIZE);
+		if (fragm == NULL) {
+			warn("Memory squeeze; failed to fragment msg\n");
+			dsz = -ENOMEM;
+			goto exit;
+		}
+		msg_set_size(&fragm_hdr, fragm_sz + INT_H_SIZE);
+		memcpy(fragm->data, (unchar *)&fragm_hdr, INT_H_SIZE);
+		memcpy(fragm->data + INT_H_SIZE, crs, fragm_sz);
+
+		/*  Send queued messages first, if any: */
+
+		l_ptr->stats.sent_fragments++;
+		link_send_buf(l_ptr, fragm);
+		if (!link_is_up(l_ptr))
+			return dsz;
+		msg_set_fragm_no(&fragm_hdr, ++fragm_no);
+		rest -= fragm_sz;
+		crs += fragm_sz;
+		msg_set_type(&fragm_hdr, FRAGMENT);
+	}
+exit:
+	buf_discard(buf);
+	return dsz;
+}
+
+/* 
+ * A pending message being re-assembled must store certain values
+ * to handle subsequent fragments correctly. The following functions
+ * help store these values in unused, available fields of the
+ * pending message. This makes dynamic memory allocation unnecessary.
+ */
+
+static inline u32 get_long_msg_seqno(struct sk_buff *buf)
+{
+	return msg_seqno(buf_msg(buf));
+}
+
+static inline void set_long_msg_seqno(struct sk_buff *buf, u32 seqno)
+{
+	msg_set_seqno(buf_msg(buf), seqno);
+}
+
+static inline u32 get_fragm_size(struct sk_buff *buf)
+{
+	return msg_ack(buf_msg(buf));
+}
+
+static inline void set_fragm_size(struct sk_buff *buf, u32 sz)
+{
+	msg_set_ack(buf_msg(buf), sz);
+}
+
+static inline u32 get_expected_frags(struct sk_buff *buf)
+{
+	return msg_bcast_ack(buf_msg(buf));
+}
+
+static inline void set_expected_frags(struct sk_buff *buf, u32 exp)
+{
+	msg_set_bcast_ack(buf_msg(buf), exp);
+}
+
+static inline u32 get_timer_cnt(struct sk_buff *buf)
+{
+	return msg_reroute_cnt(buf_msg(buf));
+}
+
+static inline void incr_timer_cnt(struct sk_buff *buf)
+{
+	msg_incr_reroute_cnt(buf_msg(buf));
+}
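+
+/*
+ * Mapping implemented by the helpers above (each value lives in a
+ * header field that is otherwise unused in a pending message):
+ *
+ *   seqno         long message number
+ *   ack           fragment size
+ *   bcast_ack     number of fragments still expected
+ *   reroute cnt   timer (staleness) count
+ */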
+
+/* 
+ * link_recv_fragment(): Called with node lock on. Returns 
+ * the reassembled buffer if message is complete.
+ */
+int link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb, 
+		       struct tipc_msg **m)
+{
+	struct sk_buff *prev = NULL;
+	struct sk_buff *fbuf = *fb;
+	struct tipc_msg *fragm = buf_msg(fbuf);
+	struct sk_buff *pbuf = *pending;
+	u32 long_msg_seq_no = msg_long_msgno(fragm);
+
+	*fb = NULL;
+	msg_dbg(fragm, "FRG<REC<");
+
+	/* Is there an incomplete message waiting for this fragment? */
+
+	while (pbuf && ((msg_seqno(buf_msg(pbuf)) != long_msg_seq_no)
+			|| (msg_orignode(fragm) != msg_orignode(buf_msg(pbuf))))) {
+		prev = pbuf;
+		pbuf = pbuf->next;
+	}
+
+	if (!pbuf && (msg_type(fragm) == FIRST_FRAGMENT)) {
+		struct tipc_msg *imsg = (struct tipc_msg *)msg_data(fragm);
+		u32 msg_sz = msg_size(imsg);
+		u32 fragm_sz = msg_data_sz(fragm);
+		u32 exp_fragm_cnt = msg_sz/fragm_sz + !!(msg_sz % fragm_sz);
+		u32 max = TIPC_MAX_USER_MSG_SIZE + LONG_H_SIZE;
+		if (msg_type(imsg) == TIPC_MCAST_MSG)
+			max = TIPC_MAX_USER_MSG_SIZE + MCAST_H_SIZE;
+		if (msg_size(imsg) > max) {
+			msg_dbg(fragm, "<REC<Oversized: ");
+			buf_discard(fbuf);
+			return 0;
+		}
+		pbuf = buf_acquire(msg_size(imsg));
+		if (pbuf != NULL) {
+			pbuf->next = *pending;
+			*pending = pbuf;
+			memcpy(pbuf->data, (unchar *)imsg, msg_data_sz(fragm));
+
+			/*  Prepare buffer for subsequent fragments. */
+
+			set_long_msg_seqno(pbuf, long_msg_seq_no);
+			set_fragm_size(pbuf, fragm_sz);
+			set_expected_frags(pbuf, exp_fragm_cnt - 1);
+		} else {
+			warn("Memory squeeze; got no defragmenting buffer\n");
+		}
+		buf_discard(fbuf);
+		return 0;
+	} else if (pbuf && (msg_type(fragm) != FIRST_FRAGMENT)) {
+		u32 dsz = msg_data_sz(fragm);
+		u32 fsz = get_fragm_size(pbuf);
+		u32 crs = ((msg_fragm_no(fragm) - 1) * fsz);
+		u32 exp_frags = get_expected_frags(pbuf) - 1;
+		memcpy(pbuf->data + crs, msg_data(fragm), dsz);
+		buf_discard(fbuf);
+
+		/* Is message complete? */
+
+		if (exp_frags == 0) {
+			if (prev)
+				prev->next = pbuf->next;
+			else
+				*pending = pbuf->next;
+			msg_reset_reroute_cnt(buf_msg(pbuf));
+			*fb = pbuf;
+			*m = buf_msg(pbuf);
+			return 1;
+		}
+		set_expected_frags(pbuf, exp_frags);
+		return 0;
+	}
+	dbg(" Discarding orphan fragment %p\n", fbuf);
+	msg_dbg(fragm, "ORPHAN:");
+	dbg("Pending long buffers:\n");
+	dbg_print_buf_chain(*pending);
+	buf_discard(fbuf);
+	return 0;
+}
+
+/**
+ * link_check_defragm_bufs - flush stale incoming message fragments
+ * @l_ptr: pointer to link
+ */
+
+static void link_check_defragm_bufs(struct link *l_ptr)
+{
+	struct sk_buff *prev = NULL;
+	struct sk_buff *next = NULL;
+	struct sk_buff *buf = l_ptr->defragm_buf;
+
+	if (!buf)
+		return;
+	if (!link_working_working(l_ptr))
+		return;
+	while (buf) {
+		u32 cnt = get_timer_cnt(buf);
+
+		next = buf->next;
+		if (cnt < 4) {
+			incr_timer_cnt(buf);
+			prev = buf;
+		} else {
+			dbg(" Discarding incomplete long buffer\n");
+			msg_dbg(buf_msg(buf), "LONG:");
+			dbg_print_link(l_ptr, "curr:");
+			dbg("Pending long buffers:\n");
+			dbg_print_buf_chain(l_ptr->defragm_buf);
+			if (prev)
+				prev->next = buf->next;
+			else
+				l_ptr->defragm_buf = buf->next;
+			buf_discard(buf);
+		}
+		buf = next;
+	}
+}
+
+
+
+static void link_set_supervision_props(struct link *l_ptr, u32 tolerance)
+{
+	l_ptr->tolerance = tolerance;
+	l_ptr->continuity_interval =
+		((tolerance / 4) > 500) ? 500 : tolerance / 4;
+	l_ptr->abort_limit = tolerance / (l_ptr->continuity_interval / 4);
+}
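+
+/*
+ * Example (illustrative): a tolerance of 3000 ms gives a continuity
+ * interval of 500 ms (the cap, since 3000/4 > 500) and an abort
+ * limit of 3000 / (500/4) == 24 unanswered probes.
+ */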
+
+
+void link_set_queue_limits(struct link *l_ptr, u32 window)
+{
+	/* Data messages from this node, including FIRST_FRAGM */
+	l_ptr->queue_limit[DATA_LOW] = window;
+	l_ptr->queue_limit[DATA_MEDIUM] = (window / 3) * 4;
+	l_ptr->queue_limit[DATA_HIGH] = (window / 3) * 5;
+	l_ptr->queue_limit[DATA_CRITICAL] = (window / 3) * 6;
+	/* Transiting data messages, including FIRST_FRAGM */
+	l_ptr->queue_limit[DATA_LOW + 4] = 300;
+	l_ptr->queue_limit[DATA_MEDIUM + 4] = 600;
+	l_ptr->queue_limit[DATA_HIGH + 4] = 900;
+	l_ptr->queue_limit[DATA_CRITICAL + 4] = 1200;
+	l_ptr->queue_limit[CONN_MANAGER] = 1200;
+	l_ptr->queue_limit[ROUTE_DISTRIBUTOR] = 1200;
+	l_ptr->queue_limit[CHANGEOVER_PROTOCOL] = 2500;
+	l_ptr->queue_limit[NAME_DISTRIBUTOR] = 3000;
+	/* FRAGMENT and LAST_FRAGMENT packets */
+	l_ptr->queue_limit[MSG_FRAGMENTER] = 4000;
+}
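+
+/*
+ * Example (illustrative): window == 50 gives send queue limits of
+ * 50, 64, 80 and 96 packets for DATA_LOW through DATA_CRITICAL
+ * traffic originating at this node (note the integer division:
+ * (50 / 3) * 4 == 64).
+ */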
+
+/**
+ * link_find_link - locate link by name
+ * @name: ptr to link name string
+ * @node: ptr to area to be filled with ptr to associated node
+ * 
+ * Caller must hold 'net_lock' to ensure node and bearer are not deleted;
+ * this also prevents link deletion.
+ * 
+ * Returns pointer to link (or NULL if invalid link name).
+ */
+
+static struct link *link_find_link(const char *name, struct node **node)
+{
+	struct link_name link_name_parts;
+	struct bearer *b_ptr;
+	struct link *l_ptr; 
+
+	if (!link_name_validate(name, &link_name_parts))
+		return NULL;
+
+	b_ptr = bearer_find_interface(link_name_parts.if_local);
+	if (!b_ptr)
+		return NULL;
+
+	*node = node_find(link_name_parts.addr_peer);
+	if (!*node)
+		return NULL;
+
+	l_ptr = (*node)->links[b_ptr->identity];
+	if (!l_ptr || strcmp(l_ptr->name, name))
+		return NULL;
+
+	return l_ptr;
+}
+
+struct sk_buff *link_cmd_config(const void *req_tlv_area, int req_tlv_space, 
+			        u16 cmd)
+{
+	struct tipc_link_config *args;
+	u32 new_value;
+	struct link *l_ptr;
+	struct node *node;
+	int res;
+
+	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_CONFIG))
+		return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+
+	args = (struct tipc_link_config *)TLV_DATA(req_tlv_area);
+	new_value = ntohl(args->value);
+
+	if (!strcmp(args->name, bc_link_name)) {
+		if ((cmd == TIPC_CMD_SET_LINK_WINDOW) &&
+		    (bclink_set_queue_limits(new_value) == 0))
+			return cfg_reply_none();
+		return cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
+					      " (cannot change setting on broadcast link)");
+	}
+
+	read_lock_bh(&net_lock);
+	l_ptr = link_find_link(args->name, &node); 
+	if (!l_ptr) {
+		read_unlock_bh(&net_lock);
+		return cfg_reply_error_string("link not found");
+	}
+
+	node_lock(node);
+	res = -EINVAL;
+	switch (cmd) {
+	case TIPC_CMD_SET_LINK_TOL: 
+		if ((new_value >= TIPC_MIN_LINK_TOL) && 
+		    (new_value <= TIPC_MAX_LINK_TOL)) {
+			link_set_supervision_props(l_ptr, new_value);
+			link_send_proto_msg(l_ptr, STATE_MSG, 
+					    0, 0, new_value, 0, 0);
+			res = TIPC_OK;
+		}
+		break;
+	case TIPC_CMD_SET_LINK_PRI: 
+		if (new_value < TIPC_NUM_LINK_PRI) {
+			l_ptr->priority = new_value;
+			link_send_proto_msg(l_ptr, STATE_MSG, 
+					    0, 0, 0, new_value, 0);
+			res = TIPC_OK;
+		}
+		break;
+	case TIPC_CMD_SET_LINK_WINDOW: 
+		if ((new_value >= TIPC_MIN_LINK_WIN) && 
+		    (new_value <= TIPC_MAX_LINK_WIN)) {
+			link_set_queue_limits(l_ptr, new_value);
+			res = TIPC_OK;
+		}
+		break;
+	}
+	node_unlock(node);
+
+	read_unlock_bh(&net_lock);
+	if (res)
+		return cfg_reply_error_string("cannot change link setting");
+
+	return cfg_reply_none();
+}
+
+/**
+ * link_reset_statistics - reset link statistics
+ * @l_ptr: pointer to link
+ */
+
+static void link_reset_statistics(struct link *l_ptr)
+{
+	memset(&l_ptr->stats, 0, sizeof(l_ptr->stats));
+	l_ptr->stats.sent_info = l_ptr->next_out_no;
+	l_ptr->stats.recv_info = l_ptr->next_in_no;
+}
+
+struct sk_buff *link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_space)
+{
+	char *link_name;
+	struct link *l_ptr; 
+	struct node *node;
+
+	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
+		return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+
+	link_name = (char *)TLV_DATA(req_tlv_area);
+	if (!strcmp(link_name, bc_link_name)) {
+		if (bclink_reset_stats())
+			return cfg_reply_error_string("link not found");
+		return cfg_reply_none();
+	}
+
+	read_lock_bh(&net_lock);
+	l_ptr = link_find_link(link_name, &node); 
+	if (!l_ptr) {
+		read_unlock_bh(&net_lock);
+		return cfg_reply_error_string("link not found");
+	}
+
+	node_lock(node);
+	link_reset_statistics(l_ptr);
+	node_unlock(node);
+	read_unlock_bh(&net_lock);
+	return cfg_reply_none();
+}
+
+/**
+ * percent - convert count to a percentage of total (rounding up or down)
+ */
+
+static u32 percent(u32 count, u32 total)
+{
+	return (count * 100 + (total / 2)) / total;
+}
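+
+/*
+ * E.g. percent(1, 3) == (100 + 1) / 3 == 33 and
+ * percent(2, 3) == (200 + 1) / 3 == 67, i.e. it rounds to nearest.
+ */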
+
+/**
+ * link_stats - print link statistics
+ * @name: link name
+ * @buf: print buffer area
+ * @buf_size: size of print buffer area
+ * 
+ * Returns length of print buffer data string (or 0 if error)
+ */
+
+static int link_stats(const char *name, char *buf, const u32 buf_size)
+{
+	struct print_buf pb;
+	struct link *l_ptr; 
+	struct node *node;
+	char *status;
+	u32 profile_total = 0;
+
+	if (!strcmp(name, bc_link_name))
+		return bclink_stats(buf, buf_size);
+
+	printbuf_init(&pb, buf, buf_size);
+
+	read_lock_bh(&net_lock);
+	l_ptr = link_find_link(name, &node); 
+	if (!l_ptr) {
+		read_unlock_bh(&net_lock);
+		return 0;
+	}
+	node_lock(node);
+
+	if (link_is_active(l_ptr))
+		status = "ACTIVE";
+	else if (link_is_up(l_ptr))
+		status = "STANDBY";
+	else
+		status = "DEFUNCT";
+	tipc_printf(&pb, "Link <%s>\n"
+		         "  %s  MTU:%u  Priority:%u  Tolerance:%u ms"
+		         "  Window:%u packets\n", 
+		    l_ptr->name, status, link_max_pkt(l_ptr), 
+		    l_ptr->priority, l_ptr->tolerance, l_ptr->queue_limit[0]);
+	tipc_printf(&pb, "  RX packets:%u fragments:%u/%u bundles:%u/%u\n", 
+		    l_ptr->next_in_no - l_ptr->stats.recv_info,
+		    l_ptr->stats.recv_fragments,
+		    l_ptr->stats.recv_fragmented,
+		    l_ptr->stats.recv_bundles,
+		    l_ptr->stats.recv_bundled);
+	tipc_printf(&pb, "  TX packets:%u fragments:%u/%u bundles:%u/%u\n", 
+		    l_ptr->next_out_no - l_ptr->stats.sent_info,
+		    l_ptr->stats.sent_fragments,
+		    l_ptr->stats.sent_fragmented, 
+		    l_ptr->stats.sent_bundles,
+		    l_ptr->stats.sent_bundled);
+	profile_total = l_ptr->stats.msg_length_counts;
+	if (!profile_total)
+		profile_total = 1;
+	tipc_printf(&pb, "  TX profile sample:%u packets  average:%u octets\n"
+		         "  0-64:%u%% -256:%u%% -1024:%u%% -4096:%u%% "
+		         "-16384:%u%% -32768:%u%% -66000:%u%%\n",
+		    l_ptr->stats.msg_length_counts,
+		    l_ptr->stats.msg_lengths_total / profile_total,
+		    percent(l_ptr->stats.msg_length_profile[0], profile_total),
+		    percent(l_ptr->stats.msg_length_profile[1], profile_total),
+		    percent(l_ptr->stats.msg_length_profile[2], profile_total),
+		    percent(l_ptr->stats.msg_length_profile[3], profile_total),
+		    percent(l_ptr->stats.msg_length_profile[4], profile_total),
+		    percent(l_ptr->stats.msg_length_profile[5], profile_total),
+		    percent(l_ptr->stats.msg_length_profile[6], profile_total));
+	tipc_printf(&pb, "  RX states:%u probes:%u naks:%u defs:%u dups:%u\n", 
+		    l_ptr->stats.recv_states,
+		    l_ptr->stats.recv_probes,
+		    l_ptr->stats.recv_nacks,
+		    l_ptr->stats.deferred_recv, 
+		    l_ptr->stats.duplicates);
+	tipc_printf(&pb, "  TX states:%u probes:%u naks:%u acks:%u dups:%u\n", 
+		    l_ptr->stats.sent_states, 
+		    l_ptr->stats.sent_probes, 
+		    l_ptr->stats.sent_nacks, 
+		    l_ptr->stats.sent_acks, 
+		    l_ptr->stats.retransmitted);
+	tipc_printf(&pb, "  Congestion bearer:%u link:%u  Send queue max:%u avg:%u\n",
+		    l_ptr->stats.bearer_congs,
+		    l_ptr->stats.link_congs, 
+		    l_ptr->stats.max_queue_sz,
+		    l_ptr->stats.queue_sz_counts
+		    ? (l_ptr->stats.accu_queue_sz / l_ptr->stats.queue_sz_counts)
+		    : 0);
+
+	node_unlock(node);
+	read_unlock_bh(&net_lock);
+	return printbuf_validate(&pb);
+}
+
+#define MAX_LINK_STATS_INFO 2000
+
+struct sk_buff *link_cmd_show_stats(const void *req_tlv_area, int req_tlv_space)
+{
+	struct sk_buff *buf;
+	struct tlv_desc *rep_tlv;
+	int str_len;
+
+	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
+		return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+
+	buf = cfg_reply_alloc(TLV_SPACE(MAX_LINK_STATS_INFO));
+	if (!buf)
+		return NULL;
+
+	rep_tlv = (struct tlv_desc *)buf->data;
+
+	str_len = link_stats((char *)TLV_DATA(req_tlv_area),
+			     (char *)TLV_DATA(rep_tlv), MAX_LINK_STATS_INFO);
+	if (!str_len) {
+		buf_discard(buf);
+		return cfg_reply_error_string("link not found");
+	}
+
+	skb_put(buf, TLV_SPACE(str_len));
+	TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
+
+	return buf;
+}
+
+#if 0
+int link_control(const char *name, u32 op, u32 val)
+{
+	int res = -EINVAL;
+	struct link *l_ptr;
+	u32 bearer_id;
+	struct node * node;
+	u32 a;
+
+	a = link_name2addr(name, &bearer_id);
+	read_lock_bh(&net_lock);
+	node = node_find(a);
+	if (node) {
+		node_lock(node);
+		l_ptr = node->links[bearer_id];
+		if (l_ptr) {
+			if (op == TIPC_REMOVE_LINK) {
+				struct bearer *b_ptr = l_ptr->b_ptr;
+				spin_lock_bh(&b_ptr->publ.lock);
+				link_delete(l_ptr);
+				spin_unlock_bh(&b_ptr->publ.lock);
+			}
+			if (op == TIPC_CMD_BLOCK_LINK) {
+				link_reset(l_ptr);
+				l_ptr->blocked = 1;
+			}
+			if (op == TIPC_CMD_UNBLOCK_LINK) {
+				l_ptr->blocked = 0;
+			}
+			res = TIPC_OK;
+		}
+		node_unlock(node);
+	}
+	read_unlock_bh(&net_lock);
+	return res;
+}
+#endif
+
+/**
+ * link_get_max_pkt - get maximum packet size to use when sending to destination
+ * @dest: network address of destination node
+ * @selector: used to select from set of active links
+ * 
+ * If no active link can be found, uses default maximum packet size.
+ */
+
+u32 link_get_max_pkt(u32 dest, u32 selector)
+{
+	struct node *n_ptr;
+	struct link *l_ptr;
+	u32 res = MAX_PKT_DEFAULT;
+	
+	if (dest == tipc_own_addr)
+		return MAX_MSG_SIZE;
+
+	read_lock_bh(&net_lock);        
+	n_ptr = node_select(dest, selector);
+	if (n_ptr) {
+		node_lock(n_ptr);
+		l_ptr = n_ptr->active_links[selector & 1];
+		if (l_ptr)
+			res = link_max_pkt(l_ptr);
+		node_unlock(n_ptr);
+	}
+	read_unlock_bh(&net_lock);       
+	return res;
+}
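+
+#if 0
+/*
+ * Usage sketch (illustrative only, not part of this patch): a sender
+ * would typically compare the result against the message size to decide
+ * whether fragmentation is needed.
+ */
+static int example_needs_fragmenting(struct sk_buff *buf, u32 dest)
+{
+	struct tipc_msg *msg = buf_msg(buf);
+
+	return msg_size(msg) > link_get_max_pkt(dest, msg_link_selector(msg));
+}
+#endif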
+
+#if 0
+static void link_dump_rec_queue(struct link *l_ptr)
+{
+	struct sk_buff *crs;
+
+	if (!l_ptr->oldest_deferred_in) {
+		info("Reception queue empty\n");
+		return;
+	}
+	info("Contents of Reception queue:\n");
+	crs = l_ptr->oldest_deferred_in;
+	while (crs) {
+		if (crs->data == (void *)0x0000a3a3) {
+			info("buffer %x invalid\n", crs);
+			return;
+		}
+		msg_dbg(buf_msg(crs), "In rec queue: \n");
+		crs = crs->next;
+	}
+}
+#endif
+
+static void link_dump_send_queue(struct link *l_ptr)
+{
+	if (l_ptr->next_out) {
+		info("\nContents of unsent queue:\n");
+		dbg_print_buf_chain(l_ptr->next_out);
+	}
+	if (l_ptr->first_out) {
+		info("\nContents of send queue:\n");
+		dbg_print_buf_chain(l_ptr->first_out);
+	} else {
+		info("Empty send queue\n");
+	}
+}
+
+static void link_print(struct link *l_ptr, struct print_buf *buf,
+		       const char *str)
+{
+	tipc_printf(buf, str);
+	if (link_reset_reset(l_ptr) || link_reset_unknown(l_ptr))
+		return;
+	tipc_printf(buf, "Link %x<%s>:",
+		    l_ptr->addr, l_ptr->b_ptr->publ.name);
+	tipc_printf(buf, ": NXO(%u):", mod(l_ptr->next_out_no));
+	tipc_printf(buf, "NXI(%u):", mod(l_ptr->next_in_no));
+	tipc_printf(buf, "SQUE");
+	if (l_ptr->first_out) {
+		tipc_printf(buf, "[%u..", msg_seqno(buf_msg(l_ptr->first_out)));
+		if (l_ptr->next_out)
+			tipc_printf(buf, "%u..",
+				    msg_seqno(buf_msg(l_ptr->next_out)));
+		tipc_printf(buf, "%u]",
+			    msg_seqno(buf_msg
+				      (l_ptr->last_out)), l_ptr->out_queue_size);
+		if ((mod(msg_seqno(buf_msg(l_ptr->last_out)) - 
+			 msg_seqno(buf_msg(l_ptr->first_out))) 
+		     != (l_ptr->out_queue_size - 1))
+		    || (l_ptr->last_out->next != 0)) {
+			tipc_printf(buf, "\nSend queue inconsistency\n");
+			tipc_printf(buf, "first_out= %x ", l_ptr->first_out);
+			tipc_printf(buf, "next_out= %x ", l_ptr->next_out);
+			tipc_printf(buf, "last_out= %x ", l_ptr->last_out);
+			link_dump_send_queue(l_ptr);
+		}
+	} else
+		tipc_printf(buf, "[]");
+	tipc_printf(buf, "SQSIZ(%u)", l_ptr->out_queue_size);
+	if (l_ptr->oldest_deferred_in) {
+		u32 o = msg_seqno(buf_msg(l_ptr->oldest_deferred_in));
+		u32 n = msg_seqno(buf_msg(l_ptr->newest_deferred_in));
+		tipc_printf(buf, ":RQUE[%u..%u]", o, n);
+		if (l_ptr->deferred_inqueue_sz != mod((n + 1) - o)) {
+			tipc_printf(buf, ":RQSIZ(%u)",
+				    l_ptr->deferred_inqueue_sz);
+		}
+	}
+	if (link_working_unknown(l_ptr))
+		tipc_printf(buf, ":WU");
+	if (link_reset_reset(l_ptr))
+		tipc_printf(buf, ":RR");
+	if (link_reset_unknown(l_ptr))
+		tipc_printf(buf, ":RU");
+	if (link_working_working(l_ptr))
+		tipc_printf(buf, ":WW");
+	tipc_printf(buf, "\n");
+}
+
diff --git a/net/tipc/link.h b/net/tipc/link.h
new file mode 100644
index 0000000..e7db347
--- /dev/null
+++ b/net/tipc/link.h
@@ -0,0 +1,293 @@
+/*
+ * net/tipc/link.h: Include file for TIPC link code
+ * 
+ * Copyright (c) 2003-2005, Ericsson Research Canada
+ * Copyright (c) 2004-2005, Wind River Systems
+ * Copyright (c) 2005-2006, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without 
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this 
+ * list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice, 
+ * this list of conditions and the following disclaimer in the documentation 
+ * and/or other materials provided with the distribution.
+ * Neither the names of the copyright holders nor the names of its 
+ * contributors may be used to endorse or promote products derived from this 
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_LINK_H
+#define _TIPC_LINK_H
+
+#include "dbg.h"
+#include "msg.h"
+#include "bearer.h"
+#include "node.h"
+
+#define PUSH_FAILED   1
+#define PUSH_FINISHED 2
+
+/* 
+ * Link states 
+ */
+
+#define WORKING_WORKING 560810u
+#define WORKING_UNKNOWN 560811u
+#define RESET_UNKNOWN   560812u
+#define RESET_RESET     560813u
+
+/* 
+ * Starting value for maximum packet size negotiation on unicast links
+ * (unless bearer MTU is less)
+ */
+
+#define MAX_PKT_DEFAULT 1500
+
+/**
+ * struct link - TIPC link data structure
+ * @addr: network address of link's peer node
+ * @name: link name character string
+ * @media_addr: media address to use when sending messages over link
+ * @timer: link timer
+ * @owner: pointer to peer node
+ * @link_list: adjacent links in bearer's list of links
+ * @started: indicates if link has been started
+ * @checkpoint: reference point for triggering link continuity checking
+ * @peer_session: link session # being used by peer end of link
+ * @peer_bearer_id: bearer id used by link's peer endpoint
+ * @b_ptr: pointer to bearer used by link
+ * @tolerance: minimum link continuity loss needed to reset link [in ms] 
+ * @continuity_interval: link continuity testing interval [in ms]
+ * @abort_limit: # of unacknowledged continuity probes needed to reset link
+ * @state: current state of link FSM
+ * @blocked: indicates if link has been administratively blocked
+ * @fsm_msg_cnt: # of protocol messages link FSM has sent in current state
+ * @proto_msg: template for control messages generated by link
+ * @pmsg: convenience pointer to "proto_msg" field
+ * @priority: current link priority
+ * @queue_limit: outbound message queue congestion thresholds (indexed by user)
+ * @exp_msg_count: # of tunnelled messages expected during link changeover
+ * @reset_checkpoint: seq # of last acknowledged message at time of link reset
+ * @max_pkt: current maximum packet size for this link
+ * @max_pkt_target: desired maximum packet size for this link
+ * @max_pkt_probes: # of probes based on current (max_pkt, max_pkt_target)
+ * @out_queue_size: # of messages in outbound message queue
+ * @first_out: ptr to first outbound message in queue
+ * @last_out: ptr to last outbound message in queue
+ * @next_out_no: next sequence number to use for outbound messages
+ * @last_retransmitted: sequence number of most recently retransmitted message
+ * @stale_count: # of identical retransmit requests made by peer
+ * @next_in_no: next sequence number to expect for inbound messages
+ * @deferred_inqueue_sz: # of messages in inbound message queue
+ * @oldest_deferred_in: ptr to first inbound message in queue
+ * @newest_deferred_in: ptr to last inbound message in queue
+ * @unacked_window: # of inbound messages rx'd without ack'ing back to peer
+ * @proto_msg_queue: ptr to (single) outbound control message
+ * @retransm_queue_size: number of messages to retransmit
+ * @retransm_queue_head: sequence number of first message to retransmit
+ * @next_out: ptr to first unsent outbound message in queue
+ * @waiting_ports: linked list of ports waiting for link congestion to abate
+ * @long_msg_seq_no: next identifier to use for outbound fragmented messages
+ * @defragm_buf: list of partially reassembled inbound message fragments
+ * @stats: collects statistics regarding link activity
+ * @print_buf: print buffer used to log link activity
+ */
+ 
+struct link {
+	u32 addr;
+	char name[TIPC_MAX_LINK_NAME];
+	struct tipc_media_addr media_addr;
+	struct timer_list timer;
+	struct node *owner;
+	struct list_head link_list;
+
+	/* Management and link supervision data */
+	int started;
+	u32 checkpoint;
+	u32 peer_session;
+	u32 peer_bearer_id;
+	struct bearer *b_ptr;
+	u32 tolerance;
+	u32 continuity_interval;
+	u32 abort_limit;
+	int state;
+	int blocked;
+	u32 fsm_msg_cnt;
+	struct {
+		unchar hdr[INT_H_SIZE];
+		unchar body[TIPC_MAX_IF_NAME];
+	} proto_msg;
+	struct tipc_msg *pmsg;
+	u32 priority;
+	u32 queue_limit[15];	/* queue_limit[0]==window limit */
+
+	/* Changeover */
+	u32 exp_msg_count;
+	u32 reset_checkpoint;
+
+	/* Max packet negotiation */
+	u32 max_pkt;
+	u32 max_pkt_target;
+	u32 max_pkt_probes;
+
+	/* Sending */
+	u32 out_queue_size;
+	struct sk_buff *first_out;
+	struct sk_buff *last_out;
+	u32 next_out_no;
+	u32 last_retransmitted;
+	u32 stale_count;
+
+	/* Reception */
+	u32 next_in_no;
+	u32 deferred_inqueue_sz;
+	struct sk_buff *oldest_deferred_in;
+	struct sk_buff *newest_deferred_in;
+	u32 unacked_window;
+
+	/* Congestion handling */
+	struct sk_buff *proto_msg_queue;
+	u32 retransm_queue_size;
+	u32 retransm_queue_head;
+	struct sk_buff *next_out;
+	struct list_head waiting_ports;
+
+	/* Fragmentation/defragmentation */
+	u32 long_msg_seq_no;
+	struct sk_buff *defragm_buf;
+
+	/* Statistics */
+	struct {
+		u32 sent_info;		/* used in counting # sent packets */
+		u32 recv_info;		/* used in counting # recv'd packets */
+		u32 sent_states;
+		u32 recv_states;
+		u32 sent_probes;
+		u32 recv_probes;
+		u32 sent_nacks;
+		u32 recv_nacks;
+		u32 sent_acks;
+		u32 sent_bundled;
+		u32 sent_bundles;
+		u32 recv_bundled;
+		u32 recv_bundles;
+		u32 retransmitted;
+		u32 sent_fragmented;
+		u32 sent_fragments;
+		u32 recv_fragmented;
+		u32 recv_fragments;
+		u32 link_congs;		/* # port sends blocked by congestion */
+		u32 bearer_congs;
+		u32 deferred_recv;
+		u32 duplicates;
+
+		/* for statistical profiling of send queue size */
+
+		u32 max_queue_sz;
+		u32 accu_queue_sz;
+		u32 queue_sz_counts;
+
+		/* for statistical profiling of message lengths */
+
+		u32 msg_length_counts;
+		u32 msg_lengths_total;
+		u32 msg_length_profile[7];
+#if 0
+		u32 sent_tunneled;
+		u32 recv_tunneled;
+#endif
+	} stats;
+
+	struct print_buf print_buf;
+};
+
+struct port;
+
+struct link *link_create(struct bearer *b_ptr, const u32 peer,
+			 const struct tipc_media_addr *media_addr);
+void link_delete(struct link *l_ptr);
+void link_changeover(struct link *l_ptr);
+void link_send_duplicate(struct link *l_ptr, struct link *dest);
+void link_reset_fragments(struct link *l_ptr);
+int link_is_up(struct link *l_ptr);
+int link_is_active(struct link *l_ptr);
+void link_start(struct link *l_ptr);
+u32 link_push_packet(struct link *l_ptr);
+void link_stop(struct link *l_ptr);
+struct sk_buff *link_cmd_config(const void *req_tlv_area, int req_tlv_space, u16 cmd);
+struct sk_buff *link_cmd_show_stats(const void *req_tlv_area, int req_tlv_space);
+struct sk_buff *link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_space);
+void link_reset(struct link *l_ptr);
+int link_send(struct sk_buff *buf, u32 dest, u32 selector);
+int link_send_buf(struct link *l_ptr, struct sk_buff *buf);
+u32 link_get_max_pkt(u32 dest,u32 selector);
+int link_send_sections_fast(struct port* sender, 
+			    struct iovec const *msg_sect,
+			    const u32 num_sect, 
+			    u32 destnode);
+
+int link_send_long_buf(struct link *l_ptr, struct sk_buff *buf);
+void link_tunnel(struct link *l_ptr, struct tipc_msg *tnl_hdr,
+		 struct tipc_msg *msg, u32 selector);
+void link_recv_bundle(struct sk_buff *buf);
+int  link_recv_fragment(struct sk_buff **pending,
+			struct sk_buff **fb,
+			struct tipc_msg **msg);
+void link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int prob, u32 gap, 
+			 u32 tolerance, u32 priority, u32 acked_mtu);
+void link_push_queue(struct link *l_ptr);
+u32 link_defer_pkt(struct sk_buff **head, struct sk_buff **tail,
+		   struct sk_buff *buf);
+void link_wakeup_ports(struct link *l_ptr, int all);
+void link_set_queue_limits(struct link *l_ptr, u32 window);
+void link_retransmit(struct link *l_ptr, struct sk_buff *start, u32 retransmits);
+
+/*
+ * Link sequence number manipulation routines (uses modulo 2**16 arithmetic)
+ */
+
+static inline u32 mod(u32 x)
+{
+	return x & 0xffffu;
+}
+
+static inline int between(u32 lower, u32 upper, u32 n)
+{
+	if ((lower < n) && (n < upper))
+		return 1;
+	if ((upper < lower) && ((n > lower) || (n < upper)))
+		return 1;
+	return 0;
+}
+
+static inline int less_eq(u32 left, u32 right)
+{
+	return (mod(right - left) < 32768u);
+}
+
+static inline int less(u32 left, u32 right)
+{
+	return (less_eq(left, right) && (mod(right) != mod(left)));
+}
+
+static inline u32 lesser(u32 left, u32 right)
+{
+	return less_eq(left, right) ? left : right;
+}
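+
+/*
+ * Example: the helpers above stay consistent across the 16-bit wrap
+ * point.  With sequence numbers 0xfffe and 0x0001:
+ *
+ *	less(0xfffe, 0x0001)            == 1 (0x0001 comes "after" 0xfffe)
+ *	mod(0x0001 - 0xfffe)            == 3 (forward distance across wrap)
+ *	between(0xfffe, 0x0002, 0x0000) == 1 (0x0000 lies within the gap)
+ */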
+
+#endif
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
new file mode 100644
index 0000000..c6e90ef
--- /dev/null
+++ b/net/tipc/msg.c
@@ -0,0 +1,331 @@
+/*
+ * net/tipc/msg.c: TIPC message header routines
+ *     
+ * Copyright (c) 2003-2005, Ericsson Research Canada
+ * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005-2006, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without 
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this 
+ * list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice, 
+ * this list of conditions and the following disclaimer in the documentation 
+ * and/or other materials provided with the distribution.
+ * Neither the names of the copyright holders nor the names of its 
+ * contributors may be used to endorse or promote products derived from this 
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "addr.h"
+#include "dbg.h"
+#include "msg.h"
+#include "bearer.h"
+
+
+void msg_set_media_addr(struct tipc_msg *m, struct tipc_media_addr *a)
+{
+	memcpy(&((int *)m)[5], a, sizeof(*a));
+}
+
+void msg_get_media_addr(struct tipc_msg *m, struct tipc_media_addr *a)
+{
+	memcpy(a, &((int*)m)[5], sizeof(*a));
+}
+
+
+void msg_print(struct print_buf *buf, struct tipc_msg *msg, const char *str)
+{
+	u32 usr = msg_user(msg);
+	tipc_printf(buf, str);
+
+	switch (usr) {
+	case MSG_BUNDLER:
+		tipc_printf(buf, "BNDL::");
+		tipc_printf(buf, "MSGS(%u):", msg_msgcnt(msg));
+		break;
+	case BCAST_PROTOCOL:
+		tipc_printf(buf, "BCASTP::");
+		break;
+	case MSG_FRAGMENTER:
+		tipc_printf(buf, "FRAGM::");
+		switch (msg_type(msg)) {
+		case FIRST_FRAGMENT:
+			tipc_printf(buf, "FIRST:");
+			break;
+		case FRAGMENT:
+			tipc_printf(buf, "BODY:");
+			break;
+		case LAST_FRAGMENT:
+			tipc_printf(buf, "LAST:");
+			break;
+		default:
+			tipc_printf(buf, "UNKNOWN:%x",msg_type(msg));
+
+		}
+		tipc_printf(buf, "NO(%u/%u):",msg_long_msgno(msg),
+			    msg_fragm_no(msg));
+		break;
+	case DATA_LOW:
+	case DATA_MEDIUM:
+	case DATA_HIGH:
+	case DATA_CRITICAL:
+		tipc_printf(buf, "DAT%u:", msg_user(msg));
+		if (msg_short(msg)) {
+			tipc_printf(buf, "CON:");
+			break;
+		}
+		switch (msg_type(msg)) {
+		case TIPC_CONN_MSG:
+			tipc_printf(buf, "CON:");
+			break;
+		case TIPC_MCAST_MSG:
+			tipc_printf(buf, "MCST:");
+			break;
+		case TIPC_NAMED_MSG:
+			tipc_printf(buf, "NAM:");
+			break;
+		case TIPC_DIRECT_MSG:
+			tipc_printf(buf, "DIR:");
+			break;
+		default:
+			tipc_printf(buf, "UNKNOWN TYPE %u",msg_type(msg));
+		}
+		if (msg_routed(msg) && !msg_non_seq(msg))
+			tipc_printf(buf, "ROUT:");
+		if (msg_reroute_cnt(msg))
+			tipc_printf(buf, "REROUTED(%u):",
+				    msg_reroute_cnt(msg));
+		break;
+	case NAME_DISTRIBUTOR:
+		tipc_printf(buf, "NMD::");
+		switch (msg_type(msg)) {
+		case PUBLICATION:
+			tipc_printf(buf, "PUBL(%u):", (msg_size(msg) - msg_hdr_sz(msg)) / 20);	/* Items */
+			break;
+		case WITHDRAWAL:
+			tipc_printf(buf, "WDRW:");
+			break;
+		default:
+			tipc_printf(buf, "UNKNOWN:%x",msg_type(msg));
+		}
+		if (msg_routed(msg))
+			tipc_printf(buf, "ROUT:");
+		if (msg_reroute_cnt(msg))
+			tipc_printf(buf, "REROUTED(%u):",
+				    msg_reroute_cnt(msg));
+		break;
+	case CONN_MANAGER:
+		tipc_printf(buf, "CONN_MNG:");
+		switch (msg_type(msg)) {
+		case CONN_PROBE:
+			tipc_printf(buf, "PROBE:");
+			break;
+		case CONN_PROBE_REPLY:
+			tipc_printf(buf, "PROBE_REPLY:");
+			break;
+		case CONN_ACK:
+			tipc_printf(buf, "CONN_ACK:");
+			tipc_printf(buf, "ACK(%u):",msg_msgcnt(msg));
+			break;
+		default:
+			tipc_printf(buf, "UNKNOWN TYPE:%x",msg_type(msg));
+		}
+		if (msg_routed(msg))
+			tipc_printf(buf, "ROUT:");
+		if (msg_reroute_cnt(msg))
+			tipc_printf(buf, "REROUTED(%u):",msg_reroute_cnt(msg));
+		break;
+	case LINK_PROTOCOL:
+		tipc_printf(buf, "PROT:TIM(%u):",msg_timestamp(msg));
+		switch (msg_type(msg)) {
+		case STATE_MSG:
+			tipc_printf(buf, "STATE:");
+			tipc_printf(buf, "%s:",msg_probe(msg) ? "PRB" :"");
+			tipc_printf(buf, "NXS(%u):",msg_next_sent(msg));
+			tipc_printf(buf, "GAP(%u):",msg_seq_gap(msg));
+			tipc_printf(buf, "LSTBC(%u):",msg_last_bcast(msg));
+			break;
+		case RESET_MSG:
+			tipc_printf(buf, "RESET:");
+			if (msg_size(msg) != msg_hdr_sz(msg))
+				tipc_printf(buf, "BEAR:%s:",msg_data(msg));
+			break;
+		case ACTIVATE_MSG:
+			tipc_printf(buf, "ACTIVATE:");
+			break;
+		default:
+			tipc_printf(buf, "UNKNOWN TYPE:%x",msg_type(msg));
+		}
+		tipc_printf(buf, "PLANE(%c):",msg_net_plane(msg));
+		tipc_printf(buf, "SESS(%u):",msg_session(msg));
+		break;
+	case CHANGEOVER_PROTOCOL:
+		tipc_printf(buf, "TUNL:");
+		switch (msg_type(msg)) {
+		case DUPLICATE_MSG:
+			tipc_printf(buf, "DUPL:");
+			break;
+		case ORIGINAL_MSG:
+			tipc_printf(buf, "ORIG:");
+			tipc_printf(buf, "EXP(%u)",msg_msgcnt(msg));
+			break;
+		default:
+			tipc_printf(buf, "UNKNOWN TYPE:%x",msg_type(msg));
+		}
+		break;
+	case ROUTE_DISTRIBUTOR:
+		tipc_printf(buf, "ROUTING_MNG:");
+		switch (msg_type(msg)) {
+		case EXT_ROUTING_TABLE:
+			tipc_printf(buf, "EXT_TBL:");
+			tipc_printf(buf, "TO:%x:",msg_remote_node(msg));
+			break;
+		case LOCAL_ROUTING_TABLE:
+			tipc_printf(buf, "LOCAL_TBL:");
+			tipc_printf(buf, "TO:%x:",msg_remote_node(msg));
+			break;
+		case SLAVE_ROUTING_TABLE:
+			tipc_printf(buf, "DP_TBL:");
+			tipc_printf(buf, "TO:%x:",msg_remote_node(msg));
+			break;
+		case ROUTE_ADDITION:
+			tipc_printf(buf, "ADD:");
+			tipc_printf(buf, "TO:%x:",msg_remote_node(msg));
+			break;
+		case ROUTE_REMOVAL:
+			tipc_printf(buf, "REMOVE:");
+			tipc_printf(buf, "TO:%x:",msg_remote_node(msg));
+			break;
+		default:
+			tipc_printf(buf, "UNKNOWN TYPE:%x",msg_type(msg));
+		}
+		break;
+	case LINK_CONFIG:
+		tipc_printf(buf, "CFG:");
+		switch (msg_type(msg)) {
+		case DSC_REQ_MSG:
+			tipc_printf(buf, "DSC_REQ:");
+			break;
+		case DSC_RESP_MSG:
+			tipc_printf(buf, "DSC_RESP:");
+			break;
+		default:
+			tipc_printf(buf, "UNKNOWN TYPE:%x:",msg_type(msg));
+			break;
+		}
+		break;
+	default:
+		tipc_printf(buf, "UNKNOWN USER:");
+	}
+
+	switch (usr) {
+	case CONN_MANAGER:
+	case NAME_DISTRIBUTOR:
+	case DATA_LOW:
+	case DATA_MEDIUM:
+	case DATA_HIGH:
+	case DATA_CRITICAL:
+		if (msg_short(msg))
+			break;	/* No error */
+		switch (msg_errcode(msg)) {
+		case TIPC_OK:
+			break;
+		case TIPC_ERR_NO_NAME:
+			tipc_printf(buf, "NO_NAME:");
+			break;
+		case TIPC_ERR_NO_PORT:
+			tipc_printf(buf, "NO_PORT:");
+			break;
+		case TIPC_ERR_NO_NODE:
+			tipc_printf(buf, "NO_PROC:");
+			break;
+		case TIPC_ERR_OVERLOAD:
+			tipc_printf(buf, "OVERLOAD:");
+			break;
+		case TIPC_CONN_SHUTDOWN:
+			tipc_printf(buf, "SHUTDOWN:");
+			break;
+		default:
+			tipc_printf(buf, "UNKNOWN ERROR(%x):",
+				    msg_errcode(msg));
+		}
+	default:{}
+	}
+
+	tipc_printf(buf, "HZ(%u):", msg_hdr_sz(msg));
+	tipc_printf(buf, "SZ(%u):", msg_size(msg));
+	tipc_printf(buf, "SQNO(%u):", msg_seqno(msg));
+
+	if (msg_non_seq(msg))
+		tipc_printf(buf, "NOSEQ:");
+	else {
+		tipc_printf(buf, "ACK(%u):", msg_ack(msg));
+	}
+	tipc_printf(buf, "BACK(%u):", msg_bcast_ack(msg));
+	tipc_printf(buf, "PRND(%x)", msg_prevnode(msg));
+
+	if (msg_isdata(msg)) {
+		if (msg_named(msg)) {
+			tipc_printf(buf, "NTYP(%u):", msg_nametype(msg));
+			tipc_printf(buf, "NINST(%u)", msg_nameinst(msg));
+		}
+	}
+
+	if ((usr != LINK_PROTOCOL) && (usr != LINK_CONFIG) &&
+	    (usr != MSG_BUNDLER)) {
+		if (!msg_short(msg)) {
+			tipc_printf(buf, ":ORIG(%x:%u):",
+				    msg_orignode(msg), msg_origport(msg));
+			tipc_printf(buf, ":DEST(%x:%u):",
+				    msg_destnode(msg), msg_destport(msg));
+		} else {
+			tipc_printf(buf, ":OPRT(%u):", msg_origport(msg));
+			tipc_printf(buf, ":DPRT(%u):", msg_destport(msg));
+		}
+		if (msg_routed(msg) && !msg_non_seq(msg))
+			tipc_printf(buf, ":TSEQN(%u)", msg_transp_seqno(msg));
+	}
+	if (msg_user(msg) == NAME_DISTRIBUTOR) {
+		tipc_printf(buf, ":ONOD(%x):", msg_orignode(msg));
+		tipc_printf(buf, ":DNOD(%x):", msg_destnode(msg));
+		if (msg_routed(msg)) {
+			tipc_printf(buf, ":CSEQN(%u)", msg_transp_seqno(msg));
+		}
+	}
+
+	if (msg_user(msg) ==  LINK_CONFIG) {
+		u32* raw = (u32*)msg;
+		struct tipc_media_addr* orig = (struct tipc_media_addr*)&raw[5];
+		tipc_printf(buf, ":REQL(%u):", msg_req_links(msg));
+		tipc_printf(buf, ":DDOM(%x):", msg_dest_domain(msg));
+		tipc_printf(buf, ":NETID(%u):", msg_bc_netid(msg));
+		media_addr_printf(buf, orig);
+	}
+	if (msg_user(msg) == BCAST_PROTOCOL) {
+		tipc_printf(buf, "BCNACK:AFTER(%u):", msg_bcgap_after(msg));
+		tipc_printf(buf, "TO(%u):", msg_bcgap_to(msg));
+	}
+	tipc_printf(buf, "\n");
+	if ((usr == CHANGEOVER_PROTOCOL) && (msg_msgcnt(msg))) {
+		msg_print(buf,msg_get_wrapped(msg),"      /");
+	}
+	if ((usr == MSG_FRAGMENTER) && (msg_type(msg) == FIRST_FRAGMENT)) {
+		msg_print(buf,msg_get_wrapped(msg),"      /");
+	}
+}
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
new file mode 100644
index 0000000..3dcd23f
--- /dev/null
+++ b/net/tipc/msg.h
@@ -0,0 +1,815 @@
+/*
+ * net/tipc/msg.h: Include file for TIPC message header routines
+ * 
+ * Copyright (c) 2003-2005, Ericsson Research Canada
+ * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005-2006, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without 
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this 
+ * list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice, 
+ * this list of conditions and the following disclaimer in the documentation 
+ * and/or other materials provided with the distribution.
+ * Neither the names of the copyright holders nor the names of its 
+ * contributors may be used to endorse or promote products derived from this 
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_MSG_H
+#define _TIPC_MSG_H
+
+#include <net/tipc/tipc_msg.h>
+
+#define TIPC_VERSION              2
+#define DATA_LOW                  TIPC_LOW_IMPORTANCE
+#define DATA_MEDIUM               TIPC_MEDIUM_IMPORTANCE
+#define DATA_HIGH                 TIPC_HIGH_IMPORTANCE
+#define DATA_CRITICAL             TIPC_CRITICAL_IMPORTANCE
+#define SHORT_H_SIZE              24	/* Connected, in cluster */
+#define DIR_MSG_H_SIZE            32	/* Directly addressed messages */
+#define CONN_MSG_H_SIZE           36	/* Routed connected msgs */
+#define LONG_H_SIZE               40	/* Named Messages */
+#define MCAST_H_SIZE              44	/* Multicast messages */
+#define MAX_H_SIZE                60	/* Inclusive full options */
+#define MAX_MSG_SIZE (MAX_H_SIZE + TIPC_MAX_USER_MSG_SIZE)
+
+
+/*
+		TIPC user data message header format, version 2
+		
+	- Fundamental definitions available to privileged TIPC users
+	  are located in tipc_msg.h.
+	- Remaining definitions available to TIPC internal users appear below. 
+*/
+
+
+static inline void msg_set_word(struct tipc_msg *m, u32 w, u32 val)
+{
+	m->hdr[w] = htonl(val);
+}
+
+static inline void msg_set_bits(struct tipc_msg *m, u32 w,
+				u32 pos, u32 mask, u32 val)
+{
+	u32 word = msg_word(m,w) & ~(mask << pos);
+	msg_set_word(m, w, (word |= (val << pos)));
+}
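+
+/*
+ * Example: msg_set_bits(m, 0, 25, 0xf, n) clears the 4-bit field at bit
+ * offset 25 of header word 0 and stores "n" there; msg_word() and
+ * msg_set_word() keep the stored word in network byte order throughout.
+ */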
+
+/* 
+ * Word 0
+ */
+
+static inline u32 msg_version(struct tipc_msg *m)
+{
+	return msg_bits(m, 0, 29, 7);
+}
+
+static inline void msg_set_version(struct tipc_msg *m) 
+{
+	msg_set_bits(m, 0, 29, 7, TIPC_VERSION);
+}
+
+static inline u32 msg_user(struct tipc_msg *m)
+{
+	return msg_bits(m, 0, 25, 0xf);
+}
+
+static inline u32 msg_isdata(struct tipc_msg *m)
+{
+	return (msg_user(m) <= DATA_CRITICAL);
+}
+
+static inline void msg_set_user(struct tipc_msg *m, u32 n) 
+{
+	msg_set_bits(m, 0, 25, 0xf, n);
+}
+
+static inline void msg_set_importance(struct tipc_msg *m, u32 i) 
+{
+	msg_set_user(m, i);
+}
+
+static inline void msg_set_hdr_sz(struct tipc_msg *m,u32 n) 
+{
+	msg_set_bits(m, 0, 21, 0xf, n>>2);
+}
+
+static inline int msg_non_seq(struct tipc_msg *m) 
+{
+	return msg_bits(m, 0, 20, 1);
+}
+
+static inline void msg_set_non_seq(struct tipc_msg *m) 
+{
+	msg_set_bits(m, 0, 20, 1, 1);
+}
+
+static inline int msg_dest_droppable(struct tipc_msg *m) 
+{
+	return msg_bits(m, 0, 19, 1);
+}
+
+static inline void msg_set_dest_droppable(struct tipc_msg *m, u32 d) 
+{
+	msg_set_bits(m, 0, 19, 1, d);
+}
+
+static inline int msg_src_droppable(struct tipc_msg *m) 
+{
+	return msg_bits(m, 0, 18, 1);
+}
+
+static inline void msg_set_src_droppable(struct tipc_msg *m, u32 d) 
+{
+	msg_set_bits(m, 0, 18, 1, d);
+}
+
+static inline void msg_set_size(struct tipc_msg *m, u32 sz)
+{
+	m->hdr[0] = htonl((msg_word(m, 0) & ~0x1ffff) | sz);
+}
+
+
+/* 
+ * Word 1
+ */
+
+static inline void msg_set_type(struct tipc_msg *m, u32 n) 
+{
+	msg_set_bits(m, 1, 29, 0x7, n);
+}
+
+static inline void msg_set_errcode(struct tipc_msg *m, u32 err) 
+{
+	msg_set_bits(m, 1, 25, 0xf, err);
+}
+
+static inline u32 msg_reroute_cnt(struct tipc_msg *m) 
+{
+	return msg_bits(m, 1, 21, 0xf);
+}
+
+static inline void msg_incr_reroute_cnt(struct tipc_msg *m) 
+{
+	msg_set_bits(m, 1, 21, 0xf, msg_reroute_cnt(m) + 1);
+}
+
+static inline void msg_reset_reroute_cnt(struct tipc_msg *m) 
+{
+	msg_set_bits(m, 1, 21, 0xf, 0);
+}
+
+static inline u32 msg_lookup_scope(struct tipc_msg *m)
+{
+	return msg_bits(m, 1, 19, 0x3);
+}
+
+static inline void msg_set_lookup_scope(struct tipc_msg *m, u32 n) 
+{
+	msg_set_bits(m, 1, 19, 0x3, n);
+}
+
+static inline void msg_set_options(struct tipc_msg *m, const char *opt, u32 sz) 
+{
+	u32 hsz = msg_hdr_sz(m);
+	char *to = (char *)&m->hdr[hsz/4];
+
+	if ((hsz < DIR_MSG_H_SIZE) || ((hsz + sz) > MAX_H_SIZE))
+		return;
+	msg_set_bits(m, 1, 16, 0x7, (hsz - 28)/4);
+	msg_set_hdr_sz(m, hsz + sz);
+	memcpy(to, opt, sz);
+}
+
+static inline u32 msg_bcast_ack(struct tipc_msg *m)
+{
+	return msg_bits(m, 1, 0, 0xffff);
+}
+
+static inline void msg_set_bcast_ack(struct tipc_msg *m, u32 n) 
+{
+	msg_set_bits(m, 1, 0, 0xffff, n);
+}
+
+
+/* 
+ * Word 2
+ */
+
+static inline u32 msg_ack(struct tipc_msg *m)
+{
+	return msg_bits(m, 2, 16, 0xffff);
+}
+
+static inline void msg_set_ack(struct tipc_msg *m, u32 n) 
+{
+	msg_set_bits(m, 2, 16, 0xffff, n);
+}
+
+static inline u32 msg_seqno(struct tipc_msg *m)
+{
+	return msg_bits(m, 2, 0, 0xffff);
+}
+
+static inline void msg_set_seqno(struct tipc_msg *m, u32 n) 
+{
+	msg_set_bits(m, 2, 0, 0xffff, n);
+}
+
+
+/* 
+ * Words 3-10
+ */
+
+
+static inline void msg_set_prevnode(struct tipc_msg *m, u32 a) 
+{
+	msg_set_word(m, 3, a);
+}
+
+static inline void msg_set_origport(struct tipc_msg *m, u32 p) 
+{
+	msg_set_word(m, 4, p);
+}
+
+static inline void msg_set_destport(struct tipc_msg *m, u32 p) 
+{
+	msg_set_word(m, 5, p);
+}
+
+static inline void msg_set_mc_netid(struct tipc_msg *m, u32 p) 
+{
+	msg_set_word(m, 5, p);
+}
+
+static inline void msg_set_orignode(struct tipc_msg *m, u32 a) 
+{
+	msg_set_word(m, 6, a);
+}
+
+static inline void msg_set_destnode(struct tipc_msg *m, u32 a) 
+{
+	msg_set_word(m, 7, a);
+}
+
+static inline int msg_is_dest(struct tipc_msg *m, u32 d) 
+{
+	return(msg_short(m) || (msg_destnode(m) == d));
+}
+
+static inline u32 msg_routed(struct tipc_msg *m)
+{
+	if (likely(msg_short(m)))
+		return 0;
+	return(msg_destnode(m) ^ msg_orignode(m)) >> 11;
+}
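+
+/*
+ * Note: the test above discards the low 11 bits of both addresses, so a
+ * message only counts as routed when originator and destination differ
+ * in the upper address bits (i.e. it must leave the local cluster).
+ */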
+
+static inline void msg_set_nametype(struct tipc_msg *m, u32 n) 
+{
+	msg_set_word(m, 8, n);
+}
+
+static inline u32 msg_transp_seqno(struct tipc_msg *m)
+{
+	return msg_word(m, 8);
+}
+
+static inline void msg_set_timestamp(struct tipc_msg *m, u32 n)
+{
+	msg_set_word(m, 8, n);
+}
+
+static inline u32 msg_timestamp(struct tipc_msg *m)
+{
+	return msg_word(m, 8);
+}
+
+static inline void msg_set_transp_seqno(struct tipc_msg *m, u32 n)
+{
+	msg_set_word(m, 8, n);
+}
+
+static inline void msg_set_namelower(struct tipc_msg *m, u32 n) 
+{
+	msg_set_word(m, 9, n);
+}
+
+static inline void msg_set_nameinst(struct tipc_msg *m, u32 n) 
+{
+	msg_set_namelower(m, n);
+}
+
+static inline void msg_set_nameupper(struct tipc_msg *m, u32 n) 
+{
+	msg_set_word(m, 10, n);
+}
+
+static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m)
+{
+	return (struct tipc_msg *)msg_data(m);
+}
+
+static inline void msg_expand(struct tipc_msg *m, u32 destnode) 
+{
+	if (!msg_short(m))
+		return;
+	msg_set_hdr_sz(m, LONG_H_SIZE);
+	msg_set_orignode(m, msg_prevnode(m));
+	msg_set_destnode(m, destnode);
+	memset(&m->hdr[8], 0, 12);
+}
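+
+/*
+ * Note: msg_expand() grows a short (24 byte) header in place to the long
+ * (40 byte) form: the originating node is recovered from the "previous
+ * node" field and words 8-10 are zeroed ready for the name fields.
+ */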
+
+
+
+/*
+		TIPC internal message header format, version 2
+
+       1 0 9 8 7 6 5 4|3 2 1 0 9 8 7 6|5 4 3 2 1 0 9 8|7 6 5 4 3 2 1 0 
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+   w0:|vers |msg usr|hdr sz |n|resrv|            packet size          |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+   w1:|m typ|rsv=0|   sequence gap    |       broadcast ack no        |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+   w2:| link level ack no/bc_gap_from |     seq no / bcast_gap_to     |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+   w3:|                       previous node                           |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+   w4:|  next sent broadcast/fragm no | next sent pkt/ fragm msg no   |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+   w5:|          session no           |rsv=0|r|berid|link prio|netpl|p|
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+   w6:|                      originating node                         |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+   w7:|                      destination node                         |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+   w8:|                   transport sequence number                   |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+   w9:|   msg count / bcast tag       |       link tolerance          |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+      \                                                               \
+      /                     User Specific Data                        /
+      \                                                               \
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+      NB: CONN_MANAGER uses the data message format. LINK_CONFIG has its own format.
+*/   
+
+/* 
+ * Internal users
+ */
+
+#define  BCAST_PROTOCOL       5
+#define  MSG_BUNDLER          6
+#define  LINK_PROTOCOL        7
+#define  CONN_MANAGER         8
+#define  ROUTE_DISTRIBUTOR    9
+#define  CHANGEOVER_PROTOCOL  10
+#define  NAME_DISTRIBUTOR     11
+#define  MSG_FRAGMENTER       12
+#define  LINK_CONFIG          13
+#define  INT_H_SIZE           40
+#define  DSC_H_SIZE           40
+
+/* 
+ *  Connection management protocol messages
+ */
+
+#define CONN_PROBE        0
+#define CONN_PROBE_REPLY  1
+#define CONN_ACK          2
+
+/* 
+ * Name distributor messages
+ */
+
+#define PUBLICATION       0
+#define WITHDRAWAL        1
+
+
+/* 
+ * Word 1
+ */
+
+static inline u32 msg_seq_gap(struct tipc_msg *m)
+{
+	return msg_bits(m, 1, 16, 0xff);
+}
+
+static inline void msg_set_seq_gap(struct tipc_msg *m, u32 n)
+{
+	msg_set_bits(m, 1, 16, 0xff, n);
+}
+
+static inline u32 msg_req_links(struct tipc_msg *m)
+{
+	return msg_bits(m, 1, 16, 0xfff);
+}
+
+static inline void msg_set_req_links(struct tipc_msg *m, u32 n) 
+{
+	msg_set_bits(m, 1, 16, 0xfff, n);
+}
+
+
+/* 
+ * Word 2
+ */
+
+static inline u32 msg_dest_domain(struct tipc_msg *m)
+{
+	return msg_word(m, 2);
+}
+
+static inline void msg_set_dest_domain(struct tipc_msg *m, u32 n) 
+{
+	msg_set_word(m, 2, n);
+}
+
+static inline u32 msg_bcgap_after(struct tipc_msg *m)
+{
+	return msg_bits(m, 2, 16, 0xffff);
+}
+
+static inline void msg_set_bcgap_after(struct tipc_msg *m, u32 n)
+{
+	msg_set_bits(m, 2, 16, 0xffff, n);
+}
+
+static inline u32 msg_bcgap_to(struct tipc_msg *m)
+{
+	return msg_bits(m, 2, 0, 0xffff);
+}
+
+static inline void msg_set_bcgap_to(struct tipc_msg *m, u32 n) 
+{
+	msg_set_bits(m, 2, 0, 0xffff, n);
+}
+
+
+/* 
+ * Word 4
+ */
+
+static inline u32 msg_last_bcast(struct tipc_msg *m)
+{
+	return msg_bits(m, 4, 16, 0xffff);
+}
+
+static inline void msg_set_last_bcast(struct tipc_msg *m, u32 n)
+{
+	msg_set_bits(m, 4, 16, 0xffff, n);
+}
+
+
+static inline u32 msg_fragm_no(struct tipc_msg *m)
+{
+	return msg_bits(m, 4, 16, 0xffff);
+}
+
+static inline void msg_set_fragm_no(struct tipc_msg *m, u32 n)
+{
+	msg_set_bits(m, 4, 16, 0xffff, n);
+}
+
+
+static inline u32 msg_next_sent(struct tipc_msg *m)
+{
+	return msg_bits(m, 4, 0, 0xffff);
+}
+
+static inline void msg_set_next_sent(struct tipc_msg *m, u32 n)
+{
+	msg_set_bits(m, 4, 0, 0xffff, n);
+}
+
+
+static inline u32 msg_long_msgno(struct tipc_msg *m)
+{
+	return msg_bits(m, 4, 0, 0xffff);
+}
+
+static inline void msg_set_long_msgno(struct tipc_msg *m, u32 n)
+{
+	msg_set_bits(m, 4, 0, 0xffff, n);
+}
+
+static inline u32 msg_bc_netid(struct tipc_msg *m)
+{
+	return msg_word(m, 4);
+}
+
+static inline void msg_set_bc_netid(struct tipc_msg *m, u32 id)
+{
+	msg_set_word(m, 4, id);
+}
+
+static inline u32 msg_link_selector(struct tipc_msg *m)
+{
+	return msg_bits(m, 4, 0, 1);
+}
+
+static inline void msg_set_link_selector(struct tipc_msg *m, u32 n)
+{
+	msg_set_bits(m, 4, 0, 1, (n & 1));
+}
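+
+/*
+ * The link selector is a single bit used to distribute traffic between
+ * the (up to two) active links towards a node; compare the use of
+ * "selector & 1" when indexing active_links[] in link.c.
+ */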
+
+/* 
+ * Word 5
+ */
+
+static inline u32 msg_session(struct tipc_msg *m)
+{
+	return msg_bits(m, 5, 16, 0xffff);
+}
+
+static inline void msg_set_session(struct tipc_msg *m, u32 n)
+{
+	msg_set_bits(m, 5, 16, 0xffff, n);
+}
+
+static inline u32 msg_probe(struct tipc_msg *m)
+{
+	return msg_bits(m, 5, 0, 1);
+}
+
+static inline void msg_set_probe(struct tipc_msg *m, u32 val)
+{
+	msg_set_bits(m, 5, 0, 1, (val & 1));
+}
+
+static inline char msg_net_plane(struct tipc_msg *m)
+{
+	return msg_bits(m, 5, 1, 7) + 'A';
+}
+
+static inline void msg_set_net_plane(struct tipc_msg *m, char n)
+{
+	msg_set_bits(m, 5, 1, 7, (n - 'A'));
+}
+
+static inline u32 msg_linkprio(struct tipc_msg *m)
+{
+	return msg_bits(m, 5, 4, 0x1f);
+}
+
+static inline void msg_set_linkprio(struct tipc_msg *m, u32 n)
+{
+	msg_set_bits(m, 5, 4, 0x1f, n);
+}
+
+static inline u32 msg_bearer_id(struct tipc_msg *m)
+{
+	return msg_bits(m, 5, 9, 0x7);
+}
+
+static inline void msg_set_bearer_id(struct tipc_msg *m, u32 n)
+{
+	msg_set_bits(m, 5, 9, 0x7, n);
+}
+
+static inline u32 msg_redundant_link(struct tipc_msg *m)
+{
+	return msg_bits(m, 5, 12, 0x1);
+}
+
+static inline void msg_set_redundant_link(struct tipc_msg *m)
+{
+	msg_set_bits(m, 5, 12, 0x1, 1);
+}
+
+static inline void msg_clear_redundant_link(struct tipc_msg *m)
+{
+	msg_set_bits(m, 5, 12, 0x1, 0);
+}
+
+
+/* 
+ * Word 9
+ */
+
+static inline u32 msg_msgcnt(struct tipc_msg *m)
+{
+	return msg_bits(m, 9, 16, 0xffff);
+}
+
+static inline void msg_set_msgcnt(struct tipc_msg *m, u32 n)
+{
+	msg_set_bits(m, 9, 16, 0xffff, n);
+}
+
+static inline u32 msg_bcast_tag(struct tipc_msg *m)
+{
+	return msg_bits(m, 9, 16, 0xffff);
+}
+
+static inline void msg_set_bcast_tag(struct tipc_msg *m, u32 n)
+{
+	msg_set_bits(m, 9, 16, 0xffff, n);
+}
+
+static inline u32 msg_max_pkt(struct tipc_msg *m) 
+{
+	return (msg_bits(m, 9, 16, 0xffff) * 4);
+}
+
+static inline void msg_set_max_pkt(struct tipc_msg *m, u32 n) 
+{
+	msg_set_bits(m, 9, 16, 0xffff, (n / 4));
+}
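+
+/*
+ * Note: the max packet field is carried in 4-octet units, so values are
+ * effectively rounded down to a multiple of 4 (e.g. 1500 is stored as
+ * 375 and read back as 1500).
+ */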
+
+static inline u32 msg_link_tolerance(struct tipc_msg *m)
+{
+	return msg_bits(m, 9, 0, 0xffff);
+}
+
+static inline void msg_set_link_tolerance(struct tipc_msg *m, u32 n)
+{
+	msg_set_bits(m, 9, 0, 0xffff, n);
+}
+
+/* 
+ * Routing table message data
+ */
+
+
+static inline u32 msg_remote_node(struct tipc_msg *m)
+{
+	return msg_word(m, msg_hdr_sz(m)/4);
+}
+
+static inline void msg_set_remote_node(struct tipc_msg *m, u32 a)
+{
+	msg_set_word(m, msg_hdr_sz(m)/4, a);
+}
+
+static inline int msg_dataoctet(struct tipc_msg *m, u32 pos)
+{
+	return(msg_data(m)[pos + 4] != 0);
+}
+
+static inline void msg_set_dataoctet(struct tipc_msg *m, u32 pos)
+{
+	msg_data(m)[pos + 4] = 1;
+}
+
+/* 
+ * Segmentation message types
+ */
+
+#define FIRST_FRAGMENT     0
+#define FRAGMENT           1
+#define LAST_FRAGMENT      2
+
+/* 
+ * Link management protocol message types
+ */
+
+#define STATE_MSG       0
+#define RESET_MSG       1
+#define ACTIVATE_MSG    2
+
+/* 
+ * Changeover tunnel message types
+ */
+#define DUPLICATE_MSG    0
+#define ORIGINAL_MSG     1
+
+/* 
+ * Routing table message types
+ */
+#define EXT_ROUTING_TABLE    0
+#define LOCAL_ROUTING_TABLE  1
+#define SLAVE_ROUTING_TABLE  2
+#define ROUTE_ADDITION       3
+#define ROUTE_REMOVAL        4
+
+/* 
+ * Config protocol message types
+ */
+
+#define DSC_REQ_MSG          0
+#define DSC_RESP_MSG         1
+
+static inline u32 msg_tot_importance(struct tipc_msg *m)
+{
+	if (likely(msg_isdata(m))) {
+		if (likely(msg_orignode(m) == tipc_own_addr))
+			return msg_importance(m);
+		return msg_importance(m) + 4;
+	}
+	if ((msg_user(m) == MSG_FRAGMENTER)  &&
+	    (msg_type(m) == FIRST_FRAGMENT))
+		return msg_importance(msg_get_wrapped(m));
+	return msg_importance(m);
+}
+
+
+static inline void msg_init(struct tipc_msg *m, u32 user, u32 type, 
+			    u32 err, u32 hsize, u32 destnode)
+{
+	memset(m, 0, hsize);
+	msg_set_version(m);
+	msg_set_user(m, user);
+	msg_set_hdr_sz(m, hsize);
+	msg_set_size(m, hsize);
+	msg_set_prevnode(m, tipc_own_addr);
+	msg_set_type(m, type);
+	msg_set_errcode(m, err);
+	if (!msg_short(m)) {
+		msg_set_orignode(m, tipc_own_addr);
+		msg_set_destnode(m, destnode);
+	}
+}
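+
+#if 0
+/*
+ * Usage sketch (illustrative only, not part of this patch): preparing a
+ * long-form named data message header.  The name type/instance and the
+ * destination address below are hypothetical.
+ */
+static void example_msg_init(struct tipc_msg *hdr)
+{
+	msg_init(hdr, DATA_MEDIUM, TIPC_NAMED_MSG, TIPC_OK, LONG_H_SIZE,
+		 tipc_addr(1, 1, 10));
+	msg_set_nametype(hdr, 1000);
+	msg_set_nameinst(hdr, 1);
+}
+#endif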
+
+/** 
+ * msg_calc_data_size - determine total data size for message
+ */
+
+static inline int msg_calc_data_size(struct iovec const *msg_sect, u32 num_sect)
+{
+	int dsz = 0;
+	int i;
+
+	for (i = 0; i < num_sect; i++)
+		dsz += msg_sect[i].iov_len;
+	return dsz;
+}
+
+/** 
+ * msg_build - create message using specified header and data
+ * 
+ * Note: Caller must not hold any locks in case copy_from_user() is interrupted!
+ * 
+ * Returns message data size or errno
+ */
+
+static inline int msg_build(struct tipc_msg *hdr, 
+			    struct iovec const *msg_sect, u32 num_sect,
+			    int max_size, int usrmem, struct sk_buff** buf)
+{
+	int dsz, sz, hsz, pos, res, cnt;
+
+	dsz = msg_calc_data_size(msg_sect, num_sect);
+	if (unlikely(dsz > TIPC_MAX_USER_MSG_SIZE)) {
+		*buf = NULL;
+		return -EINVAL;
+	}
+
+	pos = hsz = msg_hdr_sz(hdr);
+	sz = hsz + dsz;
+	msg_set_size(hdr, sz);
+	if (unlikely(sz > max_size)) {
+		*buf = NULL;
+		return dsz;
+	}
+
+	*buf = buf_acquire(sz);
+	if (!(*buf))
+		return -ENOMEM;
+	memcpy((*buf)->data, (unchar *)hdr, hsz);
+	for (res = 1, cnt = 0; res && (cnt < num_sect); cnt++) {
+		if (likely(usrmem))
+			res = !copy_from_user((*buf)->data + pos, 
+					      msg_sect[cnt].iov_base, 
+					      msg_sect[cnt].iov_len);
+		else
+			memcpy((*buf)->data + pos, msg_sect[cnt].iov_base, 
+			       msg_sect[cnt].iov_len);
+		pos += msg_sect[cnt].iov_len;
+	}
+	if (likely(res))
+		return dsz;
+
+	buf_discard(*buf);
+	*buf = NULL;
+	return -EFAULT;
+}
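+
+#if 0
+/*
+ * Usage sketch (illustrative only, not part of this patch): building a
+ * one-section message from kernel memory ("usrmem" == 0); "hdr" is an
+ * already initialized header, e.g. via msg_init().
+ */
+static int example_msg_build(struct tipc_msg *hdr, void *data, u32 len)
+{
+	struct iovec iov = { .iov_base = data, .iov_len = len };
+	struct sk_buff *buf;
+	int res;
+
+	res = msg_build(hdr, &iov, 1, MAX_MSG_SIZE, 0, &buf);
+	if (!buf)
+		return res;	/* -EINVAL, -ENOMEM, -EFAULT or oversize */
+	/* ... pass "buf" on, e.g. to link_send() ... */
+	return res;
+}
+#endif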
+
+
+struct tipc_media_addr;
+
+extern void msg_set_media_addr(struct tipc_msg *m,
+			       struct tipc_media_addr *a);
+
+extern void msg_get_media_addr(struct tipc_msg *m,
+			       struct tipc_media_addr *a);
+
+
+#endif
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
new file mode 100644
index 0000000..5e0604c
--- /dev/null
+++ b/net/tipc/name_distr.c
@@ -0,0 +1,306 @@
+/*
+ * net/tipc/name_distr.c: TIPC name distribution code
+ * 
+ * Copyright (c) 2003-2005, Ericsson Research Canada
+ * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005-2006, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without 
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this 
+ * list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice, 
+ * this list of conditions and the following disclaimer in the documentation 
+ * and/or other materials provided with the distribution.
+ * Neither the names of the copyright holders nor the names of its 
+ * contributors may be used to endorse or promote products derived from this 
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "cluster.h"
+#include "dbg.h"
+#include "link.h"
+#include "msg.h"
+#include "name_distr.h"
+
+#undef  DBG_OUTPUT
+#define DBG_OUTPUT NULL
+
+#define ITEM_SIZE sizeof(struct distr_item)
+
+/**
+ * struct distr_item - publication info distributed to other nodes
+ * @type: name sequence type
+ * @lower: name sequence lower bound
+ * @upper: name sequence upper bound
+ * @ref: publishing port reference
+ * @key: publication key
+ * 
+ * ===> All fields are stored in network byte order. <===
+ * 
+ * First 3 fields identify (name or) name sequence being published.
+ * Reference field uniquely identifies port that published name sequence.
+ * Key field uniquely identifies publication, in the event a port has
+ * multiple publications of the same name sequence.
+ * 
+ * Note: There is no field that identifies the publishing node because it is 
+ * the same for all items contained within a publication message.
+ */
+
+struct distr_item {
+	u32 type;
+	u32 lower;
+	u32 upper;
+	u32 ref;
+	u32 key;
+};
+
+/**
+ * List of externally visible publications by this node --
+ * that is, all publications having a scope other than TIPC_NODE_SCOPE.
+ */
+
+static LIST_HEAD(publ_root);
+static u32 publ_cnt = 0;
+
+/**
+ * publ_to_item - add publication info to a publication message
+ */
+
+static void publ_to_item(struct distr_item *i, struct publication *p)
+{
+	i->type = htonl(p->type);
+	i->lower = htonl(p->lower);
+	i->upper = htonl(p->upper);
+	i->ref = htonl(p->ref);
+	i->key = htonl(p->key);
+	dbg("publ_to_item: %u, %u, %u\n", p->type, p->lower, p->upper);
+}
+
+/**
+ * named_prepare_buf - allocate & initialize a publication message
+ */
+
+static struct sk_buff *named_prepare_buf(u32 type, u32 size, u32 dest)
+{
+	struct sk_buff *buf = buf_acquire(LONG_H_SIZE + size);  
+	struct tipc_msg *msg;
+
+	if (buf != NULL) {
+		msg = buf_msg(buf);
+		msg_init(msg, NAME_DISTRIBUTOR, type, TIPC_OK, 
+			 LONG_H_SIZE, dest);
+		msg_set_size(msg, LONG_H_SIZE + size);
+	}
+	return buf;
+}
+
+/**
+ * named_publish - tell other nodes about a new publication by this node
+ */
+
+void named_publish(struct publication *publ)
+{
+	struct sk_buff *buf;
+	struct distr_item *item;
+
+	list_add(&publ->local_list, &publ_root);
+	publ_cnt++;
+
+	buf = named_prepare_buf(PUBLICATION, ITEM_SIZE, 0);
+	if (!buf) {
+		warn("Memory squeeze; failed to distribute publication\n");
+		return;
+	}
+
+	item = (struct distr_item *)msg_data(buf_msg(buf));
+	publ_to_item(item, publ);
+	dbg("named_withdraw: broadcasting publish msg\n");
+	cluster_broadcast(buf);
+}
+
+/**
+ * named_withdraw - tell other nodes about a withdrawn publication by this node
+ */
+
+void named_withdraw(struct publication *publ)
+{
+	struct sk_buff *buf;
+	struct distr_item *item;
+
+	list_del(&publ->local_list);
+	publ_cnt--;
+
+	buf = named_prepare_buf(WITHDRAWAL, ITEM_SIZE, 0);
+	if (!buf) {
+		warn("Memory squeeze; failed to distribute withdrawal\n");
+		return;
+	}
+
+	item = (struct distr_item *)msg_data(buf_msg(buf));
+	publ_to_item(item, publ);
+	dbg("named_withdraw: broadcasting withdraw msg\n");
+	cluster_broadcast(buf);
+}
+
+/**
+ * named_node_up - tell specified node about all publications by this node
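+ *
+ * Publications are packed into bulk PUBLICATION messages; e.g. with
+ * ITEM_SIZE == 20 bytes and TIPC_MAX_USER_MSG_SIZE == 66000, each
+ * message carries up to 3300 distribution items.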
+ */
+
+void named_node_up(unsigned long node)
+{
+	struct publication *publ;
+	struct distr_item *item = NULL;
+	struct sk_buff *buf = NULL;
+	u32 left = 0;
+	u32 rest;
+	u32 max_item_buf;
+
+	assert(in_own_cluster(node));
+	read_lock_bh(&nametbl_lock); 
+	max_item_buf = TIPC_MAX_USER_MSG_SIZE / ITEM_SIZE;
+	max_item_buf *= ITEM_SIZE;
+	rest = publ_cnt * ITEM_SIZE;
+
+	list_for_each_entry(publ, &publ_root, local_list) {
+		if (!buf) {
+			left = (rest <= max_item_buf) ? rest : max_item_buf;
+			rest -= left;
+			buf = named_prepare_buf(PUBLICATION, left, node);       
+			if (buf == NULL) {
+				warn("Memory Squeeze; could not send publication\n");
+				goto exit;
+			}
+			item = (struct distr_item *)msg_data(buf_msg(buf));
+		}
+		publ_to_item(item, publ);
+		item++;
+		left -= ITEM_SIZE;
+		if (!left) {
+			msg_set_link_selector(buf_msg(buf), node);
+			dbg("named_node_up: sending publish msg to "
+			    "<%u.%u.%u>\n", tipc_zone(node), 
+			    tipc_cluster(node), tipc_node(node));
+			link_send(buf, node, node);
+			buf = NULL;
+		}
+	}
+exit:
+	read_unlock_bh(&nametbl_lock); 
+}
+
+/**
+ * node_is_down - remove publication associated with a failed node
+ * 
+ * Invoked for each publication issued by a newly failed node.  
+ * Removes publication structure from name table & deletes it.
+ * In rare cases the link may have come back up again when this
+ * function is called, and we have two items representing the same
+ * publication. Nudge this item's key to distinguish it from the other.
+ * (Note: Publication's node subscription is already unsubscribed.)
+ */
+
+static void node_is_down(struct publication *publ)
+{
+	struct publication *p;
+
+	write_lock_bh(&nametbl_lock);
+	dbg("node_is_down: withdrawing %u, %u, %u\n",
+	    publ->type, publ->lower, publ->upper);
+	publ->key += 1222345;
+	p = nametbl_remove_publ(publ->type, publ->lower,
+				publ->node, publ->ref, publ->key);
+	assert(p == publ);
+	write_unlock_bh(&nametbl_lock);
+	kfree(publ);
+}
+
+/**
+ * named_recv - process name table update message sent by another node
+ */
+
+void named_recv(struct sk_buff *buf)
+{
+	struct publication *publ;
+	struct tipc_msg *msg = buf_msg(buf);
+	struct distr_item *item = (struct distr_item *)msg_data(msg);
+	u32 count = msg_data_sz(msg) / ITEM_SIZE;
+
+	write_lock_bh(&nametbl_lock); 
+	while (count--) {
+		if (msg_type(msg) == PUBLICATION) {
+			dbg("named_recv: got publication for %u, %u, %u\n", 
+			    ntohl(item->type), ntohl(item->lower),
+			    ntohl(item->upper));
+			publ = nametbl_insert_publ(ntohl(item->type), 
+						   ntohl(item->lower),
+						   ntohl(item->upper),
+						   TIPC_CLUSTER_SCOPE,
+						   msg_orignode(msg), 
+						   ntohl(item->ref),
+						   ntohl(item->key));
+			if (publ) {
+				nodesub_subscribe(&publ->subscr, 
+						  msg_orignode(msg), 
+						  publ,
+						  (net_ev_handler)node_is_down);
+			}
+		} else if (msg_type(msg) == WITHDRAWAL) {
+			dbg("named_recv: got withdrawl for %u, %u, %u\n", 
+			    ntohl(item->type), ntohl(item->lower),
+			    ntohl(item->upper));
+			publ = nametbl_remove_publ(ntohl(item->type),
+						   ntohl(item->lower),
+						   msg_orignode(msg),
+						   ntohl(item->ref),
+						   ntohl(item->key));
+
+			if (publ) {
+				nodesub_unsubscribe(&publ->subscr);
+				kfree(publ);
+			}
+		} else {
+			warn("named_recv: unknown msg\n");
+		}
+		item++;
+	}
+	write_unlock_bh(&nametbl_lock); 
+	buf_discard(buf);
+}
+
+/**
+ * named_reinit - re-initialize local publication list
+ * 
+ * This routine is called whenever TIPC networking is (re)enabled.
+ * All existing publications by this node that have "cluster" or "zone" scope
+ * are updated to reflect the node's current network address.
+ * (If the node's address is unchanged, the update loop terminates immediately.)
+ */
+
+void named_reinit(void)
+{
+	struct publication *publ;
+
+	write_lock_bh(&nametbl_lock); 
+	list_for_each_entry(publ, &publ_root, local_list) {
+		if (publ->node == tipc_own_addr)
+			break;
+		publ->node = tipc_own_addr;
+	}
+	write_unlock_bh(&nametbl_lock); 
+}
diff --git a/net/tipc/name_distr.h b/net/tipc/name_distr.h
new file mode 100644
index 0000000..7c6616a
--- /dev/null
+++ b/net/tipc/name_distr.h
@@ -0,0 +1,45 @@
+/*
+ * net/tipc/name_distr.h: Include file for TIPC name distribution code
+ * 
+ * Copyright (c) 2003-2005, Ericsson Research Canada
+ * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005-2006, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without 
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this 
+ * list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice, 
+ * this list of conditions and the following disclaimer in the documentation 
+ * and/or other materials provided with the distribution.
+ * Neither the names of the copyright holders nor the names of its 
+ * contributors may be used to endorse or promote products derived from this 
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_NAME_DISTR_H
+#define _TIPC_NAME_DISTR_H
+
+#include "name_table.h"
+
+void named_publish(struct publication *publ);
+void named_withdraw(struct publication *publ);
+void named_node_up(unsigned long node);
+void named_recv(struct sk_buff *buf);
+void named_reinit(void);
+
+#endif
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
new file mode 100644
index 0000000..9626c5b
--- /dev/null
+++ b/net/tipc/name_table.c
@@ -0,0 +1,1076 @@
+/*
+ * net/tipc/name_table.c: TIPC name table code
+ * 
+ * Copyright (c) 2003-2005, Ericsson Research Canada
+ * Copyright (c) 2004-2005, Wind River Systems
+ * Copyright (c) 2005-2006, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without 
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this 
+ * list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice, 
+ * this list of conditions and the following disclaimer in the documentation 
+ * and/or other materials provided with the distribution.
+ * Neither the names of the copyright holders nor the names of its 
+ * contributors may be used to endorse or promote products derived from this 
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "config.h"
+#include "dbg.h"
+#include "name_table.h"
+#include "name_distr.h"
+#include "addr.h"
+#include "node_subscr.h"
+#include "subscr.h"
+#include "port.h"
+#include "cluster.h"
+#include "bcast.h"
+
+int tipc_nametbl_size = 1024;		/* must be a power of 2 */
+
+/**
+ * struct sub_seq - container for all published instances of a name sequence
+ * @lower: name sequence lower bound
+ * @upper: name sequence upper bound
+ * @node_list: circular list of matching publications with >= node scope
+ * @cluster_list: circular list of matching publications with >= cluster scope
+ * @zone_list: circular list of matching publications with >= zone scope
+ */
+
+struct sub_seq {
+	u32 lower;
+	u32 upper;
+	struct publication *node_list;
+	struct publication *cluster_list;
+	struct publication *zone_list;
+};
+
+/** 
+ * struct name_seq - container for all published instances of a name type
+ * @type: 32 bit 'type' value for name sequence
+ * @sseqs: pointer to dynamically-sized array of sub-sequences of this 'type';
+ *         sub-sequences are sorted in ascending order
+ * @alloc: number of entries allocated in sub-sequence array
+ * @first_free: array index of first unused sub-sequence entry
+ * @ns_list: links to adjacent name sequences in hash chain
+ * @subscriptions: list of subscriptions for this 'type'
+ * @lock: spinlock controlling access to name sequence structure
+ */
+
+struct name_seq {
+	u32 type;
+	struct sub_seq *sseqs;
+	u32 alloc;
+	u32 first_free;
+	struct hlist_node ns_list;
+	struct list_head subscriptions;
+	spinlock_t lock;
+};
+
+/**
+ * struct name_table - table containing all existing port name publications
+ * @types: pointer to fixed-size array of name sequence lists, 
+ *         accessed via hashing on 'type'; name sequence lists are *not* sorted
+ * @local_publ_count: number of publications issued by this node
+ */
+
+struct name_table {
+	struct hlist_head *types;
+	u32 local_publ_count;
+};
+
+struct name_table table = { NULL } ;
+static atomic_t rsv_publ_ok = ATOMIC_INIT(0);
+rwlock_t nametbl_lock = RW_LOCK_UNLOCKED;
+
+
+static inline int hash(int x)
+{
+	return(x & (tipc_nametbl_size - 1));
+}
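
Since tipc_nametbl_size is required to be a power of 2, hash() can reduce
a type value with a single AND mask instead of a modulo operation. A
stand-alone sketch (illustrative only, not part of the patch) showing the
equivalence:

#include <assert.h>
#include <stdio.h>

#define TBL_SIZE 1024	/* must be a power of 2, like tipc_nametbl_size */

static unsigned int hash_mask(unsigned int x) { return x & (TBL_SIZE - 1); }
static unsigned int hash_mod(unsigned int x)  { return x % TBL_SIZE; }

int main(void)
{
	unsigned int x;

	/* the mask form and the modulo form agree for any input */
	for (x = 0; x < 100000; x++)
		assert(hash_mask(x) == hash_mod(x));
	printf("hash(4242) = %u\n", hash_mask(4242));	/* prints 146 */
	return 0;
}
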
+
+/**
+ * publ_create - create a publication structure
+ */
+
+static struct publication *publ_create(u32 type, u32 lower, u32 upper, 
+				       u32 scope, u32 node, u32 port_ref,   
+				       u32 key)
+{
+	struct publication *publ =
+		(struct publication *)kmalloc(sizeof(*publ), GFP_ATOMIC);
+	if (publ == NULL) {
+		warn("Memory squeeze; failed to create publication\n");
+		return 0;
+	}
+
+	memset(publ, 0, sizeof(*publ));
+	publ->type = type;
+	publ->lower = lower;
+	publ->upper = upper;
+	publ->scope = scope;
+	publ->node = node;
+	publ->ref = port_ref;
+	publ->key = key;
+	INIT_LIST_HEAD(&publ->local_list);
+	INIT_LIST_HEAD(&publ->pport_list);
+	INIT_LIST_HEAD(&publ->subscr.nodesub_list);
+	return publ;
+}
+
+/**
+ * subseq_alloc - allocate a specified number of sub-sequence structures
+ */
+
+struct sub_seq *subseq_alloc(u32 cnt)
+{
+	u32 sz = cnt * sizeof(struct sub_seq);
+	struct sub_seq *sseq = (struct sub_seq *)kmalloc(sz, GFP_ATOMIC);
+
+	if (sseq)
+		memset(sseq, 0, sz);
+	return sseq;
+}
+
+/**
+ * nameseq_create - create a name sequence structure for the specified 'type'
+ * 
+ * Allocates a single sub-sequence structure and sets it to all 0's.
+ */
+
+struct name_seq *nameseq_create(u32 type, struct hlist_head *seq_head)
+{
+	struct name_seq *nseq = 
+		(struct name_seq *)kmalloc(sizeof(*nseq), GFP_ATOMIC);
+	struct sub_seq *sseq = subseq_alloc(1);
+
+	if (!nseq || !sseq) {
+		warn("Memory squeeze; failed to create name sequence\n");
+		kfree(nseq);
+		kfree(sseq);
+		return 0;
+	}
+
+	memset(nseq, 0, sizeof(*nseq));
+	nseq->lock = SPIN_LOCK_UNLOCKED;
+	nseq->type = type;
+	nseq->sseqs = sseq;
+	dbg("nameseq_create() nseq = %x type %u, ssseqs %x, ff: %u\n",
+	    nseq, type, nseq->sseqs, nseq->first_free);
+	nseq->alloc = 1;
+	INIT_HLIST_NODE(&nseq->ns_list);
+	INIT_LIST_HEAD(&nseq->subscriptions);
+	hlist_add_head(&nseq->ns_list, seq_head);
+	return nseq;
+}
+
+/**
+ * nameseq_find_subseq - find sub-sequence (if any) matching a name instance
+ *  
+ * Very time-critical, so the lookup is a binary search through the
+ * sub-sequence array.
+ */
+
+static inline struct sub_seq *nameseq_find_subseq(struct name_seq *nseq, 
+						  u32 instance)
+{
+	struct sub_seq *sseqs = nseq->sseqs;
+	int low = 0;
+	int high = nseq->first_free - 1;
+	int mid;
+
+	while (low <= high) {
+		mid = (low + high) / 2;
+		if (instance < sseqs[mid].lower)
+			high = mid - 1;
+		else if (instance > sseqs[mid].upper)
+			low = mid + 1;
+		else
+			return &sseqs[mid];
+	}
+	return 0;
+}
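
The same range-based binary search can be exercised in isolation. A
self-contained sketch (hypothetical 'struct range' standing in for
struct sub_seq) that mirrors the loop above:

#include <stdio.h>

struct range { unsigned int lower, upper; };

/* Return index of the range containing 'inst', or -1 if none
 * (nameseq_find_subseq returns a pointer or NULL instead). */
static int find_range(const struct range *r, int n, unsigned int inst)
{
	int low = 0;
	int high = n - 1;
	int mid;

	while (low <= high) {
		mid = (low + high) / 2;
		if (inst < r[mid].lower)
			high = mid - 1;
		else if (inst > r[mid].upper)
			low = mid + 1;
		else
			return mid;
	}
	return -1;
}

int main(void)
{
	/* sorted, non-overlapping ranges, as nameseq_insert_publ maintains */
	struct range seq[] = { {0, 9}, {20, 29}, {100, 100} };

	printf("%d\n", find_range(seq, 3, 25));	/* 1: inside {20,29} */
	printf("%d\n", find_range(seq, 3, 15));	/* -1: falls in a gap */
	return 0;
}
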
+
+/**
+ * nameseq_locate_subseq - determine position of name instance in sub-sequence
+ * 
+ * Returns index in sub-sequence array of the entry that contains the specified
+ * instance value; if no entry contains that value, returns the position
+ * where a new entry for it would be inserted in the array.
+ *
+ * Note: Similar to binary search code for locating a sub-sequence.
+ */
+
+static u32 nameseq_locate_subseq(struct name_seq *nseq, u32 instance)
+{
+	struct sub_seq *sseqs = nseq->sseqs;
+	int low = 0;
+	int high = nseq->first_free - 1;
+	int mid;
+
+	while (low <= high) {
+		mid = (low + high) / 2;
+		if (instance < sseqs[mid].lower)
+			high = mid - 1;
+		else if (instance > sseqs[mid].upper)
+			low = mid + 1;
+		else
+			return mid;
+	}
+	return low;
+}
+
+/**
+ * nameseq_insert_publ - insert a publication into a name sequence structure
+ */
+
+struct publication *nameseq_insert_publ(struct name_seq *nseq,
+					u32 type, u32 lower, u32 upper,
+					u32 scope, u32 node, u32 port, u32 key)
+{
+	struct subscription *s;
+	struct subscription *st;
+	struct publication *publ;
+	struct sub_seq *sseq;
+	int created_subseq = 0;
+
+	assert(nseq->first_free <= nseq->alloc);
+	sseq = nameseq_find_subseq(nseq, lower);
+	dbg("nameseq_ins: for seq %x,<%u,%u>, found sseq %x\n",
+	    nseq, type, lower, sseq);
+	if (sseq) {
+
+		/* Lower end overlaps existing entry => need an exact match */
+
+		if ((sseq->lower != lower) || (sseq->upper != upper)) {
+			warn("Overlapping publ <%u,%u,%u>\n", type, lower, upper);
+			return 0;
+		}
+	} else {
+		u32 inspos;
+		struct sub_seq *freesseq;
+
+		/* Find where lower end should be inserted */
+
+		inspos = nameseq_locate_subseq(nseq, lower);
+
+		/* Fail if upper end overlaps into an existing entry */
+
+		if ((inspos < nseq->first_free) &&
+		    (upper >= nseq->sseqs[inspos].lower)) {
+			warn("Overlapping publ <%u,%u,%u>\n", type, lower, upper);
+			return 0;
+		}
+
+		/* Ensure there is space for new sub-sequence */
+
+		if (nseq->first_free == nseq->alloc) {
+			struct sub_seq *sseqs = nseq->sseqs;
+			nseq->sseqs = subseq_alloc(nseq->alloc * 2);
+			if (nseq->sseqs != NULL) {
+				memcpy(nseq->sseqs, sseqs,
+				       nseq->alloc * sizeof (struct sub_seq));
+				kfree(sseqs);
+				dbg("Allocated %u sseqs\n", nseq->alloc);
+				nseq->alloc *= 2;
+			} else {
+				warn("Memory squeeze; failed to create sub-sequence\n");
+				return 0;
+			}
+		}
+		dbg("Have %u sseqs for type %u\n", nseq->alloc, type);
+
+		/* Insert new sub-sequence */
+
+		dbg("ins in pos %u, ff = %u\n", inspos, nseq->first_free);
+		sseq = &nseq->sseqs[inspos];
+		freesseq = &nseq->sseqs[nseq->first_free];
+		memmove(sseq + 1, sseq, (freesseq - sseq) * sizeof (*sseq));
+		memset(sseq, 0, sizeof (*sseq));
+		nseq->first_free++;
+		sseq->lower = lower;
+		sseq->upper = upper;
+		created_subseq = 1;
+	}
+	dbg("inserting (%u %u %u) from %x:%u into sseq %x(%u,%u) of seq %x\n",
+	    type, lower, upper, node, port, sseq,
+	    sseq->lower, sseq->upper, nseq);
+
+	/* Insert a publication: */
+
+	publ = publ_create(type, lower, upper, scope, node, port, key);
+	if (!publ)
+		return 0;
+	dbg("inserting publ %x, node=%x publ->node=%x, subscr->node=%x\n",
+	    publ, node, publ->node, publ->subscr.node);
+
+	if (!sseq->zone_list)
+		sseq->zone_list = publ->zone_list_next = publ;
+	else {
+		publ->zone_list_next = sseq->zone_list->zone_list_next;
+		sseq->zone_list->zone_list_next = publ;
+	}
+
+	if (in_own_cluster(node)) {
+		if (!sseq->cluster_list)
+			sseq->cluster_list = publ->cluster_list_next = publ;
+		else {
+			publ->cluster_list_next =
+			sseq->cluster_list->cluster_list_next;
+			sseq->cluster_list->cluster_list_next = publ;
+		}
+	}
+
+	if (node == tipc_own_addr) {
+		if (!sseq->node_list)
+			sseq->node_list = publ->node_list_next = publ;
+		else {
+			publ->node_list_next = sseq->node_list->node_list_next;
+			sseq->node_list->node_list_next = publ;
+		}
+	}
+
+	/* 
+	 * Any subscriptions waiting for notification? 
+	 */
+	list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) {
+		dbg("calling report_overlap()\n");
+		subscr_report_overlap(s,
+				      publ->lower,
+				      publ->upper,
+				      TIPC_PUBLISHED,
+				      publ->ref, 
+				      publ->node,
+				      created_subseq);
+	}
+	return publ;
+}
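
The zone, cluster and node lists populated above are circular singly-linked
lists whose stored head also serves as the round-robin cursor used by
nametbl_translate(); a new publication is always spliced in directly behind
the head. A stand-alone sketch of that splice (hypothetical struct, same
logic):

#include <stdio.h>
#include <stdlib.h>

struct entry {
	int val;
	struct entry *next;	/* like publ->zone_list_next */
};

/* Splice 'e' into the circular list '*head', right after the
 * current head (matches the insertion pattern above). */
static void circ_insert(struct entry **head, struct entry *e)
{
	if (!*head) {
		*head = e->next = e;	/* single-element ring */
	} else {
		e->next = (*head)->next;
		(*head)->next = e;
	}
}

int main(void)
{
	struct entry *head = NULL;
	struct entry *crs;
	int i;

	for (i = 1; i <= 3; i++) {
		struct entry *e = malloc(sizeof(*e));

		if (!e)
			return 1;
		e->val = i;
		circ_insert(&head, e);
	}

	/* walk the ring exactly once: prints 1 3 2 */
	crs = head;
	do {
		printf("%d ", crs->val);
		crs = crs->next;
	} while (crs != head);
	printf("\n");
	return 0;
}
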
+
+/**
+ * nameseq_remove_publ - remove a publication from a name sequence structure
+ */
+
+struct publication *nameseq_remove_publ(struct name_seq *nseq, u32 inst,
+					u32 node, u32 ref, u32 key)
+{
+	struct publication *publ;
+	struct publication *prev;
+	struct sub_seq *sseq = nameseq_find_subseq(nseq, inst);
+	struct sub_seq *free;
+	struct subscription *s, *st;
+	int removed_subseq = 0;
+
+	assert(nseq);
+
+	if (!sseq) {
+		int i;
+
+		warn("Withdraw unknown <%u,%u>?\n", nseq->type, inst);
+		assert(nseq->sseqs);
+		dbg("Dumping subseqs %x for %x, alloc = %u,ff=%u\n",
+		    nseq->sseqs, nseq, nseq->alloc, 
+		    nseq->first_free);
+		for (i = 0; i < nseq->first_free; i++) {
+			dbg("Subseq %u(%x): lower = %u,upper = %u\n",
+			    i, &nseq->sseqs[i], nseq->sseqs[i].lower,
+			    nseq->sseqs[i].upper);
+		}
+		return 0;
+	}
+	dbg("nameseq_remove: seq: %x, sseq %x, <%u,%u> key %u\n",
+	    nseq, sseq, nseq->type, inst, key);
+
+	prev = sseq->zone_list;
+	publ = sseq->zone_list->zone_list_next;
+	while ((publ->key != key) || (publ->ref != ref) || 
+	       (publ->node && (publ->node != node))) {
+		prev = publ;
+		publ = publ->zone_list_next;
+		assert(prev != sseq->zone_list);
+	}
+	if (publ != sseq->zone_list)
+		prev->zone_list_next = publ->zone_list_next;
+	else if (publ->zone_list_next != publ) {
+		prev->zone_list_next = publ->zone_list_next;
+		sseq->zone_list = publ->zone_list_next;
+	} else {
+		sseq->zone_list = 0;
+	}
+
+	if (in_own_cluster(node)) {
+		prev = sseq->cluster_list;
+		publ = sseq->cluster_list->cluster_list_next;
+		while ((publ->key != key) || (publ->ref != ref) || 
+		       (publ->node && (publ->node != node))) {
+			prev = publ;
+			publ = publ->cluster_list_next;
+			assert(prev != sseq->cluster_list);
+		}
+		if (publ != sseq->cluster_list)
+			prev->cluster_list_next = publ->cluster_list_next;
+		else if (publ->cluster_list_next != publ) {
+			prev->cluster_list_next = publ->cluster_list_next;
+			sseq->cluster_list = publ->cluster_list_next;
+		} else {
+			sseq->cluster_list = 0;
+		}
+	}
+
+	if (node == tipc_own_addr) {
+		prev = sseq->node_list;
+		publ = sseq->node_list->node_list_next;
+		while ((publ->key != key) || (publ->ref != ref) || 
+		       (publ->node && (publ->node != node))) {
+			prev = publ;
+			publ = publ->node_list_next;
+			assert(prev != sseq->node_list);
+		}
+		if (publ != sseq->node_list)
+			prev->node_list_next = publ->node_list_next;
+		else if (publ->node_list_next != publ) {
+			prev->node_list_next = publ->node_list_next;
+			sseq->node_list = publ->node_list_next;
+		} else {
+			sseq->node_list = 0;
+		}
+	}
+	assert(!publ->node || (publ->node == node));
+	assert(publ->ref == ref);
+	assert(publ->key == key);
+
+	/* 
+	 * Contract subseq list if no more publications:
+	 */
+	if (!sseq->node_list && !sseq->cluster_list && !sseq->zone_list) {
+		free = &nseq->sseqs[nseq->first_free--];
+		memmove(sseq, sseq + 1, (free - (sseq + 1)) * sizeof (*sseq));
+		removed_subseq = 1;
+	}
+
+	/* 
+	 * Any subscriptions waiting?
+	 */
+	list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) {
+		subscr_report_overlap(s,
+				      publ->lower,
+				      publ->upper,
+				      TIPC_WITHDRAWN, 
+				      publ->ref, 
+				      publ->node,
+				      removed_subseq);
+	}
+	return publ;
+}
+
+/**
+ * nameseq_subscribe - attach a subscription, and issue the prescribed
+ * number of events if there is any sub-sequence overlapping with the
+ * requested sequence
+ */
+
+void nameseq_subscribe(struct name_seq *nseq, struct subscription *s)
+{
+	struct sub_seq *sseq = nseq->sseqs;
+
+	list_add(&s->nameseq_list, &nseq->subscriptions);
+
+	if (!sseq)
+		return;
+
+	while (sseq != &nseq->sseqs[nseq->first_free]) {
+		struct publication *zl = sseq->zone_list;
+		if (zl && subscr_overlap(s,sseq->lower,sseq->upper)) {
+			struct publication *crs = zl;
+			int must_report = 1;
+
+			do {
+				subscr_report_overlap(s, 
+						       sseq->lower, 
+						       sseq->upper,
+						       TIPC_PUBLISHED,
+						       crs->ref,
+						       crs->node,
+						       must_report);
+				must_report = 0;
+				crs = crs->zone_list_next;
+			} while (crs != zl);
+		}
+		sseq++;
+	}
+}
+
+static struct name_seq *nametbl_find_seq(u32 type)
+{
+	struct hlist_head *seq_head;
+	struct hlist_node *seq_node;
+	struct name_seq *ns;
+
+	dbg("find_seq %u,(%u,0x%x) table = %p, hash[type] = %u\n",
+	    type, ntohl(type), type, table.types, hash(type));
+
+	seq_head = &table.types[hash(type)];
+	hlist_for_each_entry(ns, seq_node, seq_head, ns_list) {
+		if (ns->type == type) {
+			dbg("found %x\n", ns);
+			return ns;
+		}
+	}
+
+	return 0;
+};
+
+struct publication *nametbl_insert_publ(u32 type, u32 lower, u32 upper,
+		    u32 scope, u32 node, u32 port, u32 key)
+{
+	struct name_seq *seq = nametbl_find_seq(type);
+
+	dbg("ins_publ: <%u,%x,%x> found %x\n", type, lower, upper, seq);
+	if (lower > upper) {
+		warn("Failed to publish illegal <%u,%u,%u>\n",
+		     type, lower, upper);
+		return 0;
+	}
+
+	dbg("Publishing <%u,%u,%u> from %x\n", type, lower, upper, node);
+	if (!seq) {
+		seq = nameseq_create(type, &table.types[hash(type)]);
+		dbg("nametbl_insert_publ: created %x\n", seq);
+	}
+	if (!seq)
+		return 0;
+
+	assert(seq->type == type);
+	return nameseq_insert_publ(seq, type, lower, upper,
+				   scope, node, port, key);
+}
+
+struct publication *nametbl_remove_publ(u32 type, u32 lower, 
+					u32 node, u32 ref, u32 key)
+{
+	struct publication *publ;
+	struct name_seq *seq = nametbl_find_seq(type);
+
+	if (!seq)
+		return 0;
+
+	dbg("Withdrawing <%u,%u> from %x\n", type, lower, node);
+	publ = nameseq_remove_publ(seq, lower, node, ref, key);
+
+	if (!seq->first_free && list_empty(&seq->subscriptions)) {
+		hlist_del_init(&seq->ns_list);
+		kfree(seq->sseqs);
+		kfree(seq);
+	}
+	return publ;
+}
+
+/*
+ * nametbl_translate(): Translate tipc_name -> tipc_portid.
+ *                      Very time-critical.
+ *
+ * Note: on entry 'destnode' is the search domain used during translation;
+ *       on exit it passes back the node address of the matching port (if any)
+ */
+
+u32 nametbl_translate(u32 type, u32 instance, u32 *destnode)
+{
+	struct sub_seq *sseq;
+	struct publication *publ = 0;
+	struct name_seq *seq;
+	u32 ref;
+
+	if (!in_scope(*destnode, tipc_own_addr))
+		return 0;
+
+	read_lock_bh(&nametbl_lock);
+	seq = nametbl_find_seq(type);
+	if (unlikely(!seq))
+		goto not_found;
+	sseq = nameseq_find_subseq(seq, instance);
+	if (unlikely(!sseq))
+		goto not_found;
+	spin_lock_bh(&seq->lock);
+
+	/* Closest-First Algorithm: */
+	if (likely(!*destnode)) {
+		publ = sseq->node_list;
+		if (publ) {
+			sseq->node_list = publ->node_list_next;
+found:
+			ref = publ->ref;
+			*destnode = publ->node;
+			spin_unlock_bh(&seq->lock);
+			read_unlock_bh(&nametbl_lock);
+			return ref;
+		}
+		publ = sseq->cluster_list;
+		if (publ) {
+			sseq->cluster_list = publ->cluster_list_next;
+			goto found;
+		}
+		publ = sseq->zone_list;
+		if (publ) {
+			sseq->zone_list = publ->zone_list_next;
+			goto found;
+		}
+	}
+
+	/* Round-Robin Algorithm: */
+	else if (*destnode == tipc_own_addr) {
+		publ = sseq->node_list;
+		if (publ) {
+			sseq->node_list = publ->node_list_next;
+			goto found;
+		}
+	} else if (in_own_cluster(*destnode)) {
+		publ = sseq->cluster_list;
+		if (publ) {
+			sseq->cluster_list = publ->cluster_list_next;
+			goto found;
+		}
+	} else {
+		publ = sseq->zone_list;
+		if (publ) {
+			sseq->zone_list = publ->zone_list_next;
+			goto found;
+		}
+	}
+	spin_unlock_bh(&seq->lock);
+not_found:
+	*destnode = 0;
+	read_unlock_bh(&nametbl_lock);
+	return 0;
+}
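
A hedged usage sketch of the two lookup modes: passing *destnode == 0
selects the closest-first algorithm, while a concrete search domain
round-robins within the matching list. This is an illustrative
kernel-context fragment only; the calling context, locking and error
handling are omitted:

/* Illustrative only: resolve name <1000,17>, letting the table
 * pick the closest publication. */
static u32 lookup_example(void)
{
	u32 dnode = 0;	/* 0 => closest-first algorithm */
	u32 dport;

	dport = nametbl_translate(1000, 17, &dnode);
	if (!dport)
		return 0;	/* nothing published in scope; dnode is 0 */

	/* dnode now holds the publishing node; closest-first preferred a
	 * node-scope entry, then cluster scope, then zone scope. */
	return dport;
}
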
+
+/**
+ * nametbl_mc_translate - find multicast destinations
+ * 
+ * Creates list of all local ports that overlap the given multicast address;
+ * also determines if any off-node ports overlap.
+ *
+ * Note: Publications with a scope narrower than 'limit' are ignored.
+ * (i.e. local node-scope publications mustn't receive messages arriving
+ * from another node, even if the multicast link brought them here)
+ * 
+ * Returns non-zero if any off-node ports overlap
+ */
+
+int nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit,
+			 struct port_list *dports)
+{
+	struct name_seq *seq;
+	struct sub_seq *sseq;
+	struct sub_seq *sseq_stop;
+	int res = 0;
+
+	read_lock_bh(&nametbl_lock);
+	seq = nametbl_find_seq(type);
+	if (!seq)
+		goto exit;
+
+	spin_lock_bh(&seq->lock);
+
+	sseq = seq->sseqs + nameseq_locate_subseq(seq, lower);
+	sseq_stop = seq->sseqs + seq->first_free;
+	for (; sseq != sseq_stop; sseq++) {
+		struct publication *publ;
+
+		if (sseq->lower > upper)
+			break;
+		publ = sseq->cluster_list;
+		if (publ && (publ->scope <= limit))
+			do {
+				if (publ->node == tipc_own_addr)
+					port_list_add(dports, publ->ref);
+				else
+					res = 1;
+				publ = publ->cluster_list_next;
+			} while (publ != sseq->cluster_list);
+	}
+
+	spin_unlock_bh(&seq->lock);
+exit:
+	read_unlock_bh(&nametbl_lock);
+	return res;
+}
+
+/**
+ * nametbl_publish_rsv - publish port name using a reserved name type
+ */
+
+int nametbl_publish_rsv(u32 ref, unsigned int scope, 
+			struct tipc_name_seq const *seq)
+{
+	int res;
+
+	atomic_inc(&rsv_publ_ok);
+	res = tipc_publish(ref, scope, seq);
+	atomic_dec(&rsv_publ_ok);
+	return res;
+}
+
+/**
+ * nametbl_publish - add name publication to network name tables
+ */
+
+struct publication *nametbl_publish(u32 type, u32 lower, u32 upper, 
+				    u32 scope, u32 port_ref, u32 key)
+{
+	struct publication *publ;
+
+	if (table.local_publ_count >= tipc_max_publications) {
+		warn("Failed publish: max %u local publication\n", 
+		     tipc_max_publications);
+		return 0;
+	}
+	if ((type < TIPC_RESERVED_TYPES) && !atomic_read(&rsv_publ_ok)) {
+		warn("Failed to publish reserved name <%u,%u,%u>\n",
+		     type, lower, upper);
+		return 0;
+	}
+
+	write_lock_bh(&nametbl_lock);
+	table.local_publ_count++;
+	publ = nametbl_insert_publ(type, lower, upper, scope,
+				   tipc_own_addr, port_ref, key);
+	if (publ && (scope != TIPC_NODE_SCOPE)) {
+		named_publish(publ);
+	}
+	write_unlock_bh(&nametbl_lock);
+	return publ;
+}
+
+/**
+ * nametbl_withdraw - withdraw name publication from network name tables
+ */
+
+int nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key)
+{
+	struct publication *publ;
+
+	dbg("nametbl_withdraw:<%d,%d,%d>\n", type, lower, key);
+	write_lock_bh(&nametbl_lock);
+	publ = nametbl_remove_publ(type, lower, tipc_own_addr, ref, key);
+	if (publ) {
+		table.local_publ_count--;
+		if (publ->scope != TIPC_NODE_SCOPE)
+			named_withdraw(publ);
+		write_unlock_bh(&nametbl_lock);
+		list_del_init(&publ->pport_list);
+		kfree(publ);
+		return 1;
+	}
+	write_unlock_bh(&nametbl_lock);
+	return 0;
+}
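
The publish and withdraw operations pair on the <type,lower,ref,key> tuple,
since nameseq_remove_publ() matches on exactly those fields. An illustrative
sketch with hypothetical values (kernel context, error handling trimmed):

/* Hypothetical example: publish instances 100-199 of type 4711 on
 * behalf of port 'ref', then withdraw that same publication. */
static void publish_withdraw_example(u32 ref)
{
	struct publication *publ;
	u32 key = ref + 1;	/* arbitrary; must be replayed on withdraw */

	publ = nametbl_publish(4711, 100, 199, TIPC_CLUSTER_SCOPE, ref, key);
	if (!publ)
		return;

	/* ... the name is now visible cluster-wide via named_publish() ... */

	nametbl_withdraw(4711, 100, ref, key);
}
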
+
+/**
+ * nametbl_subscribe - add a subscription object to the name table
+ */
+
+void
+nametbl_subscribe(struct subscription *s)
+{
+	u32 type = s->seq.type;
+	struct name_seq *seq;
+
+        write_lock_bh(&nametbl_lock);
+	seq = nametbl_find_seq(type);
+	if (!seq) {
+		seq = nameseq_create(type, &table.types[hash(type)]);
+	}
+        if (seq){
+                spin_lock_bh(&seq->lock);
+                dbg("nametbl_subscribe:found %x for <%u,%u,%u>\n",
+                    seq, type, s->seq.lower, s->seq.upper);
+                assert(seq->type == type);
+                nameseq_subscribe(seq, s);
+                spin_unlock_bh(&seq->lock);
+        }
+        write_unlock_bh(&nametbl_lock);
+}
+
+/**
+ * nametbl_unsubscribe - remove a subscription object from name table
+ */
+
+void
+nametbl_unsubscribe(struct subscription *s)
+{
+	struct name_seq *seq;
+
+        write_lock_bh(&nametbl_lock);
+        seq = nametbl_find_seq(s->seq.type);
+	if (seq != NULL){
+                spin_lock_bh(&seq->lock);
+                list_del_init(&s->nameseq_list);
+                spin_unlock_bh(&seq->lock);
+                if ((seq->first_free == 0) && list_empty(&seq->subscriptions)) {
+                        hlist_del_init(&seq->ns_list);
+                        kfree(seq->sseqs);
+                        kfree(seq);
+                }
+        }
+        write_unlock_bh(&nametbl_lock);
+}
+
+
+/**
+ * subseq_list: print specified sub-sequence contents into the given buffer
+ */
+
+static void subseq_list(struct sub_seq *sseq, struct print_buf *buf, u32 depth,
+			u32 index)
+{
+	char portIdStr[27];
+	char *scopeStr;
+	struct publication *publ = sseq->zone_list;
+
+	tipc_printf(buf, "%-10u %-10u ", sseq->lower, sseq->upper);
+
+	if (depth == 2 || !publ) {
+		tipc_printf(buf, "\n");
+		return;
+	}
+
+	do {
+		sprintf (portIdStr, "<%u.%u.%u:%u>",
+			 tipc_zone(publ->node), tipc_cluster(publ->node),
+			 tipc_node(publ->node), publ->ref);
+		tipc_printf(buf, "%-26s ", portIdStr);
+		if (depth > 3) {
+			if (publ->node != tipc_own_addr)
+				scopeStr = "";
+			else if (publ->scope == TIPC_NODE_SCOPE)
+				scopeStr = "node";
+			else if (publ->scope == TIPC_CLUSTER_SCOPE)
+				scopeStr = "cluster";
+			else
+				scopeStr = "zone";
+			tipc_printf(buf, "%-10u %s", publ->key, scopeStr);
+		}
+
+		publ = publ->zone_list_next;
+		if (publ == sseq->zone_list)
+			break;
+
+		tipc_printf(buf, "\n%33s", " ");
+	} while (1);
+
+	tipc_printf(buf, "\n");
+}
+
+/**
+ * nameseq_list: print specified name sequence contents into the given buffer
+ */
+
+static void nameseq_list(struct name_seq *seq, struct print_buf *buf, u32 depth,
+			 u32 type, u32 lowbound, u32 upbound, u32 index)
+{
+	struct sub_seq *sseq;
+	char typearea[11];
+
+	sprintf(typearea, "%-10u", seq->type);
+
+	if (depth == 1) {
+		tipc_printf(buf, "%s\n", typearea);
+		return;
+	}
+
+	for (sseq = seq->sseqs; sseq != &seq->sseqs[seq->first_free]; sseq++) {
+		if ((lowbound <= sseq->upper) && (upbound >= sseq->lower)) {
+			tipc_printf(buf, "%s ", typearea);
+			subseq_list(sseq, buf, depth, index);
+			sprintf(typearea, "%10s", " ");
+		}
+	}
+}
+
+/**
+ * nametbl_header - print name table header into the given buffer
+ */
+
+static void nametbl_header(struct print_buf *buf, u32 depth)
+{
+	tipc_printf(buf, "Type       ");
+
+	if (depth > 1)
+		tipc_printf(buf, "Lower      Upper      ");
+	if (depth > 2)
+		tipc_printf(buf, "Port Identity              ");
+	if (depth > 3)
+		tipc_printf(buf, "Publication");
+
+	tipc_printf(buf, "\n-----------");
+
+	if (depth > 1)
+		tipc_printf(buf, "--------------------- ");
+	if (depth > 2)
+		tipc_printf(buf, "-------------------------- ");
+	if (depth > 3)
+		tipc_printf(buf, "------------------");
+
+	tipc_printf(buf, "\n");
+}
+
+/**
+ * nametbl_list - print specified name table contents into the given buffer
+ */
+
+static void nametbl_list(struct print_buf *buf, u32 depth_info, 
+			 u32 type, u32 lowbound, u32 upbound)
+{
+	struct hlist_head *seq_head;
+	struct hlist_node *seq_node;
+	struct name_seq *seq;
+	int all_types;
+	u32 depth;
+	u32 i;
+
+	all_types = (depth_info & TIPC_NTQ_ALLTYPES);
+	depth = (depth_info & ~TIPC_NTQ_ALLTYPES);
+
+	if (depth == 0)
+		return;
+
+	if (all_types) {
+		/* display all entries in name table to specified depth */
+		nametbl_header(buf, depth);
+		lowbound = 0;
+		upbound = ~0;
+		for (i = 0; i < tipc_nametbl_size; i++) {
+			seq_head = &table.types[i];
+			hlist_for_each_entry(seq, seq_node, seq_head, ns_list) {
+				nameseq_list(seq, buf, depth, seq->type, 
+					     lowbound, upbound, i);
+			}
+		}
+	} else {
+		/* display only the sequence that matches the specified type */
+		if (upbound < lowbound) {
+			tipc_printf(buf, "invalid name sequence specified\n");
+			return;
+		}
+		nametbl_header(buf, depth);
+		i = hash(type);
+		seq_head = &table.types[i];
+		hlist_for_each_entry(seq, seq_node, seq_head, ns_list) {
+			if (seq->type == type) {
+				nameseq_list(seq, buf, depth, type, 
+					     lowbound, upbound, i);
+				break;
+			}
+		}
+	}
+}
+
+void nametbl_print(struct print_buf *buf, const char *str)
+{
+	tipc_printf(buf, str);
+	read_lock_bh(&nametbl_lock);
+	nametbl_list(buf, 0, 0, 0, 0);
+	read_unlock_bh(&nametbl_lock);
+}
+
+#define MAX_NAME_TBL_QUERY 32768
+
+struct sk_buff *nametbl_get(const void *req_tlv_area, int req_tlv_space)
+{
+	struct sk_buff *buf;
+	struct tipc_name_table_query *argv;
+	struct tlv_desc *rep_tlv;
+	struct print_buf b;
+	int str_len;
+
+	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NAME_TBL_QUERY))
+		return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+
+	buf = cfg_reply_alloc(TLV_SPACE(MAX_NAME_TBL_QUERY));
+	if (!buf)
+		return NULL;
+
+	rep_tlv = (struct tlv_desc *)buf->data;
+	printbuf_init(&b, TLV_DATA(rep_tlv), MAX_NAME_TBL_QUERY);
+	argv = (struct tipc_name_table_query *)TLV_DATA(req_tlv_area);
+	read_lock_bh(&nametbl_lock);
+	nametbl_list(&b, ntohl(argv->depth), ntohl(argv->type), 
+		     ntohl(argv->lowbound), ntohl(argv->upbound));
+	read_unlock_bh(&nametbl_lock);
+	str_len = printbuf_validate(&b);
+
+	skb_put(buf, TLV_SPACE(str_len));
+	TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
+
+	return buf;
+}
+
+void nametbl_dump(void)
+{
+	nametbl_list(CONS, 0, 0, 0, 0);
+}
+
+int nametbl_init(void)
+{
+	int array_size = sizeof(struct hlist_head) * tipc_nametbl_size;
+
+	table.types = (struct hlist_head *)kmalloc(array_size, GFP_ATOMIC);
+	if (!table.types)
+		return -ENOMEM;
+
+	write_lock_bh(&nametbl_lock);
+	memset(table.types, 0, array_size);
+	table.local_publ_count = 0;
+	write_unlock_bh(&nametbl_lock);
+	return 0;
+}
+
+void nametbl_stop(void)
+{
+	struct hlist_head *seq_head;
+	struct hlist_node *seq_node;
+	struct hlist_node *tmp;
+	struct name_seq *seq;
+	u32 i;
+
+	if (!table.types)
+		return;
+
+	write_lock_bh(&nametbl_lock);
+	for (i = 0; i < tipc_nametbl_size; i++) {
+		seq_head = &table.types[i];
+		hlist_for_each_entry_safe(seq, seq_node, tmp, seq_head, ns_list) {
+			struct sub_seq *sseq = seq->sseqs;
+
+			for (; sseq != &seq->sseqs[seq->first_free]; sseq++) {
+				struct publication *publ = sseq->zone_list;
+				assert(publ);
+				do {
+					struct publication *next =
+						publ->zone_list_next;
+					kfree(publ);
+					publ = next;
+				}
+				while (publ != sseq->zone_list);
+			}
+		}
+	}
+	kfree(table.types);
+	table.types = NULL;
+	write_unlock_bh(&nametbl_lock);
+}
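
Because the zone lists are circular, the teardown above must save each
successor before kfree(). The same idiom as a stand-alone sketch
(hypothetical two-element ring):

#include <stdlib.h>

struct entry { struct entry *next; };

/* Free every element of the circular list headed by 'head',
 * saving the successor before each free(), as nametbl_stop() does. */
static void circ_free(struct entry *head)
{
	struct entry *publ = head;

	if (!publ)
		return;
	do {
		struct entry *next = publ->next;

		free(publ);
		publ = next;
	} while (publ != head);
}

int main(void)
{
	struct entry *a = malloc(sizeof(*a));
	struct entry *b = malloc(sizeof(*b));

	if (!a || !b)
		return 1;
	a->next = b;
	b->next = a;	/* two-element ring */
	circ_free(a);
	return 0;
}
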
diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h
new file mode 100644
index 0000000..5036b5b
--- /dev/null
+++ b/net/tipc/name_table.h
@@ -0,0 +1,105 @@
+/*
+ * net/tipc/name_table.h: Include file for TIPC name table code
+ * 
+ * Copyright (c) 2003-2005, Ericsson Research Canada
+ * Copyright (c) 2004-2005, Wind River Systems
+ * Copyright (c) 2005-2006, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without 
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this 
+ * list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice, 
+ * this list of conditions and the following disclaimer in the documentation 
+ * and/or other materials provided with the distribution.
+ * Neither the names of the copyright holders nor the names of its 
+ * contributors may be used to endorse or promote products derived from this 
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_NAME_TABLE_H
+#define _TIPC_NAME_TABLE_H
+
+#include "node_subscr.h"
+
+struct subscription;
+struct port_list;
+
+/*
+ * TIPC name types reserved for internal TIPC use (both current and planned)
+ */
+
+#define TIPC_ZM_SRV 3  		/* zone master service name type */
+
+
+/**
+ * struct publication - info about a published name or name sequence
+ * @type: name sequence type
+ * @lower: name sequence lower bound
+ * @upper: name sequence upper bound
+ * @scope: scope of publication
+ * @node: network address of publishing port's node
+ * @ref: publishing port
+ * @key: publication key
+ * @subscr: subscription to "node down" event (for off-node publications only)
+ * @local_list: adjacent entries in list of publications made by this node
+ * @pport_list: adjacent entries in list of publications made by this port
+ * @node_list: next matching name seq publication with >= node scope
+ * @cluster_list: next matching name seq publication with >= cluster scope
+ * @zone_list: next matching name seq publication with >= zone scope
+ * 
+ * Note that the node list, cluster list, and zone list are circular lists.
+ */
+
+struct publication {
+	u32 type;
+	u32 lower;
+	u32 upper;
+	u32 scope;
+	u32 node;
+	u32 ref;
+	u32 key;
+	struct node_subscr subscr;
+	struct list_head local_list;
+	struct list_head pport_list;
+	struct publication *node_list_next;
+	struct publication *cluster_list_next;
+	struct publication *zone_list_next;
+};
+
+
+extern rwlock_t nametbl_lock;
+
+struct sk_buff *nametbl_get(const void *req_tlv_area, int req_tlv_space);
+u32 nametbl_translate(u32 type, u32 instance, u32 *node);
+int nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit, 
+			 struct port_list *dports);
+int nametbl_publish_rsv(u32 ref, unsigned int scope, 
+			struct tipc_name_seq const *seq);
+struct publication *nametbl_publish(u32 type, u32 lower, u32 upper,
+				    u32 scope, u32 port_ref, u32 key);
+int nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key);
+struct publication *nametbl_insert_publ(u32 type, u32 lower, u32 upper,
+					u32 scope, u32 node, u32 ref, u32 key);
+struct publication *nametbl_remove_publ(u32 type, u32 lower, 
+					u32 node, u32 ref, u32 key);
+void nametbl_subscribe(struct subscription *s);
+void nametbl_unsubscribe(struct subscription *s);
+int nametbl_init(void);
+void nametbl_stop(void);
+
+#endif
diff --git a/net/tipc/net.c b/net/tipc/net.c
new file mode 100644
index 0000000..eba8803
--- /dev/null
+++ b/net/tipc/net.c
@@ -0,0 +1,308 @@
+/*
+ * net/tipc/net.c: TIPC network routing code
+ * 
+ * Copyright (c) 2003-2005, Ericsson Research Canada
+ * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005-2006, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * Neither the names of the copyright holders nor the names of its 
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "bearer.h"
+#include "net.h"
+#include "zone.h"
+#include "addr.h"
+#include "name_table.h"
+#include "name_distr.h"
+#include "subscr.h"
+#include "link.h"
+#include "msg.h"
+#include "port.h"
+#include "bcast.h"
+#include "discover.h"
+#include "config.h"
+
+/* 
+ * The TIPC locking policy is designed to ensure a very fine locking
+ * granularity, permitting complete parallel access to individual
+ * port and node/link instances. The code consists of three major 
+ * locking domains, each protected by its own disjoint set of locks.
+ *
+ * 1: The routing hierarchy.
+ *    Comprises the structures 'zone', 'cluster', 'node', 'link' 
+ *    and 'bearer'. The whole hierarchy is protected by a big 
+ *    read/write lock, net_lock, to enssure that nothing is added 
+ *    or removed while code is accessing any of these structures. 
+ *    This layer must not be called from the two others while they 
+ *    hold any of their own locks.
+ *    Neither must it itself do any upcalls to the other two before
+ *    it has released net_lock and other protective locks.
+ *
+ *   Within the net_lock domain there are two sub-domains: 'node' and 
+ *   'bearer', where local write operations are permitted,
+ *   provided that those are protected by individual spin_locks
+ *   per instance. Code holding net_lock(read) and a node spin_lock 
+ *   is permitted to poke around in both the node itself and its
+ *   subordinate links. I.e, it can update link counters and queues, 
+ *   change link state, send protocol messages, and alter the 
+ *   "active_links" array in the node; but it can _not_ remove a link 
+ *   or a node from the overall structure.
+ *   Correspondingly, individual bearers may change status within a 
+ *   net_lock(read), protected by an individual spin_lock per bearer 
+ *   instance, but net_lock(write) is needed to remove/add any bearers.
+ *     
+ *
+ *  2: The transport level of the protocol. 
+ *     This consists of the structures port, (and its user level 
+ *     representations, such as user_port and tipc_sock), reference and 
+ *     tipc_user (port.c, reg.c, socket.c). 
+ *
+ *     This layer has four different locks:
+ *     - The tipc_port spin_lock. This is protecting each port instance
+ *       from parallel data access and removal. Since we can not place 
+ *       this lock in the port itself, it has been placed in the 
+ *       corresponding reference table entry, which has the same life
+ *       cycle as the module. This entry is difficult to access from 
+ *       outside the TIPC core, however, so a pointer to the lock has 
+ *       been added in the port instance, -to be used for unlocking 
+ *       only.
+ *     - A read/write lock to protect the reference table itself (reg.c). 
+ *       (Nobody is using read-only access to this, so it can just as 
+ *       well be changed to a spin_lock)
+ *     - A spin lock to protect the registry of kernel/driver users (reg.c)
+ *     - A global spin_lock (port_lock), whose only task is to ensure 
+ *       consistency where more than one port is involved in an operation,
+ *       i.e., when a port is part of a linked list of ports.
+ *       There are two such lists; 'port_list', which is used for management,
+ *       and 'wait_list', which is used to queue ports during congestion.
+ *     
+ *  3: The name table (name_table.c, name_distr.c, subscription.c)
+ *     - There is one big read/write-lock (nametbl_lock) protecting the 
+ *       overall name table structure. Nothing may be added to or removed 
+ *       from this structure without holding write access to it.
+ *     - There is one local spin_lock per sub_sequence, which can be seen
+ *       as a sub-domain to the nametbl_lock domain. It is used only
+ *       for translation operations, and is needed because a translation
+ *       steps the root of the 'publication' linked list between each lookup.
+ *       This is always used within the scope of a nametbl_lock(read).
+ *     - A local spin_lock protecting the queue of subscriber events.
+ */
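
For domain 1 the implied nesting order is net_lock first, then the
per-instance spin_lock, released in reverse order. An illustrative fragment
of that discipline (not code from the patch; assumes a node already looked
up inside the read section):

/* Illustrative only: update per-node state under the documented
 * lock hierarchy. */
static void poke_node_example(struct node *n_ptr)
{
	read_lock_bh(&net_lock);	/* hierarchy cannot change now */
	spin_lock_bh(&n_ptr->lock);	/* serialize updates to this node */

	/* Safe here: may update n_ptr and its links (counters, queues,
	 * active_links), but must not add or remove nodes, links or
	 * bearers - that requires net_lock(write). */

	spin_unlock_bh(&n_ptr->lock);
	read_unlock_bh(&net_lock);
}
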
+
+rwlock_t net_lock = RW_LOCK_UNLOCKED;
+struct network net = { 0 };
+
+struct node *net_select_remote_node(u32 addr, u32 ref) 
+{
+	return zone_select_remote_node(net.zones[tipc_zone(addr)], addr, ref);
+}
+
+u32 net_select_router(u32 addr, u32 ref)
+{
+	return zone_select_router(net.zones[tipc_zone(addr)], addr, ref);
+}
+
+
+u32 net_next_node(u32 a)
+{
+	if (net.zones[tipc_zone(a)])
+		return zone_next_node(a);
+	return 0;
+}
+
+void net_remove_as_router(u32 router)
+{
+	u32 z_num;
+
+	for (z_num = 1; z_num <= tipc_max_zones; z_num++) {
+		if (!net.zones[z_num])
+			continue;
+		zone_remove_as_router(net.zones[z_num], router);
+	}
+}
+
+void net_send_external_routes(u32 dest)
+{
+	u32 z_num;
+
+	for (z_num = 1; z_num <= tipc_max_zones; z_num++) {
+		if (net.zones[z_num])
+			zone_send_external_routes(net.zones[z_num], dest);
+	}
+}
+
+int net_init(void)
+{
+	u32 sz = sizeof(struct _zone *) * (tipc_max_zones + 1);
+
+	memset(&net, 0, sizeof(net));
+	net.zones = (struct _zone **)kmalloc(sz, GFP_ATOMIC);
+	if (!net.zones) {
+		return -ENOMEM;
+	}
+	memset(net.zones, 0, sz);
+	return TIPC_OK;
+}
+
+void net_stop(void)
+{
+	u32 z_num;
+
+	if (!net.zones)
+		return;
+
+	for (z_num = 1; z_num <= tipc_max_zones; z_num++) {
+		zone_delete(net.zones[z_num]);
+	}
+	kfree(net.zones);
+	net.zones = 0;
+}
+
+static void net_route_named_msg(struct sk_buff *buf)
+{
+	struct tipc_msg *msg = buf_msg(buf);
+	u32 dnode;
+	u32 dport;
+
+	if (!msg_named(msg)) {
+		msg_dbg(msg, "net->drop_nam:");
+		buf_discard(buf);
+		return;
+	}
+
+	dnode = addr_domain(msg_lookup_scope(msg));
+	dport = nametbl_translate(msg_nametype(msg), msg_nameinst(msg), &dnode);
+	dbg("net->lookup<%u,%u>-><%u,%x>\n",
+	    msg_nametype(msg), msg_nameinst(msg), dport, dnode);
+	if (dport) {
+		msg_set_destnode(msg, dnode);
+		msg_set_destport(msg, dport);
+		net_route_msg(buf);
+		return;
+	}
+	msg_dbg(msg, "net->rej:NO NAME: ");
+	tipc_reject_msg(buf, TIPC_ERR_NO_NAME);
+}
+
+void net_route_msg(struct sk_buff *buf)
+{
+	struct tipc_msg *msg;
+	u32 dnode;
+
+	if (!buf)
+		return;
+	msg = buf_msg(buf);
+
+	msg_incr_reroute_cnt(msg);
+	if (msg_reroute_cnt(msg) > 6) {
+		if (msg_errcode(msg)) {
+			msg_dbg(msg, "NET>DISC>:");
+			buf_discard(buf);
+		} else {
+			msg_dbg(msg, "NET>REJ>:");
+			tipc_reject_msg(buf, msg_destport(msg) ? 
+					TIPC_ERR_NO_PORT : TIPC_ERR_NO_NAME);
+		}
+		return;
+	}
+
+	msg_dbg(msg, "net->rout: ");
+
+	/* Handle message for this node */
+	dnode = msg_short(msg) ? tipc_own_addr : msg_destnode(msg);
+	if (in_scope(dnode, tipc_own_addr)) {
+		if (msg_isdata(msg)) {
+			if (msg_mcast(msg)) 
+				port_recv_mcast(buf, NULL);
+			else if (msg_destport(msg))
+				port_recv_msg(buf);
+			else
+				net_route_named_msg(buf);
+			return;
+		}
+		switch (msg_user(msg)) {
+		case ROUTE_DISTRIBUTOR:
+			cluster_recv_routing_table(buf);
+			break;
+		case NAME_DISTRIBUTOR:
+			named_recv(buf);
+			break;
+		case CONN_MANAGER:
+			port_recv_proto_msg(buf);
+			break;
+		default:
+			msg_dbg(msg,"DROP/NET/<REC<");
+			buf_discard(buf);
+		}
+		return;
+	}
+
+	/* Handle message for another node */
+	msg_dbg(msg, "NET>SEND>: ");
+	link_send(buf, dnode, msg_link_selector(msg));
+}
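
The reroute counter is a TTL-style loop guard: each pass through
net_route_msg() increments it, and after six reroutes the message is
discarded (or rejected back to the sender) rather than circulating forever.
A stand-alone model of just the guard (illustrative only):

#include <stdio.h>

#define MAX_REROUTES 6	/* same limit as msg_reroute_cnt(msg) > 6 */

/* Bump the counter; report whether the message may be forwarded again. */
static int route_once(int *reroute_cnt)
{
	if (++*reroute_cnt > MAX_REROUTES)
		return -1;	/* drop/reject, as net_route_msg does */
	return 0;		/* keep forwarding */
}

int main(void)
{
	int cnt = 0;
	int hops = 0;

	while (route_once(&cnt) == 0)
		hops++;
	printf("gave up after %d hops\n", hops);	/* prints 6 */
	return 0;
}
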
+
+int tipc_start_net(void)
+{
+	char addr_string[16];
+	int res;
+
+	if (tipc_mode != TIPC_NODE_MODE)
+		return -ENOPROTOOPT;
+
+	tipc_mode = TIPC_NET_MODE;
+	named_reinit();
+	port_reinit();
+
+	if ((res = bearer_init()) ||
+	    (res = net_init()) ||
+	    (res = cluster_init()) ||
+	    (res = bclink_init())) {
+		return res;
+	}
+        subscr_stop();
+	cfg_stop();
+	k_signal((Handler)subscr_start, 0);
+	k_signal((Handler)cfg_init, 0);
+	info("Started in network mode\n");
+	info("Own node address %s, network identity %u\n",
+	     addr_string_fill(addr_string, tipc_own_addr), tipc_net_id);
+	return TIPC_OK;
+}
+
+void tipc_stop_net(void)
+{
+	if (tipc_mode != TIPC_NET_MODE)
+		return;
+        write_lock_bh(&net_lock);
+	bearer_stop();
+	tipc_mode = TIPC_NODE_MODE;
+	bclink_stop();
+	net_stop();
+        write_unlock_bh(&net_lock);
+	info("Left network mode \n");
+}
+
diff --git a/net/tipc/net.h b/net/tipc/net.h
new file mode 100644
index 0000000..5009845
--- /dev/null
+++ b/net/tipc/net.h
@@ -0,0 +1,63 @@
+/*
+ * net/tipc/net.h: Include file for TIPC network routing code
+ * 
+ * Copyright (c) 2003-2005, Ericsson Research Canada
+ * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005-2006, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * 
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * Neither the names of the copyright holders nor the names of its 
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_NET_H
+#define _TIPC_NET_H
+
+struct _zone;
+
+/**
+ * struct network - TIPC network structure
+ * @zones: array of pointers to all zones within network
+ */
+ 
+struct network {
+	struct _zone **zones;
+};
+
+
+extern struct network net;
+extern rwlock_t net_lock;
+
+int net_init(void);
+void net_stop(void);
+void net_remove_as_router(u32 router);
+void net_send_external_routes(u32 dest);
+void net_route_msg(struct sk_buff *buf);
+struct node *net_select_remote_node(u32 addr, u32 ref);
+u32 net_select_router(u32 addr, u32 ref);
+
+int tipc_start_net(void);
+void tipc_stop_net(void);
+
+#endif
diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c
new file mode 100644
index 0000000..35fbc79
--- /dev/null
+++ b/net/tipc/netlink.c
@@ -0,0 +1,107 @@
+/*
+ * net/tipc/netlink.c: TIPC configuration handling
+ * 
+ * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005-2006, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without 
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this 
+ * list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice, 
+ * this list of conditions and the following disclaimer in the documentation 
+ * and/or other materials provided with the distribution.
+ * Neither the names of the copyright holders nor the names of its 
+ * contributors may be used to endorse or promote products derived from this 
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "config.h"
+#include <net/genetlink.h>
+
+static int handle_cmd(struct sk_buff *skb, struct genl_info *info)
+{
+	struct sk_buff *rep_buf;
+	struct nlmsghdr *rep_nlh;
+	struct nlmsghdr *req_nlh = info->nlhdr;
+	struct tipc_genlmsghdr *req_userhdr = info->userhdr;
+	int hdr_space = NLMSG_SPACE(0);
+
+	if ((req_userhdr->cmd & 0xC000) && (!capable(CAP_NET_ADMIN)))
+		rep_buf = cfg_reply_error_string(TIPC_CFG_NOT_NET_ADMIN);
+	else
+		rep_buf = cfg_do_cmd(req_userhdr->dest,
+				     req_userhdr->cmd,
+				     NLMSG_DATA(req_nlh) + GENL_HDRLEN + TIPC_GENL_HDRLEN,
+				     NLMSG_PAYLOAD(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN),
+				     hdr_space);
+
+	if (rep_buf) {
+		skb_push(rep_buf, hdr_space);
+		rep_nlh = (struct nlmsghdr *)rep_buf->data;
+		memcpy(rep_nlh, req_nlh, hdr_space);
+		rep_nlh->nlmsg_len = rep_buf->len;
+		genlmsg_unicast(rep_buf, req_nlh->nlmsg_pid);
+	}
+
+        return 0;
+}
+
+static struct genl_family family = {
+        .id		= TIPC_GENL_FAMILY,
+        .name		= TIPC_GENL_NAME,
+        .version	= TIPC_GENL_VERSION,
+        .hdrsize	= TIPC_GENL_HDRLEN,
+        .maxattr	= 0,
+	.owner		= THIS_MODULE,
+};
+
+static struct genl_ops ops = {
+	.cmd		= TIPC_GENL_CMD,
+	.doit		= handle_cmd,
+};
+
+static int family_registered = 0;
+
+int netlink_start(void)
+{
+	if (genl_register_family(&family))
+		goto err;
+
+	family_registered = 1;
+
+	if (genl_register_ops(&family, &ops))
+		goto err_unregister;
+
+	return 0;
+
+ err_unregister:
+	genl_unregister_family(&family);
+	family_registered = 0;
+ err:
+	err("Failed to register netlink interface");
+	return -EFAULT;
+}
+
+void netlink_stop(void)
+{
+	if (family_registered) {
+		genl_unregister_family(&family);
+		family_registered = 0;
+	}
+}
diff --git a/net/tipc/node.c b/net/tipc/node.c
new file mode 100644
index 0000000..e311638
--- /dev/null
+++ b/net/tipc/node.c
@@ -0,0 +1,676 @@
+/*
+ * net/tipc/node.c: TIPC node management routines
+ * 
+ * Copyright (c) 2003-2005, Ericsson Research Canada
+ * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005-2006, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without 
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this 
+ * list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice, 
+ * this list of conditions and the following disclaimer in the documentation 
+ * and/or other materials provided with the distribution.
+ * Neither the names of the copyright holders nor the names of its 
+ * contributors may be used to endorse or promote products derived from this 
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "config.h"
+#include "node.h"
+#include "cluster.h"
+#include "net.h"
+#include "addr.h"
+#include "node_subscr.h"
+#include "link.h"
+#include "port.h"
+#include "bearer.h"
+#include "name_distr.h"
+#include "net.h"
+
+void node_print(struct print_buf *buf, struct node *n_ptr, char *str);
+static void node_lost_contact(struct node *n_ptr);
+static void node_established_contact(struct node *n_ptr);
+
+struct node *nodes = NULL;	/* sorted list of nodes within cluster */
+
+u32 tipc_own_tag = 0;
+
+struct node *node_create(u32 addr)
+{
+	struct cluster *c_ptr;
+	struct node *n_ptr;
+        struct node **curr_node;
+
+	n_ptr = kmalloc(sizeof(*n_ptr),GFP_ATOMIC);
+        if (n_ptr != NULL) {
+                memset(n_ptr, 0, sizeof(*n_ptr));
+                n_ptr->addr = addr;
+                n_ptr->lock =  SPIN_LOCK_UNLOCKED;	
+                INIT_LIST_HEAD(&n_ptr->nsub);
+	
+		c_ptr = cluster_find(addr);
+                if (c_ptr == NULL)
+                        c_ptr = cluster_create(addr);
+                if (c_ptr != NULL) {
+                        n_ptr->owner = c_ptr;
+                        cluster_attach_node(c_ptr, n_ptr);
+                        n_ptr->last_router = -1;
+
+                        /* Insert node into ordered list */
+                        for (curr_node = &nodes; *curr_node; 
+			     curr_node = &(*curr_node)->next) {
+                                if (addr < (*curr_node)->addr) {
+                                        n_ptr->next = *curr_node;
+                                        break;
+                                }
+                        }
+                        (*curr_node) = n_ptr;
+                } else {
+                        kfree(n_ptr);
+                        n_ptr = NULL;
+                }
+        }
+	return n_ptr;
+}
+
+void node_delete(struct node *n_ptr)
+{
+	if (!n_ptr)
+		return;
+
+#if 0
+	/* Not needed because links are already deleted via bearer_stop() */
+
+	u32 l_num;
+
+	for (l_num = 0; l_num < MAX_BEARERS; l_num++) {
+		link_delete(n_ptr->links[l_num]);
+	}
+#endif
+
+	dbg("node %x deleted\n", n_ptr->addr);
+	kfree(n_ptr);
+}
+
+
+/**
+ * node_link_up - handle addition of link
+ * 
+ * Link becomes active (alone or shared) or standby, depending on its priority.
+ */
+
+void node_link_up(struct node *n_ptr, struct link *l_ptr)
+{
+	struct link **active = &n_ptr->active_links[0];
+
+	info("Established link <%s> on network plane %c\n",
+	     l_ptr->name, l_ptr->b_ptr->net_plane);
+	
+	if (!active[0]) {
+		dbg(" link %x into %x/%x\n", l_ptr, &active[0], &active[1]);
+		active[0] = active[1] = l_ptr;
+		node_established_contact(n_ptr);
+		return;
+	}
+	if (l_ptr->priority < active[0]->priority) { 
+		info("Link is standby\n");
+		return;
+	}
+	link_send_duplicate(active[0], l_ptr);
+	if (l_ptr->priority == active[0]->priority) { 
+		active[0] = l_ptr;
+		return;
+	}
+	info("Link <%s> on network plane %c becomes standby\n",
+	     active[0]->name, active[0]->b_ptr->net_plane);
+	active[0] = active[1] = l_ptr;
+}
+
+/**
+ * node_select_active_links - select active link
+ */
+
+static void node_select_active_links(struct node *n_ptr)
+{
+	struct link **active = &n_ptr->active_links[0];
+	u32 i;
+	u32 highest_prio = 0;
+
+        active[0] = active[1] = 0;
+
+	for (i = 0; i < MAX_BEARERS; i++) {
+                struct link *l_ptr = n_ptr->links[i];
+
+		if (!l_ptr || !link_is_up(l_ptr) ||
+		    (l_ptr->priority < highest_prio))
+			continue;
+
+		if (l_ptr->priority > highest_prio) {
+                        highest_prio = l_ptr->priority;
+			active[0] = active[1] = l_ptr;
+		} else {
+			active[1] = l_ptr;
+		}
+	}
+}
+
+/**
+ * node_link_down - handle loss of link
+ */
+
+void node_link_down(struct node *n_ptr, struct link *l_ptr)
+{
+	struct link **active;
+
+	if (!link_is_active(l_ptr)) {
+		info("Lost standby link <%s> on network plane %c\n",
+		     l_ptr->name, l_ptr->b_ptr->net_plane);
+		return;
+	}
+	info("Lost link <%s> on network plane %c\n",
+		l_ptr->name, l_ptr->b_ptr->net_plane);
+
+	active = &n_ptr->active_links[0];
+	if (active[0] == l_ptr)
+		active[0] = active[1];
+	if (active[1] == l_ptr)
+		active[1] = active[0];
+	if (active[0] == l_ptr)
+		node_select_active_links(n_ptr);
+	if (node_is_up(n_ptr)) 
+		link_changeover(l_ptr);
+	else 
+		node_lost_contact(n_ptr);
+}
+
+int node_has_active_links(struct node *n_ptr)
+{
+	return (n_ptr && 
+		((n_ptr->active_links[0]) || (n_ptr->active_links[1])));
+}
+
+int node_has_redundant_links(struct node *n_ptr)
+{
+	return (node_has_active_links(n_ptr) &&
+		(n_ptr->active_links[0] != n_ptr->active_links[1]));
+}
+
+int node_has_active_routes(struct node *n_ptr)
+{
+	return (n_ptr && (n_ptr->last_router >= 0));
+}
+
+int node_is_up(struct node *n_ptr)
+{
+	return (node_has_active_links(n_ptr) || node_has_active_routes(n_ptr));
+}
+
+struct node *node_attach_link(struct link *l_ptr)
+{
+	struct node *n_ptr = node_find(l_ptr->addr);
+
+	if (!n_ptr)
+		n_ptr = node_create(l_ptr->addr);
+	if (n_ptr) {
+		u32 bearer_id = l_ptr->b_ptr->identity;
+		char addr_string[16];
+
+		assert(bearer_id < MAX_BEARERS);
+		if (n_ptr->link_cnt >= 2) {
+			err("Attempt to create third link to %s\n",
+			    addr_string_fill(addr_string, n_ptr->addr));
+			return NULL;
+		}
+
+		if (!n_ptr->links[bearer_id]) {
+			n_ptr->links[bearer_id] = l_ptr;
+			net.zones[tipc_zone(l_ptr->addr)]->links++;
+			n_ptr->link_cnt++;
+			return n_ptr;
+		}
+		err("Attempt to establish second link on <%s> to <%s>\n",
+		    l_ptr->b_ptr->publ.name,
+		    addr_string_fill(addr_string, l_ptr->addr));
+	}
+	return NULL;
+}
+
+void node_detach_link(struct node *n_ptr, struct link *l_ptr)
+{
+	n_ptr->links[l_ptr->b_ptr->identity] = 0;
+	net.zones[tipc_zone(l_ptr->addr)]->links--;
+	n_ptr->link_cnt--;
+}
+
+/*
+ * Routing table management - five cases to handle:
+ *
+ * 1: A link towards a zone/cluster external node comes up.
+ *    => Send a multicast message updating routing tables of all 
+ *    system nodes within own cluster that the new destination 
+ *    can be reached via this node. 
+ *    (node.establishedContact()=>cluster.multicastNewRoute())
+ *
+ * 2: A link towards a slave node comes up.
+ *    => Send a multicast message updating routing tables of all 
+ *    system nodes within own cluster that the new destination 
+ *    can be reached via this node. 
+ *    (node.establishedContact()=>cluster.multicastNewRoute())
+ *    => Send a message to the slave node about existence 
+ *    of all system nodes within cluster:
+ *    (node.establishedContact()=>cluster.sendLocalRoutes())
+ *
+ * 3: A new cluster local system node becomes available.
+ *    => Send message(s) to this particular node containing
+ *    information about all cluster external and slave
+ *     nodes which can be reached via this node.
+ *    (node.establishedContact()==>network.sendExternalRoutes())
+ *    (node.establishedContact()==>network.sendSlaveRoutes())
+ *    => Send messages to all directly connected slave nodes 
+ *    containing information about the existence of the new node
+ *    (node.establishedContact()=>cluster.multicastNewRoute())
+ *    
+ * 4: The link towards a zone/cluster external node or slave
+ *    node goes down.
+ *    => Send a multicast message updating routing tables of all 
+ *    nodes within cluster that the destination can no longer
+ *    be reached via this node.
+ *    (node.lostAllLinks()=>cluster.bcastLostRoute())
+ *
+ * 5: A cluster local system node becomes unavailable.
+ *    => Remove all references to this node from the local
+ *    routing tables. Note: This is a completely node
+ *    local operation.
+ *    (node.lostAllLinks()=>network.removeAsRouter())
+ *    => Send messages to all directly connected slave nodes 
+ *    containing information about loss of the node
+ *    (node.lostAllLinks()=>cluster.multicastLostRoute())
+ *
+ */
+
+static void node_established_contact(struct node *n_ptr)
+{
+	struct cluster *c_ptr;
+
+	dbg("node_established_contact:-> %x\n", n_ptr->addr);
+	if (!node_has_active_routes(n_ptr)) { 
+		k_signal((Handler)named_node_up, n_ptr->addr);
+	}
+
+	/* Synchronize broadcast acks */
+	n_ptr->bclink.acked = bclink_get_last_sent();
+
+	if (is_slave(tipc_own_addr))
+		return;
+	if (!in_own_cluster(n_ptr->addr)) {
+		/* Usage case 1 (see above) */
+		c_ptr = cluster_find(tipc_own_addr);
+		if (!c_ptr)
+			c_ptr = cluster_create(tipc_own_addr);
+		if (c_ptr)
+			cluster_bcast_new_route(c_ptr, n_ptr->addr, 1,
+						tipc_max_nodes);
+		return;
+	} 
+
+	c_ptr = n_ptr->owner;
+	if (is_slave(n_ptr->addr)) {
+		/* Usage case 2 (see above) */
+		cluster_bcast_new_route(c_ptr, n_ptr->addr, 1, tipc_max_nodes);
+		cluster_send_local_routes(c_ptr, n_ptr->addr);
+		return;
+	}
+
+	if (n_ptr->bclink.supported) {
+		nmap_add(&cluster_bcast_nodes, n_ptr->addr);
+		if (n_ptr->addr < tipc_own_addr)
+			tipc_own_tag++;
+	}
+
+	/* Case 3 (see above) */
+	net_send_external_routes(n_ptr->addr);
+	cluster_send_slave_routes(c_ptr, n_ptr->addr);
+	cluster_bcast_new_route(c_ptr, n_ptr->addr, LOWEST_SLAVE,
+				highest_allowed_slave);
+}
+
+static void node_lost_contact(struct node *n_ptr)
+{
+	struct cluster *c_ptr;
+	struct node_subscr *ns, *tns;
+	char addr_string[16];
+	u32 i;
+
+	/* Clean up broadcast reception remains */
+	n_ptr->bclink.gap_after = n_ptr->bclink.gap_to = 0;
+	while (n_ptr->bclink.deferred_head) {
+		struct sk_buff *buf = n_ptr->bclink.deferred_head;
+		n_ptr->bclink.deferred_head = buf->next;
+		buf_discard(buf);
+	}
+	if (n_ptr->bclink.defragm) {
+		buf_discard(n_ptr->bclink.defragm);
+		n_ptr->bclink.defragm = NULL;
+	}
+	if (in_own_cluster(n_ptr->addr) && n_ptr->bclink.supported) {
+		bclink_acknowledge(n_ptr, mod(n_ptr->bclink.acked + 10000));
+	}
+
+	/* Update routing tables */
+	if (is_slave(tipc_own_addr)) {
+		net_remove_as_router(n_ptr->addr);
+	} else {
+		if (!in_own_cluster(n_ptr->addr)) { 
+			/* Case 4 (see above) */
+			c_ptr = cluster_find(tipc_own_addr);
+			cluster_bcast_lost_route(c_ptr, n_ptr->addr, 1,
+						 tipc_max_nodes);
+		} else {
+			/* Case 5 (see above) */
+			c_ptr = cluster_find(n_ptr->addr);
+			if (is_slave(n_ptr->addr)) {
+				cluster_bcast_lost_route(c_ptr, n_ptr->addr, 1,
+							 tipc_max_nodes);
+			} else {
+				if (n_ptr->bclink.supported) {
+					nmap_remove(&cluster_bcast_nodes, 
+						    n_ptr->addr);
+					if (n_ptr->addr < tipc_own_addr)
+						tipc_own_tag--;
+				}
+				net_remove_as_router(n_ptr->addr);
+				cluster_bcast_lost_route(c_ptr, n_ptr->addr,
+							 LOWEST_SLAVE,
+							 highest_allowed_slave);
+			}
+		}
+	}
+	if (node_has_active_routes(n_ptr))
+		return;
+
+	info("Lost contact with %s\n", 
+	     addr_string_fill(addr_string, n_ptr->addr));
+
+	/* Abort link changeover */
+	for (i = 0; i < MAX_BEARERS; i++) {
+		struct link *l_ptr = n_ptr->links[i];
+		if (!l_ptr) 
+			continue;
+		l_ptr->reset_checkpoint = l_ptr->next_in_no;
+		l_ptr->exp_msg_count = 0;
+		link_reset_fragments(l_ptr);
+	}
+
+	/* Notify subscribers */
+	list_for_each_entry_safe(ns, tns, &n_ptr->nsub, nodesub_list) {
+		ns->node = NULL;
+		list_del_init(&ns->nodesub_list);
+		k_signal((Handler)ns->handle_node_down,
+			 (unsigned long)ns->usr_handle);
+	}
+}
+
+/**
+ * node_select_next_hop - find the next-hop node for a message
+ * 
+ * Called when a cluster local lookup has failed.
+ */
+
+struct node *node_select_next_hop(u32 addr, u32 selector)
+{
+	struct node *n_ptr;
+	u32 router_addr;
+
+	if (!addr_domain_valid(addr))
+		return NULL;
+
+	/* Look for direct link to destination processor */
+	n_ptr = node_find(addr);
+	if (n_ptr && node_has_active_links(n_ptr))
+		return n_ptr;
+
+	/* Cluster local system nodes *must* have direct links */
+	if (!is_slave(addr) && in_own_cluster(addr))
+		return NULL;
+
+	/* Look for cluster local router with direct link to node */
+	router_addr = node_select_router(n_ptr, selector);
+	if (router_addr)
+		return node_select(router_addr, selector);
+
+	/* Slave nodes can only be accessed within own cluster via a
+	   known router with direct link -- if no router was found, give up */
+	if (is_slave(addr))
+		return NULL;
+
+	/* Inter zone/cluster -- find any direct link to remote cluster */
+	addr = tipc_addr(tipc_zone(addr), tipc_cluster(addr), 0);
+	n_ptr = net_select_remote_node(addr, selector);
+	if (n_ptr && node_has_active_links(n_ptr))
+		return n_ptr;
+
+	/* Last resort -- look for any router to anywhere in remote zone */
+	router_addr = net_select_router(addr, selector);
+	if (router_addr)
+		return node_select(router_addr, selector);
+
+	return NULL;
+}
+
+/**
+ * node_select_router - select router to reach specified node
+ * 
+ * Uses a deterministic and fair algorithm for selecting router node. 
+ */
+
+u32 node_select_router(struct node *n_ptr, u32 ref)
+{
+	u32 ulim;
+	u32 mask;
+	u32 start;
+	u32 r;
+
+	if (!n_ptr)
+		return 0;
+
+	if (n_ptr->last_router < 0)
+		return 0;
+	ulim = ((n_ptr->last_router + 1) * 32) - 1;
+
+	/* Start entry must be random */
+	mask = tipc_max_nodes;
+	while (mask > ulim)
+		mask >>= 1;
+	start = ref & mask;
+	r = start;
+
+	/* Lookup upwards with wrap-around */
+	do {
+		if (((n_ptr->routers[r / 32]) >> (r % 32)) & 1)
+			break;
+	} while (++r <= ulim);
+	if (r > ulim) {
+		r = 1;
+		do {
+			if (((n_ptr->routers[r / 32]) >> (r % 32)) & 1)
+				break;
+		} while (++r < start);
+		assert(r != start);
+	}
+	assert(r && (r <= ulim));
+	return tipc_addr(own_zone(), own_cluster(), r);
+}
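+
+/*
+ * Worked example of the algorithm above (illustrative only): suppose
+ * bits 3 and 17 are set in n_ptr->routers[0] (so last_router = 0 and
+ * ulim = 31) and tipc_max_nodes is 1023.  The mask is halved until it
+ * fits below ulim: 1023 -> 511 -> 255 -> 127 -> 63 -> 31.  A selector
+ * of 9 gives start = 9 & 31 = 9, and the upward scan picks router 17;
+ * a selector of 20 finds no bit in 20..31, wraps around, and picks
+ * router 3.  Different selectors thus spread traffic deterministically
+ * over the available routers.
+ */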
+
+void node_add_router(struct node *n_ptr, u32 router)
+{
+	u32 r_num = tipc_node(router);
+
+	n_ptr->routers[r_num / 32] = 
+		((1 << (r_num % 32)) | n_ptr->routers[r_num / 32]);
+	n_ptr->last_router = tipc_max_nodes / 32;
+	while ((--n_ptr->last_router >= 0) && 
+	       !n_ptr->routers[n_ptr->last_router]);
+}
+
+void node_remove_router(struct node *n_ptr, u32 router)
+{
+	u32 r_num = tipc_node(router);
+
+	if (n_ptr->last_router < 0)
+		return;		/* No routes */
+
+	n_ptr->routers[r_num / 32] =
+		((~(1 << (r_num % 32))) & (n_ptr->routers[r_num / 32]));
+	n_ptr->last_router = tipc_max_nodes / 32;
+	while ((--n_ptr->last_router >= 0) && 
+	       !n_ptr->routers[n_ptr->last_router]);
+
+	if (!node_is_up(n_ptr))
+		node_lost_contact(n_ptr);
+}
+
+#if 0
+void node_print(struct print_buf *buf, struct node *n_ptr, char *str)
+{
+	u32 i;
+
+	tipc_printf(buf, "\n\n%s", str);
+	for (i = 0; i < MAX_BEARERS; i++) {
+		if (!n_ptr->links[i]) 
+			continue;
+		tipc_printf(buf, "Links[%u]: %x, ", i, n_ptr->links[i]);
+	}
+	tipc_printf(buf, "Active links: [%x,%x]\n",
+		    n_ptr->active_links[0], n_ptr->active_links[1]);
+}
+#endif
+
+u32 tipc_available_nodes(const u32 domain)
+{
+	struct node *n_ptr;
+	u32 cnt = 0;
+
+	for (n_ptr = nodes; n_ptr; n_ptr = n_ptr->next) {
+		if (!in_scope(domain, n_ptr->addr))
+			continue;
+		if (node_is_up(n_ptr))
+			cnt++;
+	}
+	return cnt;
+}
+
+struct sk_buff *node_get_nodes(const void *req_tlv_area, int req_tlv_space)
+{
+	u32 domain;
+	struct sk_buff *buf;
+	struct node *n_ptr;
+	struct tipc_node_info node_info;
+
+	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR))
+		return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+
+	domain = *(u32 *)TLV_DATA(req_tlv_area);
+	domain = ntohl(domain);
+	if (!addr_domain_valid(domain))
+		return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
+					      " (network address)");
+
+	if (!nodes)
+		return cfg_reply_none();
+
+	/* For now, get space for all other nodes
+	   (will need to modify this when slave nodes are supported) */
+
+	buf = cfg_reply_alloc(TLV_SPACE(sizeof(node_info)) *
+			    (tipc_max_nodes - 1));
+	if (!buf)
+		return NULL;
+
+	/* Add TLVs for all nodes in scope */
+
+	for (n_ptr = nodes; n_ptr; n_ptr = n_ptr->next) {
+		if (!in_scope(domain, n_ptr->addr))
+			continue;
+		node_info.addr = htonl(n_ptr->addr);
+		node_info.up = htonl(node_is_up(n_ptr));
+		cfg_append_tlv(buf, TIPC_TLV_NODE_INFO, 
+			       &node_info, sizeof(node_info));
+	}
+
+	return buf;
+}
+
+struct sk_buff *node_get_links(const void *req_tlv_area, int req_tlv_space)
+{
+	u32 domain;
+	struct sk_buff *buf;
+	struct node *n_ptr;
+	struct tipc_link_info link_info;
+
+	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR))
+		return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+
+	domain = *(u32 *)TLV_DATA(req_tlv_area);
+	domain = ntohl(domain);
+	if (!addr_domain_valid(domain))
+		return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
+					      " (network address)");
+
+	if (!nodes)
+		return cfg_reply_none();
+
+	/* For now, get space for 2 links to all other nodes + bcast link
+	   (will need to modify this when slave nodes are supported) */
+
+	buf = cfg_reply_alloc(TLV_SPACE(sizeof(link_info)) *
+			    (2 * (tipc_max_nodes - 1) + 1));
+	if (!buf)
+		return NULL;
+
+	/* Add TLV for broadcast link */
+
+	link_info.dest = tipc_own_addr & 0xfffff000;
+	link_info.dest = htonl(link_info.dest);
+	link_info.up = htonl(1);
+	strcpy(link_info.str, bc_link_name);
+	cfg_append_tlv(buf, TIPC_TLV_LINK_INFO, &link_info, sizeof(link_info));
+
+	/* Add TLVs for any other links in scope */
+
+	for (n_ptr = nodes; n_ptr; n_ptr = n_ptr->next) {
+		u32 i;
+
+		if (!in_scope(domain, n_ptr->addr))
+			continue;
+		for (i = 0; i < MAX_BEARERS; i++) {
+			if (!n_ptr->links[i])
+				continue;
+			link_info.dest = htonl(n_ptr->addr);
+			link_info.up = htonl(link_is_up(n_ptr->links[i]));
+			strcpy(link_info.str, n_ptr->links[i]->name);
+			cfg_append_tlv(buf, TIPC_TLV_LINK_INFO,
+				       &link_info, sizeof(link_info));
+		}
+	}
+
+	return buf;
+}
diff --git a/net/tipc/node.h b/net/tipc/node.h
new file mode 100644
index 0000000..1f61687
--- /dev/null
+++ b/net/tipc/node.h
@@ -0,0 +1,141 @@
+/*
+ * net/tipc/node.h: Include file for TIPC node management routines
+ * 
+ * Copyright (c) 2003-2005, Ericsson Research Canada
+ * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005-2006, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without 
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this 
+ * list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice, 
+ * this list of conditions and the following disclaimer in the documentation 
+ * and/or other materials provided with the distribution.
+ * Neither the names of the copyright holders nor the names of its 
+ * contributors may be used to endorse or promote products derived from this 
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_NODE_H
+#define _TIPC_NODE_H
+
+#include "node_subscr.h"
+#include "addr.h"
+#include "cluster.h"
+#include "bearer.h"
+
+/**
+ * struct node - TIPC node structure
+ * @addr: network address of node
+ * @lock: spinlock governing access to structure
+ * @owner: pointer to cluster that node belongs to
+ * @next: pointer to next node in sorted list of cluster's nodes
+ * @nsub: list of "node down" subscriptions monitoring node
+ * @active_links: pointers to active links to node
+ * @links: pointers to all links to node
+ * @link_cnt: number of links to node
+ * @permit_changeover: non-zero if node has redundant links to this system
+ * @routers: bitmap (used for multicluster communication)
+ * @last_router: (used for multicluster communication)
+ * @bclink: broadcast-related info
+ *    @supported: non-zero if node supports TIPC b'cast capability
+ *    @acked: sequence # of last outbound b'cast message acknowledged by node
+ *    @last_in: sequence # of last in-sequence b'cast message received from node
+ *    @gap_after: sequence # of last message not requiring a NAK request
+ *    @gap_to: sequence # of last message requiring a NAK request
+ *    @nack_sync: counter that determines when NAK requests should be sent
+ *    @deferred_head: oldest OOS b'cast message received from node
+ *    @deferred_tail: newest OOS b'cast message received from node
+ *    @defragm: list of partially reassembled b'cast message fragments from node
+ */
+ 
+struct node {
+	u32 addr;
+	spinlock_t lock;
+	struct cluster *owner;
+	struct node *next;
+	struct list_head nsub;
+	struct link *active_links[2];
+	struct link *links[MAX_BEARERS];
+	int link_cnt;
+	int permit_changeover;
+	u32 routers[512/32];
+	int last_router;
+	struct {
+		int supported;
+		u32 acked;
+		u32 last_in;
+		u32 gap_after; 
+		u32 gap_to; 
+		u32 nack_sync;
+		struct sk_buff *deferred_head;
+		struct sk_buff *deferred_tail;
+		struct sk_buff *defragm;
+	} bclink;
+};
+
+extern struct node *nodes;
+extern u32 tipc_own_tag;
+
+struct node *node_create(u32 addr);
+void node_delete(struct node *n_ptr);
+struct node *node_attach_link(struct link *l_ptr);
+void node_detach_link(struct node *n_ptr, struct link *l_ptr);
+void node_link_down(struct node *n_ptr, struct link *l_ptr);
+void node_link_up(struct node *n_ptr, struct link *l_ptr);
+int node_has_active_links(struct node *n_ptr);
+int node_has_redundant_links(struct node *n_ptr);
+u32 node_select_router(struct node *n_ptr, u32 ref);
+struct node *node_select_next_hop(u32 addr, u32 selector);
+int node_is_up(struct node *n_ptr);
+void node_add_router(struct node *n_ptr, u32 router);
+void node_remove_router(struct node *n_ptr, u32 router);
+struct sk_buff *node_get_links(const void *req_tlv_area, int req_tlv_space);
+struct sk_buff *node_get_nodes(const void *req_tlv_area, int req_tlv_space);
+
+static inline struct node *node_find(u32 addr)
+{
+	if (likely(in_own_cluster(addr)))
+		return local_nodes[tipc_node(addr)];
+	else if (addr_domain_valid(addr)) {
+		struct cluster *c_ptr = cluster_find(addr);
+
+		if (c_ptr)
+			return c_ptr->nodes[tipc_node(addr)];
+	}
+	return NULL;
+}
+
+static inline struct node *node_select(u32 addr, u32 selector)
+{
+	if (likely(in_own_cluster(addr)))
+		return local_nodes[tipc_node(addr)];
+	return node_select_next_hop(addr, selector);
+}
+
+static inline void node_lock(struct node *n_ptr)
+{
+	spin_lock_bh(&n_ptr->lock);
+}
+
+static inline void node_unlock(struct node *n_ptr)
+{
+	spin_unlock_bh(&n_ptr->lock);
+}
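+
+/*
+ * Typical lookup pattern (illustrative sketch only, not part of this
+ * patch's API surface):
+ *
+ *	struct node *n_ptr = node_find(addr);
+ *
+ *	if (n_ptr) {
+ *		node_lock(n_ptr);
+ *		... inspect or update n_ptr->links[], n_ptr->bclink, ...
+ *		node_unlock(n_ptr);
+ *	}
+ *
+ * node_select() differs from node_find() in that it may return an
+ * intermediate router node when the destination has no usable direct
+ * link (see node_select_next_hop() in node.c).
+ */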
+
+#endif
diff --git a/net/tipc/node_subscr.c b/net/tipc/node_subscr.c
new file mode 100644
index 0000000..c38a3b0
--- /dev/null
+++ b/net/tipc/node_subscr.c
@@ -0,0 +1,76 @@
+/*
+ * net/tipc/node_subscr.c: TIPC "node down" subscription handling
+ * 
+ * Copyright (c) 2003-2005, Ericsson Research Canada
+ * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005-2006, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without 
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this 
+ * list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice, 
+ * this list of conditions and the following disclaimer in the documentation 
+ * and/or other materials provided with the distribution.
+ * Neither the names of the copyright holders nor the names of its 
+ * contributors may be used to endorse or promote products derived from this 
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "dbg.h"
+#include "node_subscr.h"
+#include "node.h"
+#include "addr.h"
+
+/**
+ * nodesub_subscribe - create "node down" subscription for specified node
+ */
+
+void nodesub_subscribe(struct node_subscr *node_sub, u32 addr, 
+		       void *usr_handle, net_ev_handler handle_down)
+{
+	node_sub->node = 0;
+	if (addr == tipc_own_addr)
+		return;
+	if (!addr_node_valid(addr)) {
+		warn("node_subscr with illegal %x\n", addr);
+		return;
+	}
+
+	node_sub->handle_node_down = handle_down;
+	node_sub->usr_handle = usr_handle;
+	node_sub->node = node_find(addr);
+	assert(node_sub->node);
+	node_lock(node_sub->node);
+	list_add_tail(&node_sub->nodesub_list, &node_sub->node->nsub);
+	node_unlock(node_sub->node);
+}
+
+/**
+ * nodesub_unsubscribe - cancel "node down" subscription (if any)
+ */
+
+void nodesub_unsubscribe(struct node_subscr *node_sub)
+{
+	if (!node_sub->node)
+		return;
+
+	node_lock(node_sub->node);
+	list_del_init(&node_sub->nodesub_list);
+	node_unlock(node_sub->node);
+}
diff --git a/net/tipc/node_subscr.h b/net/tipc/node_subscr.h
new file mode 100644
index 0000000..63ae92e
--- /dev/null
+++ b/net/tipc/node_subscr.h
@@ -0,0 +1,60 @@
+/*
+ * net/tipc/node_subscr.h: Include file for TIPC "node down" subscription handling
+ * 
+ * Copyright (c) 2003-2005, Ericsson Research Canada
+ * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005-2006, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without 
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this 
+ * list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice, 
+ * this list of conditions and the following disclaimer in the documentation 
+ * and/or other materials provided with the distribution.
+ * Neither the names of the copyright holders nor the names of its 
+ * contributors may be used to endorse or promote products derived from this 
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_NODE_SUBSCR_H
+#define _TIPC_NODE_SUBSCR_H
+
+#include "addr.h"
+
+typedef void (*net_ev_handler) (void *usr_handle);
+
+/**
+ * struct node_subscr - "node down" subscription entry
+ * @node: ptr to node structure of interest (or NULL, if none)
+ * @handle_node_down: routine to invoke when node fails
+ * @usr_handle: argument to pass to routine when node fails
+ * @nodesub_list: adjacent entries in list of subscriptions for the node
+ */
+
+struct node_subscr {
+	struct node *node;
+	net_ev_handler handle_node_down;
+	void *usr_handle;
+	struct list_head nodesub_list;
+};
+
+void nodesub_subscribe(struct node_subscr *node_sub, u32 addr,
+		       void *usr_handle, net_ev_handler handle_down);
+void nodesub_unsubscribe(struct node_subscr *node_sub);
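+
+/*
+ * Usage sketch (illustrative only; handler and variable names are
+ * hypothetical -- port.c uses this pattern for connection supervision):
+ *
+ *	static void my_node_down(void *usr_handle)
+ *	{
+ *		... react to failure of the peer node ...
+ *	}
+ *
+ *	struct node_subscr sub;
+ *
+ *	nodesub_subscribe(&sub, peer_node_addr, my_handle, my_node_down);
+ *	...
+ *	nodesub_unsubscribe(&sub);
+ *
+ * The handler is invoked as a deferred signal (k_signal) once the last
+ * link to the node goes down; see node_lost_contact() in node.c.
+ */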
+
+#endif
diff --git a/net/tipc/port.c b/net/tipc/port.c
new file mode 100644
index 0000000..f0f228c
--- /dev/null
+++ b/net/tipc/port.c
@@ -0,0 +1,1704 @@
+/*
+ * net/tipc/port.c: TIPC port code
+ * 
+ * Copyright (c) 2003-2005, Ericsson Research Canada
+ * Copyright (c) 2004-2005, Wind River Systems
+ * Copyright (c) 2005-2006, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without 
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this 
+ * list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice, 
+ * this list of conditions and the following disclaimer in the documentation 
+ * and/or other materials provided with the distribution.
+ * Neither the names of the copyright holders nor the names of its 
+ * contributors may be used to endorse or promote products derived from this 
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "config.h"
+#include "dbg.h"
+#include "port.h"
+#include "addr.h"
+#include "link.h"
+#include "node.h"
+#include "port.h"
+#include "name_table.h"
+#include "user_reg.h"
+#include "msg.h"
+#include "bcast.h"
+
+/* Connection management: */
+#define PROBING_INTERVAL 3600000	/* [ms] => 1 h */
+#define CONFIRMED 0
+#define PROBING 1
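+
+/*
+ * Connection probing is a two-state machine (see port_timeout() and
+ * port_recv_proto_msg() below): a timer expiry in CONFIRMED state sends
+ * a CONN_PROBE and enters PROBING; a valid CONN_PROBE or
+ * CONN_PROBE_REPLY from the peer returns the port to CONFIRMED.  A
+ * timer expiry while still in PROBING means the peer has been silent
+ * for a whole probing interval, and the connection is aborted.
+ */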
+
+#define MAX_REJECT_SIZE 1024
+
+static struct sk_buff *msg_queue_head = NULL;
+static struct sk_buff *msg_queue_tail = NULL;
+
+spinlock_t port_list_lock = SPIN_LOCK_UNLOCKED;
+static spinlock_t queue_lock = SPIN_LOCK_UNLOCKED;
+
+LIST_HEAD(ports);
+static void port_handle_node_down(unsigned long ref);
+static struct sk_buff *port_build_self_abort_msg(struct port *, u32 err);
+static struct sk_buff *port_build_peer_abort_msg(struct port *, u32 err);
+static void port_timeout(unsigned long ref);
+
+
+static inline u32 port_peernode(struct port *p_ptr)
+{
+	return msg_destnode(&p_ptr->publ.phdr);
+}
+
+static inline u32 port_peerport(struct port *p_ptr)
+{
+	return msg_destport(&p_ptr->publ.phdr);
+}
+
+static inline u32 port_out_seqno(struct port *p_ptr)
+{
+	return msg_transp_seqno(&p_ptr->publ.phdr);
+}
+
+static inline void port_set_out_seqno(struct port *p_ptr, u32 seqno) 
+{
+	msg_set_transp_seqno(&p_ptr->publ.phdr,seqno);
+}
+
+static inline void port_incr_out_seqno(struct port *p_ptr)
+{
+	struct tipc_msg *m = &p_ptr->publ.phdr;
+
+	if (likely(!msg_routed(m)))
+		return;
+	msg_set_transp_seqno(m, (msg_transp_seqno(m) + 1));
+}
+
+/**
+ * tipc_multicast - send a multicast message to local and remote destinations
+ */
+
+int tipc_multicast(u32 ref, struct tipc_name_seq const *seq, u32 domain,
+		   u32 num_sect, struct iovec const *msg_sect)
+{
+	struct tipc_msg *hdr;
+	struct sk_buff *buf;
+	struct sk_buff *ibuf = NULL;
+	struct port_list dports = {0, NULL, };
+	struct port *oport = port_deref(ref);
+	int ext_targets;
+	int res;
+
+	if (unlikely(!oport))
+		return -EINVAL;
+
+	/* Create multicast message */
+
+	hdr = &oport->publ.phdr;
+	msg_set_type(hdr, TIPC_MCAST_MSG);
+	msg_set_nametype(hdr, seq->type);
+	msg_set_namelower(hdr, seq->lower);
+	msg_set_nameupper(hdr, seq->upper);
+	msg_set_hdr_sz(hdr, MCAST_H_SIZE);
+	res = msg_build(hdr, msg_sect, num_sect, MAX_MSG_SIZE,
+			!oport->user_port, &buf);
+	if (unlikely(!buf))
+		return res;
+
+	/* Figure out where to send multicast message */
+
+	ext_targets = nametbl_mc_translate(seq->type, seq->lower, seq->upper,
+					   TIPC_NODE_SCOPE, &dports);
+	
+	/* Send message to destinations (duplicate it only if necessary) */ 
+
+	if (ext_targets) {
+		if (dports.count != 0) {
+			ibuf = skb_copy(buf, GFP_ATOMIC);
+			if (ibuf == NULL) {
+				port_list_free(&dports);
+				buf_discard(buf);
+				return -ENOMEM;
+			}
+		}
+		res = bclink_send_msg(buf);
+		if ((res < 0) && (dports.count != 0)) {
+			buf_discard(ibuf);
+		}
+	} else {
+		ibuf = buf;
+	}
+
+	if (res >= 0) {
+		if (ibuf)
+			port_recv_mcast(ibuf, &dports);
+	} else {
+		port_list_free(&dports);
+	}
+	return res;
+}
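+
+/*
+ * Illustrative call (a sketch, not part of this patch; function and
+ * variable names are hypothetical): multicast one message section to
+ * every port bound to an instance in name sequence {1000, 0..99}.
+ */
+#if 0
+static int mcast_example(u32 my_port_ref)
+{
+	struct tipc_name_seq seq = { 1000, 0, 99 };
+	char data[] = "hello";
+	struct iovec iov = { data, sizeof(data) };
+
+	return tipc_multicast(my_port_ref, &seq, 0, 1, &iov);
+}
+#endif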
+
+/**
+ * port_recv_mcast - deliver multicast message to all destination ports
+ * 
+ * If there is no port list, perform a lookup to create one
+ */
+
+void port_recv_mcast(struct sk_buff *buf, struct port_list *dp)
+{
+	struct tipc_msg *msg;
+	struct port_list dports = {0, NULL, };
+	struct port_list *item = dp;
+	int cnt = 0;
+
+	assert(buf);
+	msg = buf_msg(buf);
+
+	/* Create destination port list, if one wasn't supplied */
+
+	if (dp == NULL) {
+		nametbl_mc_translate(msg_nametype(msg),
+				     msg_namelower(msg),
+				     msg_nameupper(msg),
+				     TIPC_CLUSTER_SCOPE,
+				     &dports);
+		item = dp = &dports;
+	}
+
+	/* Deliver a copy of message to each destination port */
+
+	if (dp->count != 0) {
+		if (dp->count == 1) {
+			msg_set_destport(msg, dp->ports[0]);
+			port_recv_msg(buf);
+			port_list_free(dp);
+			return;
+		}
+		for (; cnt < dp->count; cnt++) {
+			int index = cnt % PLSIZE;
+			struct sk_buff *b = skb_clone(buf, GFP_ATOMIC);
+
+			if (b == NULL) {
+				warn("Buffer allocation failure\n");
+				msg_dbg(msg, "LOST:");
+				goto exit;
+			}
+			if ((index == 0) && (cnt != 0)) {
+				item = item->next;
+			}
+			msg_set_destport(buf_msg(b),item->ports[index]);
+			port_recv_msg(b);
+		}
+	}
+exit:
+	buf_discard(buf);
+	port_list_free(dp);
+}
+
+/**
+ * tipc_createport_raw - create a native TIPC port
+ * 
+ * Returns local port reference
+ */
+
+u32 tipc_createport_raw(void *usr_handle,
+			u32 (*dispatcher)(struct tipc_port *, struct sk_buff *),
+			void (*wakeup)(struct tipc_port *),
+			const u32 importance)
+{
+	struct port *p_ptr;
+	struct tipc_msg *msg;
+	u32 ref;
+
+	p_ptr = kzalloc(sizeof(*p_ptr), GFP_ATOMIC);
+	if (p_ptr == NULL) {
+		warn("Memory squeeze; failed to create port\n");
+		return 0;
+	}
+	ref = ref_acquire(p_ptr, &p_ptr->publ.lock);
+	if (!ref) {
+		warn("Reference Table Exhausted\n");
+		kfree(p_ptr);
+		return 0;
+	}
+
+	port_lock(ref);
+	p_ptr->publ.ref = ref;
+	msg = &p_ptr->publ.phdr;
+	msg_init(msg, DATA_LOW, TIPC_NAMED_MSG, TIPC_OK, LONG_H_SIZE, 0);
+	msg_set_orignode(msg, tipc_own_addr);
+	msg_set_prevnode(msg, tipc_own_addr);
+	msg_set_origport(msg, ref);
+	msg_set_importance(msg,importance);
+	p_ptr->last_in_seqno = 41;
+	p_ptr->sent = 1;
+	p_ptr->publ.usr_handle = usr_handle;
+	INIT_LIST_HEAD(&p_ptr->wait_list);
+	INIT_LIST_HEAD(&p_ptr->subscription.nodesub_list);
+	p_ptr->congested_link = 0;
+	p_ptr->max_pkt = MAX_PKT_DEFAULT;
+	p_ptr->dispatcher = dispatcher;
+	p_ptr->wakeup = wakeup;
+	p_ptr->user_port = 0;
+	k_init_timer(&p_ptr->timer, (Handler)port_timeout, ref);
+	spin_lock_bh(&port_list_lock);
+	INIT_LIST_HEAD(&p_ptr->publications);
+	INIT_LIST_HEAD(&p_ptr->port_list);
+	list_add_tail(&p_ptr->port_list, &ports);
+	spin_unlock_bh(&port_list_lock);
+	port_unlock(p_ptr);
+	return ref;
+}
+
+int tipc_deleteport(u32 ref)
+{
+	struct port *p_ptr;
+	struct sk_buff *buf = 0;
+
+	tipc_withdraw(ref, 0, 0);
+	p_ptr = port_lock(ref);
+	if (!p_ptr) 
+		return -EINVAL;
+
+	ref_discard(ref);
+	port_unlock(p_ptr);
+
+	k_cancel_timer(&p_ptr->timer);
+	if (p_ptr->publ.connected) {
+		buf = port_build_peer_abort_msg(p_ptr, TIPC_ERR_NO_PORT);
+		nodesub_unsubscribe(&p_ptr->subscription);
+	}
+	if (p_ptr->user_port) {
+		reg_remove_port(p_ptr->user_port);
+		kfree(p_ptr->user_port);
+	}
+
+	spin_lock_bh(&port_list_lock);
+	list_del(&p_ptr->port_list);
+	list_del(&p_ptr->wait_list);
+	spin_unlock_bh(&port_list_lock);
+	k_term_timer(&p_ptr->timer);
+	kfree(p_ptr);
+	dbg("Deleted port %u\n", ref);
+	net_route_msg(buf);
+	return TIPC_OK;
+}
+
+/**
+ * tipc_get_port() - return port associated with 'ref'
+ * 
+ * Note: Port is not locked.
+ */
+
+struct tipc_port *tipc_get_port(const u32 ref)
+{
+	return (struct tipc_port *)ref_deref(ref);
+}
+
+/**
+ * tipc_get_handle - return user handle associated with port 'ref'
+ */
+
+void *tipc_get_handle(const u32 ref)
+{
+	struct port *p_ptr;
+	void *handle;
+
+	p_ptr = port_lock(ref);
+	if (!p_ptr)
+		return 0;
+	handle = p_ptr->publ.usr_handle;
+	port_unlock(p_ptr);
+	return handle;
+}
+
+static inline int port_unreliable(struct port *p_ptr)
+{
+	return msg_src_droppable(&p_ptr->publ.phdr);
+}
+
+int tipc_portunreliable(u32 ref, unsigned int *isunreliable)
+{
+	struct port *p_ptr;
+	
+	p_ptr = port_lock(ref);
+	if (!p_ptr)
+		return -EINVAL;
+	*isunreliable = port_unreliable(p_ptr);
+	spin_unlock_bh(p_ptr->publ.lock);
+	return TIPC_OK;
+}
+
+int tipc_set_portunreliable(u32 ref, unsigned int isunreliable)
+{
+	struct port *p_ptr;
+	
+	p_ptr = port_lock(ref);
+	if (!p_ptr)
+		return -EINVAL;
+	msg_set_src_droppable(&p_ptr->publ.phdr, (isunreliable != 0));
+	port_unlock(p_ptr);
+	return TIPC_OK;
+}
+
+static inline int port_unreturnable(struct port *p_ptr)
+{
+	return msg_dest_droppable(&p_ptr->publ.phdr);
+}
+
+int tipc_portunreturnable(u32 ref, unsigned int *isunrejectable)
+{
+	struct port *p_ptr;
+	
+	p_ptr = port_lock(ref);
+	if (!p_ptr)
+		return -EINVAL;
+	*isunrejectable = port_unreturnable(p_ptr);
+	spin_unlock_bh(p_ptr->publ.lock);
+	return TIPC_OK;
+}
+
+int tipc_set_portunreturnable(u32 ref, unsigned int isunrejectable)
+{
+	struct port *p_ptr;
+	
+	p_ptr = port_lock(ref);
+	if (!p_ptr)
+		return -EINVAL;
+	msg_set_dest_droppable(&p_ptr->publ.phdr, (isunrejectable != 0));
+	port_unlock(p_ptr);
+	return TIPC_OK;
+}
+
+/*
+ * port_build_proto_msg(): build a port-level protocol
+ * or connection abort message. Called with the
+ * tipc_port lock held.
+ */
+static struct sk_buff *port_build_proto_msg(u32 destport, u32 destnode,
+					    u32 origport, u32 orignode,
+					    u32 usr, u32 type, u32 err, 
+					    u32 seqno, u32 ack)
+{
+	struct sk_buff *buf;
+	struct tipc_msg *msg;
+	
+	buf = buf_acquire(LONG_H_SIZE);
+	if (buf) {
+		msg = buf_msg(buf);
+		msg_init(msg, usr, type, err, LONG_H_SIZE, destnode);
+		msg_set_destport(msg, destport);
+		msg_set_origport(msg, origport);
+		msg_set_destnode(msg, destnode);
+		msg_set_orignode(msg, orignode);
+		msg_set_transp_seqno(msg, seqno);
+		msg_set_msgcnt(msg, ack);
+		msg_dbg(msg, "PORT>SEND>:");
+	}
+	return buf;
+}
+
+int tipc_set_msg_option(struct tipc_port *tp_ptr, const char *opt, const u32 sz)
+{
+	msg_expand(&tp_ptr->phdr, msg_destnode(&tp_ptr->phdr));
+	msg_set_options(&tp_ptr->phdr, opt, sz);
+	return TIPC_OK;
+}
+
+int tipc_reject_msg(struct sk_buff *buf, u32 err)
+{
+	struct tipc_msg *msg = buf_msg(buf);
+	struct sk_buff *rbuf;
+	struct tipc_msg *rmsg;
+	int hdr_sz;
+	u32 imp = msg_importance(msg);
+	u32 data_sz = msg_data_sz(msg);
+
+	if (data_sz > MAX_REJECT_SIZE)
+		data_sz = MAX_REJECT_SIZE;
+	if (msg_connected(msg) && (imp < TIPC_CRITICAL_IMPORTANCE))
+		imp++;
+	msg_dbg(msg, "port->rej: ");
+
+	/* discard rejected message if it shouldn't be returned to sender */
+	if (msg_errcode(msg) || msg_dest_droppable(msg)) {
+		buf_discard(buf);
+		return data_sz;
+	}
+
+	/* construct rejected message */
+	if (msg_mcast(msg))
+		hdr_sz = MCAST_H_SIZE;
+	else
+		hdr_sz = LONG_H_SIZE;
+	rbuf = buf_acquire(data_sz + hdr_sz);
+	if (rbuf == NULL) {
+		buf_discard(buf);
+		return data_sz;
+	}
+	rmsg = buf_msg(rbuf);
+	msg_init(rmsg, imp, msg_type(msg), err, hdr_sz, msg_orignode(msg));
+	msg_set_destport(rmsg, msg_origport(msg));
+	msg_set_prevnode(rmsg, tipc_own_addr);
+	msg_set_origport(rmsg, msg_destport(msg));
+	if (msg_short(msg))
+		msg_set_orignode(rmsg, tipc_own_addr);
+	else
+		msg_set_orignode(rmsg, msg_destnode(msg));
+	msg_set_size(rmsg, data_sz + hdr_sz); 
+	msg_set_nametype(rmsg, msg_nametype(msg));
+	msg_set_nameinst(rmsg, msg_nameinst(msg));
+	memcpy(rbuf->data + hdr_sz, msg_data(msg), data_sz);
+
+	/* send self-abort message when rejecting on a connected port */
+	if (msg_connected(msg)) {
+		struct sk_buff *abuf = 0;
+		struct port *p_ptr = port_lock(msg_destport(msg));
+
+		if (p_ptr) {
+			if (p_ptr->publ.connected)
+				abuf = port_build_self_abort_msg(p_ptr, err);
+			port_unlock(p_ptr);
+		}
+		net_route_msg(abuf);
+	}
+
+	/* send rejected message */
+	buf_discard(buf);
+	net_route_msg(rbuf);
+	return data_sz;
+}
+
+int port_reject_sections(struct port *p_ptr, struct tipc_msg *hdr,
+			 struct iovec const *msg_sect, u32 num_sect,
+			 int err)
+{
+	struct sk_buff *buf;
+	int res;
+
+	res = msg_build(hdr, msg_sect, num_sect, MAX_MSG_SIZE, 
+			!p_ptr->user_port, &buf);
+	if (!buf)
+		return res;
+
+	return tipc_reject_msg(buf, err);
+}
+
+static void port_timeout(unsigned long ref)
+{
+	struct port *p_ptr = port_lock(ref);
+	struct sk_buff *buf = NULL;
+
+	if (!p_ptr)
+		return;
+	if (!p_ptr->publ.connected) {
+		port_unlock(p_ptr);
+		return;
+	}
+
+	/* Last probe answered ? */
+	if (p_ptr->probing_state == PROBING) {
+		buf = port_build_self_abort_msg(p_ptr, TIPC_ERR_NO_PORT);
+	} else {
+		buf = port_build_proto_msg(port_peerport(p_ptr),
+					   port_peernode(p_ptr),
+					   p_ptr->publ.ref,
+					   tipc_own_addr,
+					   CONN_MANAGER,
+					   CONN_PROBE,
+					   TIPC_OK, 
+					   port_out_seqno(p_ptr),
+					   0);
+		port_incr_out_seqno(p_ptr);
+		p_ptr->probing_state = PROBING;
+		k_start_timer(&p_ptr->timer, p_ptr->probing_interval);
+	}
+	port_unlock(p_ptr);
+	net_route_msg(buf);
+}
+
+
+static void port_handle_node_down(unsigned long ref)
+{
+	struct port *p_ptr = port_lock(ref);
+	struct sk_buff *buf = NULL;
+
+	if (!p_ptr)
+		return;
+	buf = port_build_self_abort_msg(p_ptr, TIPC_ERR_NO_NODE);
+	port_unlock(p_ptr);
+	net_route_msg(buf);
+}
+
+
+static struct sk_buff *port_build_self_abort_msg(struct port *p_ptr, u32 err)
+{
+	u32 imp = msg_importance(&p_ptr->publ.phdr);
+
+	if (!p_ptr->publ.connected)
+		return NULL;
+	if (imp < TIPC_CRITICAL_IMPORTANCE)
+		imp++;
+	return port_build_proto_msg(p_ptr->publ.ref,
+				    tipc_own_addr,
+				    port_peerport(p_ptr),
+				    port_peernode(p_ptr),
+				    imp,
+				    TIPC_CONN_MSG,
+				    err, 
+				    p_ptr->last_in_seqno + 1,
+				    0);
+}
+
+
+static struct sk_buff *port_build_peer_abort_msg(struct port *p_ptr, u32 err)
+{
+	u32 imp = msg_importance(&p_ptr->publ.phdr);
+
+	if (!p_ptr->publ.connected)
+		return NULL;
+	if (imp < TIPC_CRITICAL_IMPORTANCE)
+		imp++;
+	return port_build_proto_msg(port_peerport(p_ptr),
+				    port_peernode(p_ptr),
+				    p_ptr->publ.ref,
+				    tipc_own_addr,
+				    imp,
+				    TIPC_CONN_MSG,
+				    err, 
+				    port_out_seqno(p_ptr),
+				    0);
+}
+
+void port_recv_proto_msg(struct sk_buff *buf)
+{
+	struct tipc_msg *msg = buf_msg(buf);
+	struct port *p_ptr = port_lock(msg_destport(msg));
+	u32 err = TIPC_OK;
+	struct sk_buff *r_buf = NULL;
+	struct sk_buff *abort_buf = NULL;
+
+	msg_dbg(msg, "PORT<RECV<:");
+
+	if (!p_ptr) {
+		err = TIPC_ERR_NO_PORT;
+	} else if (p_ptr->publ.connected) {
+		if (port_peernode(p_ptr) != msg_orignode(msg))
+			err = TIPC_ERR_NO_PORT;
+		if (port_peerport(p_ptr) != msg_origport(msg))
+			err = TIPC_ERR_NO_PORT;
+		if (!err && msg_routed(msg)) {
+			u32 seqno = msg_transp_seqno(msg);
+			u32 myno =  ++p_ptr->last_in_seqno;
+			if (seqno != myno) {
+				err = TIPC_ERR_NO_PORT;
+				abort_buf = port_build_self_abort_msg(p_ptr, err);
+			}
+		}
+		if (msg_type(msg) == CONN_ACK) {
+			int wakeup = port_congested(p_ptr) && 
+				     p_ptr->publ.congested &&
+				     p_ptr->wakeup;
+			p_ptr->acked += msg_msgcnt(msg);
+			if (port_congested(p_ptr))
+				goto exit;
+			p_ptr->publ.congested = 0;
+			if (!wakeup)
+				goto exit;
+			p_ptr->wakeup(&p_ptr->publ);
+			goto exit;
+		}
+	} else if (p_ptr->publ.published) {
+		err = TIPC_ERR_NO_PORT;
+	}
+	if (err) {
+		r_buf = port_build_proto_msg(msg_origport(msg),
+					     msg_orignode(msg), 
+					     msg_destport(msg), 
+					     tipc_own_addr,
+					     DATA_HIGH,
+					     TIPC_CONN_MSG,
+					     err,
+					     0,
+					     0);
+		goto exit;
+	}
+
+	/* All is fine */
+	if (msg_type(msg) == CONN_PROBE) {
+		r_buf = port_build_proto_msg(msg_origport(msg), 
+					     msg_orignode(msg), 
+					     msg_destport(msg), 
+					     tipc_own_addr, 
+					     CONN_MANAGER,
+					     CONN_PROBE_REPLY,
+					     TIPC_OK,
+					     port_out_seqno(p_ptr),
+					     0);
+	}
+	p_ptr->probing_state = CONFIRMED;
+	port_incr_out_seqno(p_ptr);
+exit:
+	if (p_ptr)
+		port_unlock(p_ptr);
+	net_route_msg(r_buf);
+	net_route_msg(abort_buf);
+	buf_discard(buf);
+}
+
+static void port_print(struct port *p_ptr, struct print_buf *buf, int full_id)
+{
+	struct publication *publ;
+
+	if (full_id)
+		tipc_printf(buf, "<%u.%u.%u:%u>:",
+			    tipc_zone(tipc_own_addr), tipc_cluster(tipc_own_addr),
+			    tipc_node(tipc_own_addr), p_ptr->publ.ref);
+	else
+		tipc_printf(buf, "%-10u:", p_ptr->publ.ref);
+
+	if (p_ptr->publ.connected) {
+		u32 dport = port_peerport(p_ptr);
+		u32 destnode = port_peernode(p_ptr);
+
+		tipc_printf(buf, " connected to <%u.%u.%u:%u>",
+			    tipc_zone(destnode), tipc_cluster(destnode),
+			    tipc_node(destnode), dport);
+		if (p_ptr->publ.conn_type != 0)
+			tipc_printf(buf, " via {%u,%u}",
+				    p_ptr->publ.conn_type,
+				    p_ptr->publ.conn_instance);
+	} else if (p_ptr->publ.published) {
+		tipc_printf(buf, " bound to");
+		list_for_each_entry(publ, &p_ptr->publications, pport_list) {
+			if (publ->lower == publ->upper)
+				tipc_printf(buf, " {%u,%u}", publ->type,
+					    publ->lower);
+			else
+				tipc_printf(buf, " {%u,%u,%u}", publ->type,
+					    publ->lower, publ->upper);
+		}
+	}
+	tipc_printf(buf, "\n");
+}
+
+#define MAX_PORT_QUERY 32768
+
+struct sk_buff *port_get_ports(void)
+{
+	struct sk_buff *buf;
+	struct tlv_desc *rep_tlv;
+	struct print_buf pb;
+	struct port *p_ptr;
+	int str_len;
+
+	buf = cfg_reply_alloc(TLV_SPACE(MAX_PORT_QUERY));
+	if (!buf)
+		return NULL;
+	rep_tlv = (struct tlv_desc *)buf->data;
+
+	printbuf_init(&pb, TLV_DATA(rep_tlv), MAX_PORT_QUERY);
+	spin_lock_bh(&port_list_lock);
+	list_for_each_entry(p_ptr, &ports, port_list) {
+		spin_lock_bh(p_ptr->publ.lock);
+		port_print(p_ptr, &pb, 0);
+		spin_unlock_bh(p_ptr->publ.lock);
+	}
+	spin_unlock_bh(&port_list_lock);
+	str_len = printbuf_validate(&pb);
+
+	skb_put(buf, TLV_SPACE(str_len));
+	TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
+
+	return buf;
+}
+
+#if 0
+
+#define MAX_PORT_STATS 2000
+
+struct sk_buff *port_show_stats(const void *req_tlv_area, int req_tlv_space)
+{
+	u32 ref;
+	struct port *p_ptr;
+	struct sk_buff *buf;
+	struct tlv_desc *rep_tlv;
+	struct print_buf pb;
+	int str_len;
+
+	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_PORT_REF))
+		return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+
+	ref = *(u32 *)TLV_DATA(req_tlv_area);
+	ref = ntohl(ref);
+
+	p_ptr = port_lock(ref);
+	if (!p_ptr)
+		return cfg_reply_error_string("port not found");
+
+	buf = cfg_reply_alloc(TLV_SPACE(MAX_PORT_STATS));
+	if (!buf) {
+		port_unlock(p_ptr);
+		return NULL;
+	}
+	rep_tlv = (struct tlv_desc *)buf->data;
+
+	printbuf_init(&pb, TLV_DATA(rep_tlv), MAX_PORT_STATS);
+	port_print(p_ptr, &pb, 1);
+	/* NEED TO FILL IN ADDITIONAL PORT STATISTICS HERE */
+	port_unlock(p_ptr);
+	str_len = printbuf_validate(&pb);
+
+	skb_put(buf, TLV_SPACE(str_len));
+	TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
+
+	return buf;
+}
+
+#endif
+
+void port_reinit(void)
+{
+	struct port *p_ptr;
+	struct tipc_msg *msg;
+
+	spin_lock_bh(&port_list_lock);
+	list_for_each_entry(p_ptr, &ports, port_list) {
+		msg = &p_ptr->publ.phdr;
+		if (msg_orignode(msg) == tipc_own_addr)
+			break;
+		msg_set_orignode(msg, tipc_own_addr);
+	}
+	spin_unlock_bh(&port_list_lock);
+}
+
+
+/*
+ *  port_dispatcher_sigh(): Signal handler for messages destined
+ *                          for the tipc_port interface.
+ */
+
+static void port_dispatcher_sigh(void *dummy)
+{
+	struct sk_buff *buf;
+
+	spin_lock_bh(&queue_lock);
+	buf = msg_queue_head;
+	msg_queue_head = NULL;
+	spin_unlock_bh(&queue_lock);
+
+	while (buf) {
+		struct port *p_ptr;
+		struct user_port *up_ptr;
+		struct tipc_portid orig;
+		struct tipc_name_seq dseq;
+		void *usr_handle;
+		int connected;
+		int published;
+
+		struct sk_buff *next = buf->next;
+		struct tipc_msg *msg = buf_msg(buf);
+		u32 dref = msg_destport(msg);
+		
+		p_ptr = port_lock(dref);
+		if (!p_ptr) {
+			/* Port deleted while msg in queue */
+			tipc_reject_msg(buf, TIPC_ERR_NO_PORT);
+			buf = next;
+			continue;
+		}
+		orig.ref = msg_origport(msg);
+		orig.node = msg_orignode(msg);
+		up_ptr = p_ptr->user_port;
+		usr_handle = up_ptr->usr_handle;
+		connected = p_ptr->publ.connected;
+		published = p_ptr->publ.published;
+
+		if (unlikely(msg_errcode(msg)))
+			goto err;
+
+		switch (msg_type(msg)) {
+		
+		case TIPC_CONN_MSG:{
+				tipc_conn_msg_event cb = up_ptr->conn_msg_cb;
+				u32 peer_port = port_peerport(p_ptr);
+				u32 peer_node = port_peernode(p_ptr);
+
+				spin_unlock_bh(p_ptr->publ.lock);
+				if (unlikely(!connected)) {
+					if (unlikely(published))
+						goto reject;
+					tipc_connect2port(dref,&orig);
+				}
+				if (unlikely(msg_origport(msg) != peer_port))
+					goto reject;
+				if (unlikely(msg_orignode(msg) != peer_node))
+					goto reject;
+				if (unlikely(!cb))
+					goto reject;
+				if (unlikely(++p_ptr->publ.conn_unacked >= 
+					     TIPC_FLOW_CONTROL_WIN))
+					tipc_acknowledge(dref, 
+							 p_ptr->publ.conn_unacked);
+				skb_pull(buf, msg_hdr_sz(msg));
+				cb(usr_handle, dref, &buf, msg_data(msg),
+				   msg_data_sz(msg));
+				break;
+			}
+		case TIPC_DIRECT_MSG:{
+				tipc_msg_event cb = up_ptr->msg_cb;
+
+				spin_unlock_bh(p_ptr->publ.lock);
+				if (unlikely(connected))
+					goto reject;
+				if (unlikely(!cb))
+					goto reject;
+				skb_pull(buf, msg_hdr_sz(msg));
+				cb(usr_handle, dref, &buf, msg_data(msg), 
+				   msg_data_sz(msg), msg_importance(msg),
+				   &orig);
+				break;
+			}
+		case TIPC_NAMED_MSG:{
+				tipc_named_msg_event cb = up_ptr->named_msg_cb;
+
+				spin_unlock_bh(p_ptr->publ.lock);
+				if (unlikely(connected))
+					goto reject;
+				if (unlikely(!cb))
+					goto reject;
+				if (unlikely(!published))
+					goto reject;
+				dseq.type = msg_nametype(msg);
+				dseq.lower = msg_nameinst(msg);
+				dseq.upper = dseq.lower;
+				skb_pull(buf, msg_hdr_sz(msg));
+				cb(usr_handle, dref, &buf, msg_data(msg), 
+				   msg_data_sz(msg), msg_importance(msg),
+				   &orig, &dseq);
+				break;
+			}
+		}
+		if (buf)
+			buf_discard(buf);
+		buf = next;
+		continue;
+err:
+		switch (msg_type(msg)) {
+		
+		case TIPC_CONN_MSG:{
+				tipc_conn_shutdown_event cb = 
+					up_ptr->conn_err_cb;
+				u32 peer_port = port_peerport(p_ptr);
+				u32 peer_node = port_peernode(p_ptr);
+
+				spin_unlock_bh(p_ptr->publ.lock);
+				if (!connected || !cb)
+					break;
+				if (msg_origport(msg) != peer_port)
+					break;
+				if (msg_orignode(msg) != peer_node)
+					break;
+				tipc_disconnect(dref);
+				skb_pull(buf, msg_hdr_sz(msg));
+				cb(usr_handle, dref, &buf, msg_data(msg),
+				   msg_data_sz(msg), msg_errcode(msg));
+				break;
+			}
+		case TIPC_DIRECT_MSG:{
+				tipc_msg_err_event cb = up_ptr->err_cb;
+
+				spin_unlock_bh(p_ptr->publ.lock);
+				if (connected || !cb)
+					break;
+				skb_pull(buf, msg_hdr_sz(msg));
+				cb(usr_handle, dref, &buf, msg_data(msg),
+				   msg_data_sz(msg), msg_errcode(msg), &orig);
+				break;
+			}
+		case TIPC_NAMED_MSG:{
+				tipc_named_msg_err_event cb = 
+					up_ptr->named_err_cb;
+
+				spin_unlock_bh(p_ptr->publ.lock);
+				if (connected || !cb)
+					break;
+				dseq.type = msg_nametype(msg);
+				dseq.lower = msg_nameinst(msg);
+				dseq.upper = dseq.lower;
+				skb_pull(buf, msg_hdr_sz(msg));
+				cb(usr_handle, dref, &buf, msg_data(msg), 
+				   msg_data_sz(msg), msg_errcode(msg), &dseq);
+				break;
+			}
+		}
+		if (buf)
+			buf_discard(buf);
+		buf = next;
+		continue;
+reject:
+		tipc_reject_msg(buf, TIPC_ERR_NO_PORT);
+		buf = next;
+	}
+}
+
+/*
+ *  port_dispatcher(): Dispatcher for messages destined
+ *  for the tipc_port interface. Called with port locked.
+ */
+
+static u32 port_dispatcher(struct tipc_port *dummy, struct sk_buff *buf)
+{
+	buf->next = NULL;
+	spin_lock_bh(&queue_lock);
+	if (msg_queue_head) {
+		msg_queue_tail->next = buf;
+		msg_queue_tail = buf;
+	} else {
+		msg_queue_tail = msg_queue_head = buf;
+		k_signal((Handler)port_dispatcher_sigh, 0);
+	}
+	spin_unlock_bh(&queue_lock);
+	return TIPC_OK;
+}
+
+/*
+ * Wake up port after congestion: Called with port locked.
+ */
+
+static void port_wakeup_sh(unsigned long ref)
+{
+	struct port *p_ptr;
+	struct user_port *up_ptr;
+	tipc_continue_event cb = NULL;
+	void *uh = NULL;
+
+	p_ptr = port_lock(ref);
+	if (p_ptr) {
+		up_ptr = p_ptr->user_port;
+		if (up_ptr) {
+			cb = up_ptr->continue_event_cb;
+			uh = up_ptr->usr_handle;
+		}
+		port_unlock(p_ptr);
+	}
+	if (cb)
+		cb(uh, ref);
+}
+
+
+static void port_wakeup(struct tipc_port *p_ptr)
+{
+	k_signal((Handler)port_wakeup_sh, p_ptr->ref);
+}
+
+void tipc_acknowledge(u32 ref, u32 ack)
+{
+	struct port *p_ptr;
+	struct sk_buff *buf = 0;
+
+	p_ptr = port_lock(ref);
+	if (!p_ptr)
+		return;
+	if (p_ptr->publ.connected) {
+		p_ptr->publ.conn_unacked -= ack;
+		buf = port_build_proto_msg(port_peerport(p_ptr),
+					   port_peernode(p_ptr),
+					   ref,
+					   tipc_own_addr,
+					   CONN_MANAGER,
+					   CONN_ACK,
+					   TIPC_OK, 
+					   port_out_seqno(p_ptr),
+					   ack);
+	}
+	port_unlock(p_ptr);
+	net_route_msg(buf);
+}
+
+/*
+ * tipc_createport(): user level call. Will add port to
+ *                    registry if non-zero user_ref.
+ */
+
+int tipc_createport(u32 user_ref, 
+		    void *usr_handle, 
+		    unsigned int importance, 
+		    tipc_msg_err_event error_cb, 
+		    tipc_named_msg_err_event named_error_cb, 
+		    tipc_conn_shutdown_event conn_error_cb, 
+		    tipc_msg_event msg_cb, 
+		    tipc_named_msg_event named_msg_cb, 
+		    tipc_conn_msg_event conn_msg_cb, 
+		    tipc_continue_event continue_event_cb,/* May be zero */
+		    u32 *portref)
+{
+	struct user_port *up_ptr;
+	struct port *p_ptr; 
+	u32 ref;
+
+	up_ptr = kmalloc(sizeof(*up_ptr), GFP_ATOMIC);
+	if (up_ptr == NULL) {
+		return -ENOMEM;
+	}
+	ref = tipc_createport_raw(0, port_dispatcher, port_wakeup, importance);
+	p_ptr = port_lock(ref);
+	if (!p_ptr) {
+		kfree(up_ptr);
+		return -ENOMEM;
+	}
+
+	p_ptr->user_port = up_ptr;
+	up_ptr->user_ref = user_ref;
+	up_ptr->usr_handle = usr_handle;
+	up_ptr->ref = p_ptr->publ.ref;
+	up_ptr->err_cb = error_cb;
+	up_ptr->named_err_cb = named_error_cb;
+	up_ptr->conn_err_cb = conn_error_cb;
+	up_ptr->msg_cb = msg_cb;
+	up_ptr->named_msg_cb = named_msg_cb;
+	up_ptr->conn_msg_cb = conn_msg_cb;
+	up_ptr->continue_event_cb = continue_event_cb;
+	INIT_LIST_HEAD(&up_ptr->uport_list);
+	reg_add_port(up_ptr);
+	*portref = p_ptr->publ.ref;
+	dbg(" tipc_createport: %x with ref %u\n", p_ptr, p_ptr->publ.ref);        
+	port_unlock(p_ptr);
+	return TIPC_OK;
+}
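+
+/*
+ * Illustrative native-API sketch (not part of this patch; the callback
+ * signature is inferred from the dispatcher above, and all names are
+ * hypothetical): create a port with only a named-message handler and
+ * bind it to a name sequence.
+ */
+#if 0
+static void my_named_msg(void *usr_handle, u32 portref, struct sk_buff **buf,
+			 unsigned char const *data, unsigned int size,
+			 unsigned int importance, struct tipc_portid const *orig,
+			 struct tipc_name_seq const *dest)
+{
+	/* consume the message here */
+}
+
+static int create_example(u32 *ref)
+{
+	struct tipc_name_seq seq = { 1000, 50, 59 };
+	int res;
+
+	res = tipc_createport(0, NULL, TIPC_LOW_IMPORTANCE, NULL, NULL, NULL,
+			      NULL, my_named_msg, NULL, NULL, ref);
+	if (res)
+		return res;
+	return tipc_publish(*ref, TIPC_CLUSTER_SCOPE, &seq);
+}
+#endif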
+
+int tipc_ownidentity(u32 ref, struct tipc_portid *id)
+{
+	id->ref = ref;
+	id->node = tipc_own_addr;
+	return TIPC_OK;
+}
+
+int tipc_portimportance(u32 ref, unsigned int *importance)
+{
+	struct port *p_ptr;
+	
+	p_ptr = port_lock(ref);
+	if (!p_ptr)
+		return -EINVAL;
+	*importance = (unsigned int)msg_importance(&p_ptr->publ.phdr);
+	spin_unlock_bh(p_ptr->publ.lock);
+	return TIPC_OK;
+}
+
+int tipc_set_portimportance(u32 ref, unsigned int imp)
+{
+	struct port *p_ptr;
+
+	if (imp > TIPC_CRITICAL_IMPORTANCE)
+		return -EINVAL;
+
+	p_ptr = port_lock(ref);
+	if (!p_ptr)
+		return -EINVAL;
+	msg_set_importance(&p_ptr->publ.phdr, (u32)imp);
+	spin_unlock_bh(p_ptr->publ.lock);
+	return TIPC_OK;
+}
+
+
+int tipc_publish(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
+{
+	struct port *p_ptr;
+	struct publication *publ;
+	u32 key;
+	int res = -EINVAL;
+
+	p_ptr = port_lock(ref);
+	if (!p_ptr)
+		return -EINVAL;
+	dbg("tipc_publ %u, p_ptr = %x, conn = %x, scope = %x, "
+	    "lower = %u, upper = %u\n",
+	    ref, p_ptr, p_ptr->publ.connected, scope, seq->lower, seq->upper);
+	if (p_ptr->publ.connected)
+		goto exit;
+	if (seq->lower > seq->upper)
+		goto exit;
+	if ((scope < TIPC_ZONE_SCOPE) || (scope > TIPC_NODE_SCOPE))
+		goto exit;
+	key = ref + p_ptr->pub_count + 1;
+	if (key == ref) {
+		res = -EADDRINUSE;
+		goto exit;
+	}
+	publ = nametbl_publish(seq->type, seq->lower, seq->upper,
+			       scope, p_ptr->publ.ref, key);
+	if (publ) {
+		list_add(&publ->pport_list, &p_ptr->publications);
+		p_ptr->pub_count++;
+		p_ptr->publ.published = 1;
+		res = TIPC_OK;
+	}
+exit:
+	port_unlock(p_ptr);
+	return res;
+}
+
+int tipc_withdraw(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
+{
+	struct port *p_ptr;
+	struct publication *publ;
+	struct publication *tpubl;
+	int res = -EINVAL;
+	
+	p_ptr = port_lock(ref);
+	if (!p_ptr)
+		return -EINVAL;
+	if (!p_ptr->publ.published)
+		goto exit;
+	if (!seq) {
+		list_for_each_entry_safe(publ, tpubl, 
+					 &p_ptr->publications, pport_list) {
+			nametbl_withdraw(publ->type, publ->lower, 
+					 publ->ref, publ->key);
+		}
+		res = TIPC_OK;
+	} else {
+		list_for_each_entry_safe(publ, tpubl, 
+					 &p_ptr->publications, pport_list) {
+			if (publ->scope != scope)
+				continue;
+			if (publ->type != seq->type)
+				continue;
+			if (publ->lower != seq->lower)
+				continue;
+			if (publ->upper != seq->upper)
+				break;
+			nametbl_withdraw(publ->type, publ->lower, 
+					 publ->ref, publ->key);
+			res = TIPC_OK;
+			break;
+		}
+	}
+	if (list_empty(&p_ptr->publications))
+		p_ptr->publ.published = 0;
+exit:
+	port_unlock(p_ptr);
+	return res;
+}
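+
+/*
+ * Note that passing seq == NULL withdraws every publication still held
+ * by the port; tipc_deleteport() relies on this to clean up before the
+ * port is destroyed.
+ */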
+
+int tipc_connect2port(u32 ref, struct tipc_portid const *peer)
+{
+	struct port *p_ptr;
+	struct tipc_msg *msg;
+	int res = -EINVAL;
+
+	p_ptr = port_lock(ref);
+	if (!p_ptr)
+		return -EINVAL;
+	if (p_ptr->publ.published || p_ptr->publ.connected)
+		goto exit;
+	if (!peer->ref)
+		goto exit;
+
+	msg = &p_ptr->publ.phdr;
+	msg_set_destnode(msg, peer->node);
+	msg_set_destport(msg, peer->ref);
+	msg_set_orignode(msg, tipc_own_addr);
+	msg_set_origport(msg, p_ptr->publ.ref);
+	msg_set_transp_seqno(msg, 42);
+	msg_set_type(msg, TIPC_CONN_MSG);
+	if (!may_route(peer->node))
+		msg_set_hdr_sz(msg, SHORT_H_SIZE);
+	else
+		msg_set_hdr_sz(msg, LONG_H_SIZE);
+
+	p_ptr->probing_interval = PROBING_INTERVAL;
+	p_ptr->probing_state = CONFIRMED;
+	p_ptr->publ.connected = 1;
+	k_start_timer(&p_ptr->timer, p_ptr->probing_interval);
+
+	nodesub_subscribe(&p_ptr->subscription, peer->node, (void *)ref,
+			  (net_ev_handler)port_handle_node_down);
+	res = TIPC_OK;
+exit:
+	port_unlock(p_ptr);
+	p_ptr->max_pkt = link_get_max_pkt(peer->node, ref);
+	return res;
+}
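+
+/*
+ * Illustrative sketch (hypothetical names, not part of this patch):
+ * connect an existing port to a known peer and send on the connection.
+ */
+#if 0
+static int connect_example(u32 ref, struct tipc_portid const *peer)
+{
+	char data[] = "ping";
+	struct iovec iov = { data, sizeof(data) };
+	int res;
+
+	res = tipc_connect2port(ref, peer);
+	if (res)
+		return res;
+	return tipc_send(ref, 1, &iov);
+}
+#endif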
+
+/*
+ * tipc_disconnect(): Disconnect port from peer.
+ *                    This is a node local operation.
+ */
+
+int tipc_disconnect(u32 ref)
+{
+	struct port *p_ptr;
+	int res = -ENOTCONN;
+
+	p_ptr = port_lock(ref);
+	if (!p_ptr)
+		return -EINVAL;
+	if (p_ptr->publ.connected) {
+		p_ptr->publ.connected = 0;
+		/* let timer expire on its own to avoid deadlock! */
+		nodesub_unsubscribe(&p_ptr->subscription);
+		res = TIPC_OK;
+	}
+	port_unlock(p_ptr);
+	return res;
+}
+
+/*
+ * tipc_shutdown(): Send a SHUTDOWN msg to peer and disconnect
+ */
+int tipc_shutdown(u32 ref)
+{
+	struct port *p_ptr;
+	struct sk_buff *buf = 0;
+
+	p_ptr = port_lock(ref);
+	if (!p_ptr)
+		return -EINVAL;
+
+	if (p_ptr->publ.connected) {
+		u32 imp = msg_importance(&p_ptr->publ.phdr);
+		if (imp < TIPC_CRITICAL_IMPORTANCE)
+			imp++;
+		buf = port_build_proto_msg(port_peerport(p_ptr),
+					   port_peernode(p_ptr),
+					   ref,
+					   tipc_own_addr,
+					   imp,
+					   TIPC_CONN_MSG,
+					   TIPC_CONN_SHUTDOWN, 
+					   port_out_seqno(p_ptr),
+					   0);
+	}
+	port_unlock(p_ptr);
+	net_route_msg(buf);
+	return tipc_disconnect(ref);
+}
+
+int tipc_isconnected(u32 ref, int *isconnected)
+{
+	struct port *p_ptr;
+	
+	p_ptr = port_lock(ref);
+	if (!p_ptr)
+		return -EINVAL;
+	*isconnected = p_ptr->publ.connected;
+	port_unlock(p_ptr);
+	return TIPC_OK;
+}
+
+int tipc_peer(u32 ref, struct tipc_portid *peer)
+{
+	struct port *p_ptr;
+	int res;
+	 
+	p_ptr = port_lock(ref);
+	if (!p_ptr)
+		return -EINVAL;
+	if (p_ptr->publ.connected) {
+		peer->ref = port_peerport(p_ptr);
+		peer->node = port_peernode(p_ptr);
+		res = TIPC_OK;
+	} else
+		res = -ENOTCONN;
+	port_unlock(p_ptr);
+	return res;
+}
+
+int tipc_ref_valid(u32 ref)
+{
+	/* Works irrespective of type */
+	return !!ref_deref(ref);
+}
+
+
+/*
+ *  port_recv_sections(): Concatenate and deliver sectioned
+ *                        message for this node.
+ */
+
+int port_recv_sections(struct port *sender, unsigned int num_sect,
+		       struct iovec const *msg_sect)
+{
+	struct sk_buff *buf;
+	int res;
+	 
+	res = msg_build(&sender->publ.phdr, msg_sect, num_sect,
+			MAX_MSG_SIZE, !sender->user_port, &buf);
+	if (likely(buf))
+		port_recv_msg(buf);
+	return res;
+}
+
+/**
+ * tipc_send - send message sections on connection
+ */
+
+int tipc_send(u32 ref, unsigned int num_sect, struct iovec const *msg_sect)
+{
+	struct port *p_ptr;
+	u32 destnode;
+	int res;
+
+	p_ptr = port_deref(ref);
+	if (!p_ptr || !p_ptr->publ.connected)
+		return -EINVAL;
+
+	p_ptr->publ.congested = 1;
+	if (!port_congested(p_ptr)) {
+		destnode = port_peernode(p_ptr);
+		if (likely(destnode != tipc_own_addr))
+			res = link_send_sections_fast(p_ptr, msg_sect, num_sect,
+						      destnode);
+		else
+			res = port_recv_sections(p_ptr, num_sect, msg_sect);
+
+		if (likely(res != -ELINKCONG)) {
+			port_incr_out_seqno(p_ptr);
+			p_ptr->publ.congested = 0;
+			p_ptr->sent++;
+			return res;
+		}
+	}
+	if (port_unreliable(p_ptr)) {
+		p_ptr->publ.congested = 0;
+		/* Just calculate msg length and return */
+		return msg_calc_data_size(msg_sect, num_sect);
+	}
+	return -ELINKCONG;
+}
+
+/** 
+ * tipc_send_buf - send message buffer on connection
+ */
+
+int tipc_send_buf(u32 ref, struct sk_buff *buf, unsigned int dsz)
+{
+	struct port *p_ptr;
+	struct tipc_msg *msg;
+	u32 destnode;
+	u32 hsz;
+	u32 sz;
+	u32 res;
+	 
+	p_ptr = port_deref(ref);
+	if (!p_ptr || !p_ptr->publ.connected)
+		return -EINVAL;
+
+	msg = &p_ptr->publ.phdr;
+	hsz = msg_hdr_sz(msg);
+	sz = hsz + dsz;
+	msg_set_size(msg, sz);
+	if (skb_cow(buf, hsz))
+		return -ENOMEM;
+
+	skb_push(buf, hsz);
+	memcpy(buf->data, (unchar *)msg, hsz);
+	destnode = msg_destnode(msg);
+	p_ptr->publ.congested = 1;
+	if (!port_congested(p_ptr)) {
+		if (likely(destnode != tipc_own_addr))
+			res = tipc_send_buf_fast(buf, destnode);
+		else {
+			port_recv_msg(buf);
+			res = sz;
+		}
+		if (likely(res != -ELINKCONG)) {
+			port_incr_out_seqno(p_ptr);
+			p_ptr->sent++;
+			p_ptr->publ.congested = 0;
+			return res;
+		}
+	}
+	if (port_unreliable(p_ptr)) {
+		p_ptr->publ.congested = 0;
+		return dsz;
+	}
+	return -ELINKCONG;
+}
+
+/**
+ * tipc_forward2name - forward message sections to port name
+ */
+
+int tipc_forward2name(u32 ref, 
+		      struct tipc_name const *name, 
+		      u32 domain,
+		      u32 num_sect, 
+		      struct iovec const *msg_sect,
+		      struct tipc_portid const *orig, 
+		      unsigned int importance)
+{
+	struct port *p_ptr;
+	struct tipc_msg *msg;
+	u32 destnode = domain;
+	u32 destport = 0;
+	int res;
+
+	p_ptr = port_deref(ref);
+	if (!p_ptr || p_ptr->publ.connected)
+		return -EINVAL;
+
+	msg = &p_ptr->publ.phdr;
+	msg_set_type(msg, TIPC_NAMED_MSG);
+	msg_set_orignode(msg, orig->node);
+	msg_set_origport(msg, orig->ref);
+	msg_set_hdr_sz(msg, LONG_H_SIZE);
+	msg_set_nametype(msg, name->type);
+	msg_set_nameinst(msg, name->instance);
+	msg_set_lookup_scope(msg, addr_scope(domain));
+	if (importance <= TIPC_CRITICAL_IMPORTANCE)
+		msg_set_importance(msg,importance);
+	destport = nametbl_translate(name->type, name->instance, &destnode);
+	msg_set_destnode(msg, destnode);
+	msg_set_destport(msg, destport);
+
+	if (likely(destport || destnode)) {
+		p_ptr->sent++;
+		if (likely(destnode == tipc_own_addr))
+			return port_recv_sections(p_ptr, num_sect, msg_sect);
+		res = link_send_sections_fast(p_ptr, msg_sect, num_sect, 
+					      destnode);
+		if (likely(res != -ELINKCONG))
+			return res;
+		if (port_unreliable(p_ptr)) {
+			/* Just calculate msg length and return */
+			return msg_calc_data_size(msg_sect, num_sect);
+		}
+		return -ELINKCONG;
+	}
+	return port_reject_sections(p_ptr, msg, msg_sect, num_sect, 
+				    TIPC_ERR_NO_NAME);
+}
+
+/**
+ * tipc_send2name - send message sections to port name
+ */
+
+int tipc_send2name(u32 ref, 
+		   struct tipc_name const *name,
+		   unsigned int domain, 
+		   unsigned int num_sect, 
+		   struct iovec const *msg_sect)
+{
+	struct tipc_portid orig;
+
+	orig.ref = ref;
+	orig.node = tipc_own_addr;
+	return tipc_forward2name(ref, name, domain, num_sect, msg_sect, &orig,
+				 TIPC_PORT_IMPORTANCE);
+}
+
+/** 
+ * tipc_forward_buf2name - forward message buffer to port name
+ */
+
+int tipc_forward_buf2name(u32 ref,
+			  struct tipc_name const *name,
+			  u32 domain,
+			  struct sk_buff *buf,
+			  unsigned int dsz,
+			  struct tipc_portid const *orig,
+			  unsigned int importance)
+{
+	struct port *p_ptr;
+	struct tipc_msg *msg;
+	u32 destnode = domain;
+	u32 destport = 0;
+	int res;
+
+	p_ptr = (struct port *)ref_deref(ref);
+	if (!p_ptr || p_ptr->publ.connected)
+		return -EINVAL;
+
+	msg = &p_ptr->publ.phdr;
+	if (importance <= TIPC_CRITICAL_IMPORTANCE)
+		msg_set_importance(msg, importance);
+	msg_set_type(msg, TIPC_NAMED_MSG);
+	msg_set_orignode(msg, orig->node);
+	msg_set_origport(msg, orig->ref);
+	msg_set_nametype(msg, name->type);
+	msg_set_nameinst(msg, name->instance);
+	msg_set_lookup_scope(msg, addr_scope(domain));
+	msg_set_hdr_sz(msg, LONG_H_SIZE);
+	msg_set_size(msg, LONG_H_SIZE + dsz);
+	destport = nametbl_translate(name->type, name->instance, &destnode);
+	msg_set_destnode(msg, destnode);
+	msg_set_destport(msg, destport);
+	msg_dbg(msg, "forw2name ==> ");
+	if (skb_cow(buf, LONG_H_SIZE))
+		return -ENOMEM;
+	skb_push(buf, LONG_H_SIZE);
+	memcpy(buf->data, (unchar *)msg, LONG_H_SIZE);
+	msg_dbg(buf_msg(buf),"PREP:");
+	if (likely(destport || destnode)) {
+		p_ptr->sent++;
+		if (destnode == tipc_own_addr)
+			return port_recv_msg(buf);
+		res = tipc_send_buf_fast(buf, destnode);
+		if (likely(res != -ELINKCONG))
+			return res;
+		if (port_unreliable(p_ptr))
+			return dsz;
+		return -ELINKCONG;
+	}
+	return tipc_reject_msg(buf, TIPC_ERR_NO_NAME);
+}
+
+/** 
+ * tipc_send_buf2name - send message buffer to port name
+ */
+
+int tipc_send_buf2name(u32 ref, 
+		       struct tipc_name const *dest, 
+		       u32 domain,
+		       struct sk_buff *buf, 
+		       unsigned int dsz)
+{
+	struct tipc_portid orig;
+
+	orig.ref = ref;
+	orig.node = tipc_own_addr;
+	return tipc_forward_buf2name(ref, dest, domain, buf, dsz, &orig,
+				     TIPC_PORT_IMPORTANCE);
+}
+
+/** 
+ * tipc_forward2port - forward message sections to port identity
+ */
+
+int tipc_forward2port(u32 ref,
+		      struct tipc_portid const *dest,
+		      unsigned int num_sect, 
+		      struct iovec const *msg_sect,
+		      struct tipc_portid const *orig, 
+		      unsigned int importance)
+{
+	struct port *p_ptr;
+	struct tipc_msg *msg;
+	int res;
+
+	p_ptr = port_deref(ref);
+	if (!p_ptr || p_ptr->publ.connected)
+		return -EINVAL;
+
+	msg = &p_ptr->publ.phdr;
+	msg_set_type(msg, TIPC_DIRECT_MSG);
+	msg_set_orignode(msg, orig->node);
+	msg_set_origport(msg, orig->ref);
+	msg_set_destnode(msg, dest->node);
+	msg_set_destport(msg, dest->ref);
+	msg_set_hdr_sz(msg, DIR_MSG_H_SIZE);
+	if (importance <= TIPC_CRITICAL_IMPORTANCE)
+		msg_set_importance(msg, importance);
+	p_ptr->sent++;
+	if (dest->node == tipc_own_addr)
+		return port_recv_sections(p_ptr, num_sect, msg_sect);
+	res = link_send_sections_fast(p_ptr, msg_sect, num_sect, dest->node);
+	if (likely(res != -ELINKCONG))
+		return res;
+	if (port_unreliable(p_ptr)) {
+		/* Just calculate msg length and return */
+		return msg_calc_data_size(msg_sect, num_sect);
+	}
+	return -ELINKCONG;
+}
+
+/** 
+ * tipc_send2port - send message sections to port identity 
+ */
+
+int tipc_send2port(u32 ref, 
+		   struct tipc_portid const *dest,
+		   unsigned int num_sect, 
+		   struct iovec const *msg_sect)
+{
+	struct tipc_portid orig;
+
+	orig.ref = ref;
+	orig.node = tipc_own_addr;
+	return tipc_forward2port(ref, dest, num_sect, msg_sect, &orig, 
+				 TIPC_PORT_IMPORTANCE);
+}
+
+/** 
+ * tipc_forward_buf2port - forward message buffer to port identity
+ */
+int tipc_forward_buf2port(u32 ref,
+			  struct tipc_portid const *dest,
+			  struct sk_buff *buf,
+			  unsigned int dsz,
+			  struct tipc_portid const *orig,
+			  unsigned int importance)
+{
+	struct port *p_ptr;
+	struct tipc_msg *msg;
+	int res;
+
+	p_ptr = (struct port *)ref_deref(ref);
+	if (!p_ptr || p_ptr->publ.connected)
+		return -EINVAL;
+
+	msg = &p_ptr->publ.phdr;
+	msg_set_type(msg, TIPC_DIRECT_MSG);
+	msg_set_orignode(msg, orig->node);
+	msg_set_origport(msg, orig->ref);
+	msg_set_destnode(msg, dest->node);
+	msg_set_destport(msg, dest->ref);
+	msg_set_hdr_sz(msg, DIR_MSG_H_SIZE);
+	if (importance <= TIPC_CRITICAL_IMPORTANCE)
+		msg_set_importance(msg, importance);
+	msg_set_size(msg, DIR_MSG_H_SIZE + dsz);
+	if (skb_cow(buf, DIR_MSG_H_SIZE))
+		return -ENOMEM;
+
+	skb_push(buf, DIR_MSG_H_SIZE);
+	memcpy(buf->data, (unchar *)msg, DIR_MSG_H_SIZE);
+	msg_dbg(msg, "buf2port: ");
+	p_ptr->sent++;
+	if (dest->node == tipc_own_addr)
+		return port_recv_msg(buf);
+	res = tipc_send_buf_fast(buf, dest->node);
+	if (likely(res != -ELINKCONG))
+		return res;
+	if (port_unreliable(p_ptr))
+		return dsz;
+	return -ELINKCONG;
+}
+
+/** 
+ * tipc_send_buf2port - send message buffer to port identity
+ */
+
+int tipc_send_buf2port(u32 ref, 
+		       struct tipc_portid const *dest,
+		       struct sk_buff *buf, 
+		       unsigned int dsz)
+{
+	struct tipc_portid orig;
+
+	orig.ref = ref;
+	orig.node = tipc_own_addr;
+	return tipc_forward_buf2port(ref, dest, buf, dsz, &orig, 
+				     TIPC_PORT_IMPORTANCE);
+}
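+
+/*
+ * Usage sketch (illustrative only, not compiled in): a native-API user
+ * that has created a port can send to a named destination with
+ * tipc_send2name().  The type/instance values below are hypothetical
+ * examples, not values defined by TIPC.
+ *
+ *	struct tipc_name name = { .type = 1000, .instance = 1 };
+ *	struct iovec iov = { .iov_base = data_buf, .iov_len = data_len };
+ *	int res;
+ *
+ *	res = tipc_send2name(my_port_ref, &name, 0, 1, &iov);
+ *	if (res < 0)
+ *		... handle -EINVAL, -ELINKCONG, etc. ...
+ */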
+
diff --git a/net/tipc/port.h b/net/tipc/port.h
new file mode 100644
index 0000000..e40235e
--- /dev/null
+++ b/net/tipc/port.h
@@ -0,0 +1,206 @@
+/*
+ * net/tipc/port.h: Include file for TIPC port code
+ * 
+ * Copyright (c) 2003-2005, Ericsson Research Canada
+ * Copyright (c) 2004-2005, Wind River Systems
+ * Copyright (c) 2005-2006, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without 
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this 
+ * list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice, 
+ * this list of conditions and the following disclaimer in the documentation 
+ * and/or other materials provided with the distribution.
+ * Neither the names of the copyright holders nor the names of its 
+ * contributors may be used to endorse or promote products derived from this 
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_PORT_H
+#define _TIPC_PORT_H
+
+#include <net/tipc/tipc_port.h>
+#include "ref.h"
+#include "net.h"
+#include "msg.h"
+#include "dbg.h"
+#include "node_subscr.h"
+
+/**
+ * struct user_port - TIPC user port (used with native API)
+ * @user_ref: id of user who created user port
+ * @usr_handle: user-specified field
+ * @ref: object reference to associated TIPC port
+ * <various callback routines>
+ * @uport_list: adjacent user ports in list of ports held by user
+ */
+ 
+struct user_port {
+	u32 user_ref;
+	void *usr_handle; 
+	u32 ref;
+	tipc_msg_err_event err_cb; 
+	tipc_named_msg_err_event named_err_cb; 
+	tipc_conn_shutdown_event conn_err_cb; 
+	tipc_msg_event msg_cb; 
+	tipc_named_msg_event named_msg_cb; 
+	tipc_conn_msg_event conn_msg_cb; 
+	tipc_continue_event continue_event_cb;
+	struct list_head uport_list;
+};
+
+/**
+ * struct port - TIPC port structure
+ * @publ: TIPC port info available to privileged users
+ * @port_list: adjacent ports in TIPC's global list of ports
+ * @dispatcher: ptr to routine which handles received messages
+ * @wakeup: ptr to routine to call when port is no longer congested
+ * @user_port: ptr to user port associated with port (if any)
+ * @wait_list: adjacent ports in list of ports waiting on link congestion
+ * @congested_link: ptr to congested link port is waiting on
+ * @waiting_pkts:
+ * @sent:
+ * @acked:
+ * @publications: list of publications for port
+ * @pub_count: total # of publications port has made during its lifetime
+ * @max_pkt: maximum packet size "hint" used when building messages sent by port
+ * @probing_state:
+ * @probing_interval:
+ * @last_in_seqno:
+ * @timer_ref:
+ * @subscription: "node down" subscription used to terminate failed connections
+ */
+
+struct port {
+	struct tipc_port publ;
+	struct list_head port_list;
+	u32 (*dispatcher)(struct tipc_port *, struct sk_buff *);
+	void (*wakeup)(struct tipc_port *);
+	struct user_port *user_port;
+	struct list_head wait_list;
+	struct link *congested_link;
+	u32 waiting_pkts;
+	u32 sent;
+	u32 acked;
+	struct list_head publications;
+	u32 pub_count;
+	u32 max_pkt;
+	u32 probing_state;
+	u32 probing_interval;
+	u32 last_in_seqno;
+	struct timer_list timer;
+	struct node_subscr subscription;
+};
+
+extern spinlock_t port_list_lock;
+struct port_list;
+
+int port_recv_sections(struct port *p_ptr, u32 num_sect, 
+		       struct iovec const *msg_sect);
+int port_reject_sections(struct port *p_ptr, struct tipc_msg *hdr,
+			 struct iovec const *msg_sect, u32 num_sect,
+			 int err);
+struct sk_buff *port_get_ports(void);
+struct sk_buff *port_show_stats(const void *req_tlv_area, int req_tlv_space);
+void port_recv_proto_msg(struct sk_buff *buf);
+void port_recv_mcast(struct sk_buff *buf, struct port_list *dp);
+void port_reinit(void);
+
+/**
+ * port_lock - lock port instance referred to and return its pointer
+ */
+
+static inline struct port *port_lock(u32 ref)
+{
+	return (struct port *)ref_lock(ref);
+}
+
+/** 
+ * port_unlock - unlock a port instance
+ * 
+ * Can use pointer instead of ref_unlock() since port is already locked.
+ */
+
+static inline void port_unlock(struct port *p_ptr)
+{
+	spin_unlock_bh(p_ptr->publ.lock);
+}
+
+static inline struct port* port_deref(u32 ref)
+{
+	return (struct port *)ref_deref(ref);
+}
+
+static inline u32 peer_port(struct port *p_ptr)
+{
+	return msg_destport(&p_ptr->publ.phdr);
+}
+
+static inline u32 peer_node(struct port *p_ptr)
+{
+	return msg_destnode(&p_ptr->publ.phdr);
+}
+
+static inline int port_congested(struct port *p_ptr)
+{
+	return((p_ptr->sent - p_ptr->acked) >= (TIPC_FLOW_CONTROL_WIN * 2));
+}
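+
+/*
+ * Example (assuming TIPC_FLOW_CONTROL_WIN is 50; its value is defined
+ * elsewhere): a connected port is treated as congested once 100 sent
+ * messages have not yet been acknowledged by the peer.
+ */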
+
+/** 
+ * port_recv_msg - receive message from lower layer and deliver to port user
+ */
+
+static inline int port_recv_msg(struct sk_buff *buf)
+{
+	struct port *p_ptr;
+	struct tipc_msg *msg = buf_msg(buf);
+	u32 destport = msg_destport(msg);
+	u32 dsz = msg_data_sz(msg);
+	u32 err;
+	
+	/* forward unresolved named message */
+	if (unlikely(!destport)) {
+		net_route_msg(buf);
+		return dsz;
+	}
+
+	/* validate destination & pass to port, otherwise reject message */
+	p_ptr = port_lock(destport);
+	if (likely(p_ptr)) {
+		if (likely(p_ptr->publ.connected)) {
+			if ((unlikely(msg_origport(msg) != peer_port(p_ptr))) ||
+			    (unlikely(msg_orignode(msg) != peer_node(p_ptr))) ||
+			    (unlikely(!msg_connected(msg)))) {
+				err = TIPC_ERR_NO_PORT;
+				port_unlock(p_ptr);
+				goto reject;
+			}
+		}
+		err = p_ptr->dispatcher(&p_ptr->publ, buf);
+		port_unlock(p_ptr);
+		if (likely(!err))
+			return dsz;
+	} else {
+		err = TIPC_ERR_NO_PORT;
+	}
+reject:
+	dbg("port->rejecting, err = %x..\n",err);
+	return tipc_reject_msg(buf, err);
+}
+
+#endif
diff --git a/net/tipc/ref.c b/net/tipc/ref.c
new file mode 100644
index 0000000..aee48c7
--- /dev/null
+++ b/net/tipc/ref.c
@@ -0,0 +1,186 @@
+/*
+ * net/tipc/ref.c: TIPC object registry code
+ * 
+ * Copyright (c) 2003-2005, Ericsson Research Canada
+ * Copyright (c) 2004-2005, Wind River Systems
+ * Copyright (c) 2005-2006, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * 
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * Neither the names of the copyright holders nor the names of its 
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "ref.h"
+#include "port.h"
+#include "subscr.h"
+#include "name_distr.h"
+#include "name_table.h"
+#include "config.h"
+#include "discover.h"
+#include "bearer.h"
+#include "node.h"
+#include "bcast.h"
+
+/*
+ * Object reference table consists of 2**N entries.
+ *
+ * A used entry has object ptr != 0, reference == XXXX|own index
+ *				     (XXXX changes each time entry is acquired) 
+ * A free entry has object ptr == 0, reference == YYYY|next free index
+ *				     (YYYY is one more than last used XXXX)
+ *
+ * Free list is initially chained from entry (2**N)-1 to entry 1. 
+ * Entry 0 is not used to allow index 0 to indicate the end of the free list.
+ *
+ * Note: Any accidental reference of the form XXXX|0--0 won't match entry 0
+ * because entry 0's reference field has the form XXXX|1--1.
+ */
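+
+/*
+ * Worked example (illustrative values): with a table of 2**10 entries,
+ * index_mask is 0x3FF.  A reference such as 0x12345 then decomposes as
+ *
+ *	index = 0x12345 & 0x3FF	 = 0x345
+ *	upper = 0x12345 & ~0x3FF = 0x12000
+ *
+ * When entry 0x345 is later discarded and re-acquired, its upper bits
+ * are advanced by (index_mask + 1), yielding reference 0x12745, so the
+ * stale reference 0x12345 no longer matches the entry.
+ */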
+
+struct ref_table ref_table = { 0 };
+
+rwlock_t reftbl_lock = RW_LOCK_UNLOCKED;
+
+/**
+ * ref_table_init - create reference table for objects
+ */
+
+int ref_table_init(u32 requested_size, u32 start)
+{
+	struct reference *table;
+	u32 sz = 1 << 4;
+	u32 index_mask;
+	int i;
+
+	while (sz < requested_size) {
+		sz <<= 1;
+	}
+	table = (struct reference *)vmalloc(sz * sizeof(struct reference));
+	if (table == NULL)
+		return -ENOMEM;
+
+	write_lock_bh(&reftbl_lock);
+	index_mask = sz - 1;
+	for (i = sz - 1; i >= 0; i--) {
+		table[i].object = 0;
+		table[i].lock = SPIN_LOCK_UNLOCKED;
+		table[i].data.next_plus_upper = (start & ~index_mask) + i - 1;
+	}
+	ref_table.entries = table;
+	ref_table.index_mask = index_mask;
+	ref_table.first_free = sz - 1;
+	ref_table.last_free = 1;
+	write_unlock_bh(&reftbl_lock);
+	return TIPC_OK;
+}
+
+/**
+ * ref_table_stop - destroy reference table for objects
+ */
+
+void ref_table_stop(void)
+{
+	if (!ref_table.entries)
+		return;
+
+	vfree(ref_table.entries);
+	ref_table.entries = 0;
+}
+
+/**
+ * ref_acquire - create reference to an object
+ * 
+ * Return a unique reference value which can be translated back to the pointer
+ * 'object' at a later time.  Also, pass back a pointer to the lock protecting 
+ * the object, but without locking it.
+ */
+
+u32 ref_acquire(void *object, spinlock_t **lock)
+{
+	struct reference *entry;
+	u32 index;
+	u32 index_mask;
+	u32 next_plus_upper;
+	u32 reference = 0;
+
+	assert(ref_table.entries && object);
+
+	write_lock_bh(&reftbl_lock);
+	if (ref_table.first_free) {
+		index = ref_table.first_free;
+		entry = &(ref_table.entries[index]);
+		index_mask = ref_table.index_mask;
+		/* take lock in case a previous user of entry still holds it */ 
+		spin_lock_bh(&entry->lock);  
+		next_plus_upper = entry->data.next_plus_upper;
+		ref_table.first_free = next_plus_upper & index_mask;
+		reference = (next_plus_upper & ~index_mask) + index;
+		entry->data.reference = reference;
+		entry->object = object;
+		if (lock != 0)
+			*lock = &entry->lock;
+		spin_unlock_bh(&entry->lock);
+	}
+	write_unlock_bh(&reftbl_lock);
+	return reference;
+}
+
+/**
+ * ref_discard - invalidate references to an object
+ * 
+ * Disallow future references to an object and free up the entry for re-use.
+ * Note: The entry's spin_lock may still be busy after discard
+ */
+
+void ref_discard(u32 ref)
+{
+	struct reference *entry;
+	u32 index; 
+	u32 index_mask;
+
+	assert(ref_table.entries);
+	assert(ref != 0);
+
+	write_lock_bh(&reftbl_lock);
+	index_mask = ref_table.index_mask;
+	index = ref & index_mask;
+	entry = &(ref_table.entries[index]);
+	assert(entry->object != 0);
+	assert(entry->data.reference == ref);
+
+	/* mark entry as unused */
+	entry->object = 0;
+	if (ref_table.first_free == 0)
+		ref_table.first_free = index;
+	else
+		/* next_plus_upper is always XXXX|0--0 for last free entry */
+		ref_table.entries[ref_table.last_free].data.next_plus_upper 
+			|= index;
+	ref_table.last_free = index;
+
+	/* increment upper bits of entry to invalidate subsequent references */
+	entry->data.next_plus_upper = (ref & ~index_mask) + (index_mask + 1);
+	write_unlock_bh(&reftbl_lock);
+}
+
diff --git a/net/tipc/ref.h b/net/tipc/ref.h
new file mode 100644
index 0000000..e6cd5bb
--- /dev/null
+++ b/net/tipc/ref.h
@@ -0,0 +1,128 @@
+/*
+ * net/tipc/ref.h: Include file for TIPC object registry code
+ * 
+ * Copyright (c) 2003-2005, Ericsson Research Canada
+ * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005-2006, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without 
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this 
+ * list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice, 
+ * this list of conditions and the following disclaimer in the documentation 
+ * and/or other materials provided with the distribution.
+ * Neither the names of the copyright holders nor the names of its 
+ * contributors may be used to endorse or promote products derived from this 
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_REF_H
+#define _TIPC_REF_H
+
+/**
+ * struct reference - TIPC object reference entry
+ * @object: pointer to object associated with reference entry
+ * @lock: spinlock controlling access to object
+ * @data: reference value associated with object (or link to next unused entry)
+ */
+ 
+struct reference {
+	void *object;
+	spinlock_t lock;
+	union {
+		u32 next_plus_upper;
+		u32 reference;
+	} data;
+};
+
+/**
+ * struct ref_table - table of TIPC object reference entries
+ * @entries: pointer to array of reference entries
+ * @index_mask: bitmask for array index portion of reference values
+ * @first_free: array index of first unused object reference entry
+ * @last_free: array index of last unused object reference entry
+ */
+
+struct ref_table {
+	struct reference *entries;
+	u32 index_mask;
+	u32 first_free;
+	u32 last_free;
+};
+
+extern struct ref_table ref_table;
+
+int ref_table_init(u32 requested_size, u32 start);
+void ref_table_stop(void);
+
+u32 ref_acquire(void *object, spinlock_t **lock);
+void ref_discard(u32 ref);
+
+
+/**
+ * ref_lock - lock referenced object and return pointer to it
+ */
+
+static inline void *ref_lock(u32 ref)
+{
+	if (likely(ref_table.entries)) {
+		struct reference *r =
+			&ref_table.entries[ref & ref_table.index_mask];
+
+		spin_lock_bh(&r->lock);
+		if (likely(r->data.reference == ref))
+			return r->object;
+		spin_unlock_bh(&r->lock);
+	}
+	return 0;
+}
+
+/**
+ * ref_unlock - unlock referenced object 
+ */
+
+static inline void ref_unlock(u32 ref)
+{
+	if (likely(ref_table.entries)) {
+		struct reference *r =
+			&ref_table.entries[ref & ref_table.index_mask];
+
+		if (likely(r->data.reference == ref))
+			spin_unlock_bh(&r->lock);
+		else
+			err("ref_unlock() invoked using obsolete reference\n");
+	}
+}
+
+/**
+ * ref_deref - return pointer to referenced object (without locking it)
+ */
+
+static inline void *ref_deref(u32 ref)
+{
+	if (likely(ref_table.entries)) {
+		struct reference *r = 
+			&ref_table.entries[ref & ref_table.index_mask];
+
+		if (likely(r->data.reference == ref))
+			return r->object;
+	}
+	return 0;
+}
+
+#endif
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
new file mode 100644
index 0000000..22dcb72
--- /dev/null
+++ b/net/tipc/socket.c
@@ -0,0 +1,1722 @@
+/*
+ * net/tipc/socket.c: TIPC socket API
+ * 
+ * Copyright (c) 2003-2005, Ericsson Research Canada
+ * Copyright (c) 2004-2005, Wind River Systems
+ * Copyright (c) 2005-2006, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without 
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this 
+ * list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice, 
+ * this list of conditions and the following disclaimer in the documentation 
+ * and/or other materials provided with the distribution.
+ * Neither the names of the copyright holders nor the names of its 
+ * contributors may be used to endorse or promote products derived from this 
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/net.h>
+#include <linux/socket.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/version.h>
+#include <linux/fcntl.h>
+#include <linux/version.h>
+#include <asm/semaphore.h>
+#include <asm/string.h>
+#include <asm/atomic.h>
+#include <net/sock.h>
+
+#include <linux/tipc.h>
+#include <net/tipc/tipc_msg.h>
+#include <net/tipc/tipc_port.h>
+
+#include "core.h"
+
+#define SS_LISTENING	-1	/* socket is listening */
+#define SS_READY	-2	/* socket is connectionless */
+
+#define OVERLOAD_LIMIT_BASE    5000
+
+struct tipc_sock {
+	struct sock sk;
+	struct tipc_port *p;
+	struct semaphore sem;
+};
+
+#define tipc_sk(sk) ((struct tipc_sock*)sk)
+
+static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf);
+static void wakeupdispatch(struct tipc_port *tport);
+
+static struct proto_ops packet_ops;
+static struct proto_ops stream_ops;
+static struct proto_ops msg_ops;
+
+static struct proto tipc_proto;
+
+static int sockets_enabled = 0;
+
+static atomic_t tipc_queue_size = ATOMIC_INIT(0);
+
+
+/* 
+ * sock_lock(): Lock a port/socket pair. lock_sock() can 
+ * not be used here, since the same lock must protect ports 
+ * with non-socket interfaces.
+ * See net.c for description of locking policy.
+ */
+static inline void sock_lock(struct tipc_sock* tsock)
+{
+        spin_lock_bh(tsock->p->lock);       
+}
+
+/* 
+ * sock_unlock(): Unlock a port/socket pair
+ */
+static inline void sock_unlock(struct tipc_sock* tsock)
+{
+        spin_unlock_bh(tsock->p->lock);
+}
+
+/**
+ * pollmask - determine the current set of poll() events for a socket
+ * @sock: socket structure
+ * 
+ * TIPC sets the returned events as follows:
+ * a) POLLRDNORM and POLLIN are set if the socket's receive queue is non-empty
+ *    or if a connection-oriented socket does not have an active connection
+ *    (i.e. a read operation will not block).
+ * b) POLLOUT is set except when a socket's connection has been terminated
+ *    (i.e. a write operation will not block).
+ * c) POLLHUP is set when a socket's connection has been terminated.
+ *
+ * IMPORTANT: The fact that a read or write operation will not block does NOT
+ * imply that the operation will succeed!
+ * 
+ * Returns pollmask value
+ */
+
+static inline u32 pollmask(struct socket *sock)
+{
+	u32 mask;
+
+	if ((skb_queue_len(&sock->sk->sk_receive_queue) != 0) ||
+	    (sock->state == SS_UNCONNECTED) ||
+	    (sock->state == SS_DISCONNECTING))
+		mask = (POLLRDNORM | POLLIN);
+	else
+		mask = 0;
+
+	if (sock->state == SS_DISCONNECTING) 
+		mask |= POLLHUP;
+	else
+		mask |= POLLOUT;
+
+	return mask;
+}
+
+
+/**
+ * advance_queue - discard first buffer in queue
+ * @tsock: TIPC socket
+ */
+
+static inline void advance_queue(struct tipc_sock *tsock)
+{
+        sock_lock(tsock);
+	buf_discard(skb_dequeue(&tsock->sk.sk_receive_queue));
+        sock_unlock(tsock);
+	atomic_dec(&tipc_queue_size);
+}
+
+/**
+ * tipc_create - create a TIPC socket
+ * @sock: pre-allocated socket structure
+ * @protocol: protocol indicator (must be 0)
+ * 
+ * This routine creates and attaches a 'struct sock' to the 'struct socket',
+ * then creates and attaches a TIPC port to the 'struct sock' part.
+ *
+ * Returns 0 on success, errno otherwise
+ */
+static int tipc_create(struct socket *sock, int protocol)
+{
+	struct tipc_sock *tsock;
+	struct tipc_port *port;
+	struct sock *sk;
+        u32 ref;
+
+	if ((sock->type != SOCK_STREAM) && 
+	    (sock->type != SOCK_SEQPACKET) &&
+	    (sock->type != SOCK_DGRAM) &&
+	    (sock->type != SOCK_RDM))
+		return -EPROTOTYPE;
+
+	if (unlikely(protocol != 0))
+		return -EPROTONOSUPPORT;
+
+	ref = tipc_createport_raw(0, &dispatch, &wakeupdispatch, TIPC_LOW_IMPORTANCE);
+	if (unlikely(!ref))
+		return -ENOMEM;
+
+	sock->state = SS_UNCONNECTED;
+
+	switch (sock->type) {
+	case SOCK_STREAM:
+		sock->ops = &stream_ops;
+		break;
+	case SOCK_SEQPACKET:
+		sock->ops = &packet_ops;
+		break;
+	case SOCK_DGRAM:
+		tipc_set_portunreliable(ref, 1);
+		/* fall through */
+	case SOCK_RDM:
+		tipc_set_portunreturnable(ref, 1);
+		sock->ops = &msg_ops;
+		sock->state = SS_READY;
+		break;
+	}
+
+	sk = sk_alloc(AF_TIPC, GFP_KERNEL, &tipc_proto, 1);
+	if (!sk) {
+		tipc_deleteport(ref);
+		return -ENOMEM;
+	}
+
+	sock_init_data(sock, sk);
+	init_waitqueue_head(sk->sk_sleep);
+	sk->sk_rcvtimeo = 8 * HZ;   /* default connect timeout = 8s */
+
+	tsock = tipc_sk(sk);
+	port = tipc_get_port(ref);
+
+	tsock->p = port;
+	port->usr_handle = tsock;
+
+	init_MUTEX(&tsock->sem);
+
+	dbg("sock_create: %x\n",tsock);
+
+	atomic_inc(&tipc_user_count);
+
+	return 0;
+}
+
+/**
+ * release - destroy a TIPC socket
+ * @sock: socket to destroy
+ *
+ * This routine cleans up any messages that are still queued on the socket.
+ * For DGRAM and RDM socket types, all queued messages are rejected.
+ * For SEQPACKET and STREAM socket types, the first message is rejected
+ * and any others are discarded.  (If the first message on a STREAM socket
+ * is partially-read, it is discarded and the next one is rejected instead.)
+ * 
+ * NOTE: Rejected messages are not necessarily returned to the sender!  They
+ * are returned or discarded according to the "destination droppable" setting
+ * specified for the message by the sender.
+ *
+ * Returns 0 on success, errno otherwise
+ */
+
+static int release(struct socket *sock)
+{
+	struct tipc_sock *tsock = tipc_sk(sock->sk);
+	struct sock *sk = sock->sk;
+	int res = TIPC_OK;
+	struct sk_buff *buf;
+
+        dbg("sock_delete: %x\n",tsock);
+	if (!tsock)
+		return 0;
+	down_interruptible(&tsock->sem);
+	if (!sock->sk) {
+		up(&tsock->sem);
+		return 0;
+	}
+	
+	/* Reject unreceived messages, unless no longer connected */
+
+	while (sock->state != SS_DISCONNECTING) {
+		sock_lock(tsock);
+		buf = skb_dequeue(&sk->sk_receive_queue);
+		if (!buf)
+			tsock->p->usr_handle = 0;
+		sock_unlock(tsock);
+		if (!buf)
+			break;
+		if (TIPC_SKB_CB(buf)->handle != msg_data(buf_msg(buf)))
+			buf_discard(buf);
+		else
+			tipc_reject_msg(buf, TIPC_ERR_NO_PORT);
+		atomic_dec(&tipc_queue_size);
+	}
+
+	/* Delete TIPC port */
+
+	res = tipc_deleteport(tsock->p->ref);
+	sock->sk = NULL;
+
+	/* Discard any remaining messages */
+
+	while ((buf = skb_dequeue(&sk->sk_receive_queue))) {
+		buf_discard(buf);
+		atomic_dec(&tipc_queue_size);
+	}
+
+	up(&tsock->sem);
+
+	sock_put(sk);
+
+        atomic_dec(&tipc_user_count);
+	return res;
+}
+
+/**
+ * bind - associate or disassociate TIPC name(s) with a socket
+ * @sock: socket structure
+ * @uaddr: socket address describing name(s) and desired operation
+ * @uaddr_len: size of socket address data structure
+ * 
+ * Name and name sequence binding is indicated using a positive scope value;
+ * a negative scope value unbinds the specified name.  Specifying no name
+ * (i.e. a socket address length of 0) unbinds all names from the socket.
+ * 
+ * Returns 0 on success, errno otherwise
+ */
+
+static int bind(struct socket *sock, struct sockaddr *uaddr, int uaddr_len)
+{
+	struct tipc_sock *tsock = tipc_sk(sock->sk);
+	struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
+	int res;
+
+	if (down_interruptible(&tsock->sem))
+		return -ERESTARTSYS;
+	
+	if (unlikely(!uaddr_len)) {
+		res = tipc_withdraw(tsock->p->ref, 0, 0);
+		goto exit;
+	}
+
+	if (uaddr_len < sizeof(struct sockaddr_tipc)) {
+		res = -EINVAL;
+		goto exit;
+	}
+
+	if (addr->family != AF_TIPC) {
+		res = -EAFNOSUPPORT;
+		goto exit;
+	}
+	if (addr->addrtype == TIPC_ADDR_NAME)
+		addr->addr.nameseq.upper = addr->addr.nameseq.lower;
+	else if (addr->addrtype != TIPC_ADDR_NAMESEQ) {
+		res = -EAFNOSUPPORT;
+		goto exit;
+	}
+        
+       	if (addr->scope > 0)
+		res = tipc_publish(tsock->p->ref, addr->scope,
+				   &addr->addr.nameseq);
+	else
+		res = tipc_withdraw(tsock->p->ref, -addr->scope,
+				    &addr->addr.nameseq);
+exit:
+	up(&tsock->sem);
+	return res;
+}
+
+/** 
+ * get_name - get port ID of socket or peer socket
+ * @sock: socket structure
+ * @uaddr: area for returned socket address
+ * @uaddr_len: area for returned length of socket address
+ * @peer: 0 to obtain socket name, 1 to obtain peer socket name
+ * 
+ * Returns 0 on success, errno otherwise
+ */
+
+static int get_name(struct socket *sock, struct sockaddr *uaddr, 
+		    int *uaddr_len, int peer)
+{
+	struct tipc_sock *tsock = tipc_sk(sock->sk);
+	struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
+	u32 res;
+
+	if (down_interruptible(&tsock->sem))
+		return -ERESTARTSYS;
+
+	*uaddr_len = sizeof(*addr);
+	addr->addrtype = TIPC_ADDR_ID;
+	addr->family = AF_TIPC;
+	addr->scope = 0;
+	if (peer)
+		res = tipc_peer(tsock->p->ref, &addr->addr.id);
+	else
+		res = tipc_ownidentity(tsock->p->ref, &addr->addr.id);
+	addr->addr.name.domain = 0;
+
+	up(&tsock->sem);
+	return res;
+}
+
+/**
+ * poll - read and possibly block on pollmask
+ * @file: file structure associated with the socket
+ * @sock: socket for which to calculate the poll bits
+ * @wait: poll table (registers the caller for wakeup on state changes)
+ *
+ * Returns the pollmask
+ */
+
+static unsigned int poll(struct file *file, struct socket *sock, 
+			 poll_table *wait)
+{
+	poll_wait(file, sock->sk->sk_sleep, wait);
+	/* NEED LOCK HERE? */
+	return pollmask(sock);
+}
+
+/** 
+ * dest_name_check - verify user is permitted to send to specified port name
+ * @dest: destination address
+ * @m: descriptor for message to be sent
+ * 
+ * Prevents restricted configuration commands from being issued by
+ * unauthorized users.
+ * 
+ * Returns 0 if permission is granted, otherwise errno
+ */
+
+static inline int dest_name_check(struct sockaddr_tipc *dest, struct msghdr *m)
+{
+	struct tipc_cfg_msg_hdr hdr;
+
+        if (likely(dest->addr.name.name.type >= TIPC_RESERVED_TYPES))
+                return 0;
+        if (likely(dest->addr.name.name.type == TIPC_TOP_SRV))
+                return 0;
+
+        if (likely(dest->addr.name.name.type != TIPC_CFG_SRV))
+                return -EACCES;
+
+        if (copy_from_user(&hdr, m->msg_iov[0].iov_base, sizeof(hdr)))
+		return -EFAULT;
+	if ((ntohs(hdr.tcm_type) & 0xC000) && !capable(CAP_NET_ADMIN))
+		return -EACCES;
+        
+	return 0;
+}
+
+/**
+ * send_msg - send message in connectionless manner
+ * @iocb: (unused)
+ * @sock: socket structure
+ * @m: message to send
+ * @total_len: (unused)
+ * 
+ * Message must have a destination specified explicitly.
+ * Used for SOCK_RDM and SOCK_DGRAM messages, 
+ * and for 'SYN' messages on SOCK_SEQPACKET and SOCK_STREAM connections.
+ * (Note: 'SYN+' is prohibited on SOCK_STREAM.)
+ * 
+ * Returns the number of bytes sent on success, or errno otherwise
+ */
+
+static int send_msg(struct kiocb *iocb, struct socket *sock,
+		    struct msghdr *m, size_t total_len)
+{
+	struct tipc_sock *tsock = tipc_sk(sock->sk);
+        struct sockaddr_tipc *dest = (struct sockaddr_tipc *)m->msg_name;
+	struct sk_buff *buf;
+	int needs_conn;
+	int res = -EINVAL;
+
+	if (unlikely(!dest))
+		return -EDESTADDRREQ;
+	if (unlikely(dest->family != AF_TIPC))
+		return -EINVAL;
+
+	needs_conn = (sock->state != SS_READY);
+	if (unlikely(needs_conn)) {
+		if (sock->state == SS_LISTENING)
+			return -EPIPE;
+		if (sock->state != SS_UNCONNECTED)
+			return -EISCONN;
+		if ((tsock->p->published) ||
+		    ((sock->type == SOCK_STREAM) && (total_len != 0)))
+			return -EOPNOTSUPP;
+	}
+
+	if (down_interruptible(&tsock->sem))
+		return -ERESTARTSYS;
+
+	if (needs_conn) {
+
+		/* Abort any pending connection attempts (very unlikely) */
+
+		while ((buf = skb_dequeue(&sock->sk->sk_receive_queue))) {
+			tipc_reject_msg(buf, TIPC_ERR_NO_PORT);
+			atomic_dec(&tipc_queue_size);
+		}
+
+		sock->state = SS_CONNECTING;
+	}
+
+        do {
+                if (dest->addrtype == TIPC_ADDR_NAME) {
+                        if ((res = dest_name_check(dest, m)))
+                                goto exit;
+                        res = tipc_send2name(tsock->p->ref,
+                                             &dest->addr.name.name,
+                                             dest->addr.name.domain, 
+                                             m->msg_iovlen,
+                                             m->msg_iov);
+                }
+                else if (dest->addrtype == TIPC_ADDR_ID) {
+                        res = tipc_send2port(tsock->p->ref,
+                                             &dest->addr.id,
+                                             m->msg_iovlen,
+                                             m->msg_iov);
+                }
+                else if (dest->addrtype == TIPC_ADDR_MCAST) {
+			if (needs_conn) {
+				res = -EOPNOTSUPP;
+				goto exit;
+			}
+                        if ((res = dest_name_check(dest, m)))
+                                goto exit;
+                        res = tipc_multicast(tsock->p->ref,
+                                             &dest->addr.nameseq,
+                                             0,
+                                             m->msg_iovlen,
+                                             m->msg_iov);
+                }
+                if (likely(res != -ELINKCONG)) {
+exit:                                
+                        up(&tsock->sem);
+                        return res;
+                }
+		if (m->msg_flags & MSG_DONTWAIT) {
+			res = -EWOULDBLOCK;
+			goto exit;
+		}
+                if (wait_event_interruptible(*sock->sk->sk_sleep,
+                                             !tsock->p->congested)) {
+                    res = -ERESTARTSYS;
+                    goto exit;
+                }
+        } while (1);
+}
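+
+/*
+ * Usage sketch from user space (illustrative only, not compiled in):
+ * a connectionless send to a named TIPC port, which reaches send_msg()
+ * above as a TIPC_ADDR_NAME destination.  The type/instance values are
+ * hypothetical examples.
+ *
+ *	struct sockaddr_tipc dest;
+ *
+ *	memset(&dest, 0, sizeof(dest));
+ *	dest.family = AF_TIPC;
+ *	dest.addrtype = TIPC_ADDR_NAME;
+ *	dest.addr.name.name.type = 1000;
+ *	dest.addr.name.name.instance = 1;
+ *	dest.addr.name.domain = 0;
+ *
+ *	sendto(sd, msg, msg_len, 0, (struct sockaddr *)&dest, sizeof(dest));
+ */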
+
+/** 
+ * send_packet - send a connection-oriented message
+ * @iocb: (unused)
+ * @sock: socket structure
+ * @m: message to send
+ * @total_len: (unused)
+ * 
+ * Used for SOCK_SEQPACKET messages and SOCK_STREAM data.
+ * 
+ * Returns the number of bytes sent on success, or errno otherwise
+ */
+
+static int send_packet(struct kiocb *iocb, struct socket *sock,
+		       struct msghdr *m, size_t total_len)
+{
+	struct tipc_sock *tsock = tipc_sk(sock->sk);
+        struct sockaddr_tipc *dest = (struct sockaddr_tipc *)m->msg_name;
+	int res;
+
+	/* Handle implied connection establishment */
+
+	if (unlikely(dest))
+		return send_msg(iocb, sock, m, total_len);
+
+	if (down_interruptible(&tsock->sem)) {
+		return -ERESTARTSYS;
+        }
+
+        if (unlikely(sock->state != SS_CONNECTED)) {
+                if (sock->state == SS_DISCONNECTING)
+                        res = -EPIPE;   
+                else
+                        res = -ENOTCONN;
+                goto exit;
+        }
+
+        do {
+                res = tipc_send(tsock->p->ref, m->msg_iovlen, m->msg_iov);
+                if (likely(res != -ELINKCONG)) {
+exit:
+                        up(&tsock->sem);
+                        return res;
+                }
+		if (m->msg_flags & MSG_DONTWAIT) {
+			res = -EWOULDBLOCK;
+			goto exit;
+		}
+                if (wait_event_interruptible(*sock->sk->sk_sleep,
+                                             !tsock->p->congested)) {
+                    res = -ERESTARTSYS;
+                    goto exit;
+                }
+        } while (1);
+}
+
+/** 
+ * send_stream - send stream-oriented data
+ * @iocb: (unused)
+ * @sock: socket structure
+ * @m: data to send
+ * @total_len: total length of data to be sent
+ * 
+ * Used for SOCK_STREAM data.
+ * 
+ * Returns the number of bytes sent on success, or errno otherwise
+ */
+
+
+static int send_stream(struct kiocb *iocb, struct socket *sock,
+		       struct msghdr *m, size_t total_len)
+{
+	struct msghdr my_msg;
+	struct iovec my_iov;
+	struct iovec *curr_iov;
+	int curr_iovlen;
+	char __user *curr_start;
+	int curr_left;
+	int bytes_to_send;
+	int res;
+	
+	if (likely(total_len <= TIPC_MAX_USER_MSG_SIZE))
+		return send_packet(iocb, sock, m, total_len);
+
+	/* Can only send large data streams if already connected */
+
+        if (unlikely(sock->state != SS_CONNECTED)) {
+                if (sock->state == SS_DISCONNECTING)
+                        return -EPIPE;   
+                else
+                        return -ENOTCONN;
+        }
+
+	/* 
+	 * Send each iovec entry using one or more messages
+	 *
+	 * Note: This algorithm is good for the most likely case 
+	 * (i.e. one large iovec entry), but could be improved to pass sets
+	 * of small iovec entries into send_packet().
+	 */
+
+	my_msg = *m;
+	curr_iov = my_msg.msg_iov;
+	curr_iovlen = my_msg.msg_iovlen;
+	my_msg.msg_iov = &my_iov;
+	my_msg.msg_iovlen = 1;
+
+	while (curr_iovlen--) {
+		curr_start = curr_iov->iov_base;
+		curr_left = curr_iov->iov_len;
+
+		while (curr_left) {
+			bytes_to_send = (curr_left < TIPC_MAX_USER_MSG_SIZE)
+				? curr_left : TIPC_MAX_USER_MSG_SIZE;
+			my_iov.iov_base = curr_start;
+			my_iov.iov_len = bytes_to_send;
+                        if ((res = send_packet(iocb, sock, &my_msg, 0)) < 0)
+                                return res;
+			curr_left -= bytes_to_send;
+			curr_start += bytes_to_send;
+		}
+
+		curr_iov++;
+	}
+
+	return total_len;
+}
+
+/**
+ * auto_connect - complete connection setup to a remote port
+ * @sock: socket structure
+ * @tsock: TIPC-specific socket structure
+ * @msg: peer's response message
+ * 
+ * Returns 0 on success, errno otherwise
+ */
+
+static int auto_connect(struct socket *sock, struct tipc_sock *tsock, 
+			struct tipc_msg *msg)
+{
+	struct tipc_portid peer;
+
+	if (msg_errcode(msg)) {
+		sock->state = SS_DISCONNECTING;
+		return -ECONNREFUSED;
+	}
+
+	peer.ref = msg_origport(msg);
+	peer.node = msg_orignode(msg);
+	tipc_connect2port(tsock->p->ref, &peer);
+	tipc_set_portimportance(tsock->p->ref, msg_importance(msg));
+	sock->state = SS_CONNECTED;
+	return 0;
+}
+
+/**
+ * set_orig_addr - capture sender's address for received message
+ * @m: descriptor for message info
+ * @msg: received message header
+ * 
+ * Note: Address is not captured if not requested by receiver.
+ */
+
+static inline void set_orig_addr(struct msghdr *m, struct tipc_msg *msg)
+{
+        struct sockaddr_tipc *addr = (struct sockaddr_tipc *)m->msg_name;
+
+        if (addr) {
+		addr->family = AF_TIPC;
+		addr->addrtype = TIPC_ADDR_ID;
+		addr->addr.id.ref = msg_origport(msg);
+		addr->addr.id.node = msg_orignode(msg);
+		addr->addr.name.domain = 0;   	/* could leave uninitialized */
+		addr->scope = 0;   		/* could leave uninitialized */
+		m->msg_namelen = sizeof(struct sockaddr_tipc);
+	}
+}
+
+/**
+ * anc_data_recv - optionally capture ancillary data for received message 
+ * @m: descriptor for message info
+ * @msg: received message header
+ * @tport: TIPC port associated with message
+ * 
+ * Note: Ancillary data is not captured if not requested by receiver.
+ * 
+ * Returns 0 if successful, otherwise errno
+ */
+
+static inline int anc_data_recv(struct msghdr *m, struct tipc_msg *msg, 
+				struct tipc_port *tport)
+{
+	u32 anc_data[3];
+	u32 err;
+	u32 dest_type;
+	int res;
+
+	if (likely(m->msg_controllen == 0))
+		return 0;
+
+	/* Optionally capture errored message object(s) */
+
+	err = msg ? msg_errcode(msg) : 0;
+	if (unlikely(err)) {
+		anc_data[0] = err;
+		anc_data[1] = msg_data_sz(msg);
+		if ((res = put_cmsg(m, SOL_SOCKET, TIPC_ERRINFO, 8, anc_data)))
+			return res;
+		if (anc_data[1] &&
+		    (res = put_cmsg(m, SOL_SOCKET, TIPC_RETDATA, anc_data[1], 
+				    msg_data(msg))))
+			return res;
+	}
+
+	/* Optionally capture message destination object */
+
+	dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG;
+	switch (dest_type) {
+	case TIPC_NAMED_MSG:
+		anc_data[0] = msg_nametype(msg);
+		anc_data[1] = msg_namelower(msg);
+		anc_data[2] = msg_namelower(msg);
+		break;
+	case TIPC_MCAST_MSG:
+		anc_data[0] = msg_nametype(msg);
+		anc_data[1] = msg_namelower(msg);
+		anc_data[2] = msg_nameupper(msg);
+		break;
+	case TIPC_CONN_MSG:
+		anc_data[0] = tport->conn_type;
+		anc_data[1] = tport->conn_instance;
+		anc_data[2] = tport->conn_instance;
+		break;
+	default:
+		anc_data[0] = 0;
+	}
+	if (anc_data[0] &&
+	    (res = put_cmsg(m, SOL_SOCKET, TIPC_DESTNAME, 12, anc_data)))
+		return res;
+
+	return 0;
+}
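+
+/*
+ * Usage sketch from user space (illustrative only, not compiled in):
+ * retrieving the destination name of a received message via the
+ * TIPC_DESTNAME ancillary data object generated above.
+ *
+ *	struct msghdr m = { ... };
+ *	char anc_space[CMSG_SPACE(12)];
+ *	struct cmsghdr *anc;
+ *
+ *	m.msg_control = anc_space;
+ *	m.msg_controllen = sizeof(anc_space);
+ *	recvmsg(sd, &m, 0);
+ *
+ *	for (anc = CMSG_FIRSTHDR(&m); anc; anc = CMSG_NXTHDR(&m, anc)) {
+ *		if (anc->cmsg_level == SOL_SOCKET &&
+ *		    anc->cmsg_type == TIPC_DESTNAME) {
+ *			u32 *name = (u32 *)CMSG_DATA(anc);
+ *			... name[0] is the name type, name[1]/name[2]
+ *			    the instance range ...
+ *		}
+ *	}
+ */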
+
+/** 
+ * recv_msg - receive packet-oriented message
+ * @iocb: (unused)
+ * @m: descriptor for message info
+ * @buf_len: total size of user buffer area
+ * @flags: receive flags
+ * 
+ * Used for SOCK_DGRAM, SOCK_RDM, and SOCK_SEQPACKET messages.
+ * If the complete message doesn't fit in user area, truncate it.
+ *
+ * Returns size of returned message data, errno otherwise
+ */
+
+static int recv_msg(struct kiocb *iocb, struct socket *sock,
+		    struct msghdr *m, size_t buf_len, int flags)
+{
+	struct tipc_sock *tsock = tipc_sk(sock->sk);
+	struct sk_buff *buf;
+	struct tipc_msg *msg;
+	unsigned int q_len;
+	unsigned int sz;
+	u32 err;
+	int res;
+
+	/* Currently doesn't support receiving into multiple iovec entries */
+
+	if (m->msg_iovlen != 1)
+		return -EOPNOTSUPP;
+
+	/* Catch invalid receive attempts */
+
+	if (unlikely(!buf_len))
+		return -EINVAL;
+
+	if (sock->type == SOCK_SEQPACKET) {
+		if (unlikely(sock->state == SS_UNCONNECTED))
+			return -ENOTCONN;
+		if (unlikely((sock->state == SS_DISCONNECTING) && 
+			     (skb_queue_len(&sock->sk->sk_receive_queue) == 0)))
+		       	return -ENOTCONN;
+	}
+
+	/* Look for a message in receive queue; wait if necessary */
+
+	if (unlikely(down_interruptible(&tsock->sem)))
+		return -ERESTARTSYS;
+
+restart:
+	if (unlikely((skb_queue_len(&sock->sk->sk_receive_queue) == 0) &&
+		     (flags & MSG_DONTWAIT))) {
+		res = -EWOULDBLOCK;
+		goto exit;
+	}
+
+	if ((res = wait_event_interruptible(
+		*sock->sk->sk_sleep, 
+		((q_len = skb_queue_len(&sock->sk->sk_receive_queue)) ||
+		 (sock->state == SS_DISCONNECTING))) )) {
+		goto exit;
+	}
+
+	/* Catch attempt to receive on an already terminated connection */
+	/* [THIS CHECK MAY OVERLAP WITH AN EARLIER CHECK] */
+
+	if (!q_len) {
+		res = -ENOTCONN;
+		goto exit;
+	}
+
+	/* Get access to first message in receive queue */
+
+	buf = skb_peek(&sock->sk->sk_receive_queue);
+	msg = buf_msg(buf);
+	sz = msg_data_sz(msg);
+	err = msg_errcode(msg);
+
+	/* Complete connection setup for an implied connect */
+
+	if (unlikely(sock->state == SS_CONNECTING)) {
+		if ((res = auto_connect(sock, tsock, msg)))
+			goto exit;
+	}
+
+	/* Discard an empty non-errored message & try again */
+
+	if ((!sz) && (!err)) {
+		advance_queue(tsock);
+		goto restart;
+	}
+
+	/* Capture sender's address (optional) */
+
+	set_orig_addr(m, msg);
+
+	/* Capture ancillary data (optional) */
+
+	if ((res = anc_data_recv(m, msg, tsock->p)))
+		goto exit;
+
+	/* Capture message data (if valid) & compute return value (always) */
+	
+	if (!err) {
+		if (unlikely(buf_len < sz)) {
+			sz = buf_len;
+			m->msg_flags |= MSG_TRUNC;
+		}
+		if (unlikely(copy_to_user(m->msg_iov->iov_base, msg_data(msg),
+					  sz))) {
+			res = -EFAULT;
+			goto exit;
+		}
+		res = sz;
+	} else {
+		if ((sock->state == SS_READY) ||
+		    ((err == TIPC_CONN_SHUTDOWN) || m->msg_control))
+			res = 0;
+		else
+			res = -ECONNRESET;
+	}
+
+	/* Consume received message (optional) */
+
+	if (likely(!(flags & MSG_PEEK))) {
+                if (unlikely(++tsock->p->conn_unacked >= TIPC_FLOW_CONTROL_WIN))
+                        tipc_acknowledge(tsock->p->ref, tsock->p->conn_unacked);
+		advance_queue(tsock);
+        }
+exit:
+	up(&tsock->sem);
+	return res;
+}
+
+/** 
+ * recv_stream - receive stream-oriented data
+ * @iocb: (unused)
+ * @m: descriptor for message info
+ * @buf_len: total size of user buffer area
+ * @flags: receive flags
+ * 
+ * Used for SOCK_STREAM messages only.  If not enough data is available,
+ * it will optionally wait for more; data is never truncated.
+ *
+ * Returns size of returned message data, errno otherwise
+ */
+
+static int recv_stream(struct kiocb *iocb, struct socket *sock,
+		       struct msghdr *m, size_t buf_len, int flags)
+{
+	struct tipc_sock *tsock = tipc_sk(sock->sk);
+	struct sk_buff *buf;
+	struct tipc_msg *msg;
+	unsigned int q_len;
+	unsigned int sz;
+	int sz_to_copy;
+	int sz_copied = 0;
+	int needed;
+	char *crs = m->msg_iov->iov_base;
+	unsigned char *buf_crs;
+	u32 err;
+	int res;
+
+	/* Currently doesn't support receiving into multiple iovec entries */
+
+	if (m->msg_iovlen != 1)
+		return -EOPNOTSUPP;
+
+	/* Catch invalid receive attempts */
+
+	if (unlikely(!buf_len))
+		return -EINVAL;
+
+	if (unlikely(sock->state == SS_DISCONNECTING)) {
+		if (skb_queue_len(&sock->sk->sk_receive_queue) == 0)
+			return -ENOTCONN;
+	} else if (unlikely(sock->state != SS_CONNECTED))
+		return -ENOTCONN;
+
+	/* Look for a message in receive queue; wait if necessary */
+
+	if (unlikely(down_interruptible(&tsock->sem)))
+		return -ERESTARTSYS;
+
+restart:
+	if (unlikely((skb_queue_len(&sock->sk->sk_receive_queue) == 0) &&
+		     (flags & MSG_DONTWAIT))) {
+		res = (sz_copied == 0) ? -EWOULDBLOCK : 0;
+		goto exit;
+	}
+
+	if ((res = wait_event_interruptible(
+		*sock->sk->sk_sleep, 
+		((q_len = skb_queue_len(&sock->sk->sk_receive_queue)) ||
+		 (sock->state == SS_DISCONNECTING))) )) {
+		goto exit;
+	}
+
+	/* Catch attempt to receive on an already terminated connection */
+	/* [THIS CHECK MAY OVERLAP WITH AN EARLIER CHECK] */
+
+	if (!q_len) {
+		res = -ENOTCONN;
+		goto exit;
+	}
+
+	/* Get access to first message in receive queue */
+
+	buf = skb_peek(&sock->sk->sk_receive_queue);
+	msg = buf_msg(buf);
+	sz = msg_data_sz(msg);
+	err = msg_errcode(msg);
+
+	/* Discard an empty non-errored message & try again */
+
+	if ((!sz) && (!err)) {
+		advance_queue(tsock);
+		goto restart;
+	}
+
+	/* Optionally capture sender's address & ancillary data of first msg */
+
+	if (sz_copied == 0) {
+		set_orig_addr(m, msg);
+		if ((res = anc_data_recv(m, msg, tsock->p)))
+			goto exit;
+	}
+
+	/* Capture message data (if valid) & compute return value (always) */
+	
+	if (!err) {
+		buf_crs = (unsigned char *)(TIPC_SKB_CB(buf)->handle);
+		sz = buf->tail - buf_crs;
+
+		needed = (buf_len - sz_copied);
+		sz_to_copy = (sz <= needed) ? sz : needed;
+		if (unlikely(copy_to_user(crs, buf_crs, sz_to_copy))) {
+			res = -EFAULT;
+			goto exit;
+		}
+		sz_copied += sz_to_copy;
+
+		if (sz_to_copy < sz) {
+			if (!(flags & MSG_PEEK))
+				TIPC_SKB_CB(buf)->handle = buf_crs + sz_to_copy;
+			goto exit;
+		}
+
+		crs += sz_to_copy;
+	} else {
+		if (sz_copied != 0)
+			goto exit; /* can't add error msg to valid data */
+
+		if ((err == TIPC_CONN_SHUTDOWN) || m->msg_control)
+			res = 0;
+		else
+			res = -ECONNRESET;
+	}
+
+	/* Consume received message (optional) */
+
+	if (likely(!(flags & MSG_PEEK))) {
+                if (unlikely(++tsock->p->conn_unacked >= TIPC_FLOW_CONTROL_WIN))
+                        tipc_acknowledge(tsock->p->ref, tsock->p->conn_unacked);
+		advance_queue(tsock);
+        }
+
+	/* Loop around if more data is required */
+
+	if ((sz_copied < buf_len)    /* didn't get all requested data */ 
+	    && (flags & MSG_WAITALL) /* ... and need to wait for more */
+	    && (!(flags & MSG_PEEK)) /* ... and aren't just peeking at data */
+	    && (!err)                /* ... and haven't reached a FIN */
+	    )
+		goto restart;
+
+exit:
+	up(&tsock->sem);
+	return res ? res : sz_copied;
+}
+
+/**
+ * queue_overloaded - test if queue overload condition exists
+ * @queue_size: current size of queue
+ * @base: nominal maximum size of queue
+ * @msg: message to be added to queue
+ * 
+ * Returns 1 if queue is currently overloaded, 0 otherwise
+ */
+
+static int queue_overloaded(u32 queue_size, u32 base, struct tipc_msg *msg)
+{
+	u32 threshold;
+	u32 imp = msg_importance(msg);
+
+	if (imp == TIPC_LOW_IMPORTANCE)
+		threshold = base;
+	else if (imp == TIPC_MEDIUM_IMPORTANCE)
+		threshold = base * 2;
+	else if (imp == TIPC_HIGH_IMPORTANCE)
+		threshold = base * 100;
+	else
+		return 0;
+
+	if (msg_connected(msg))
+		threshold *= 4;
+
+	return (queue_size > threshold);
+}
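+
+/*
+ * Worked example of the thresholds above: with "base" equal to
+ * OVERLOAD_LIMIT_BASE, a connectionless message is rejected once the
+ * queue exceeds base (low importance), base * 2 (medium) or
+ * base * 100 (high); any other importance (e.g. critical) is never
+ * rejected here.  Connection-based messages multiply each threshold
+ * by 4, so a medium-importance message on a connection is rejected
+ * only beyond base * 8 queued buffers.
+ */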
+
+/** 
+ * async_disconnect - wrapper function used to disconnect port
+ * @portref: TIPC port reference (passed as pointer-sized value)
+ */
+
+static void async_disconnect(unsigned long portref)
+{
+	tipc_disconnect((u32)portref);
+}
+
+/** 
+ * dispatch - handle arriving message
+ * @tport: TIPC port that received message
+ * @buf: message
+ * 
+ * Called with port locked.  Must not take socket lock to avoid deadlock risk.
+ * 
+ * Returns TIPC error status code (TIPC_OK if message is not to be rejected)
+ */
+
+static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf)
+{
+	struct tipc_msg *msg = buf_msg(buf);
+	struct tipc_sock *tsock = (struct tipc_sock *)tport->usr_handle;
+	struct socket *sock;
+	u32 recv_q_len;
+
+	/* Reject message if socket is closing */
+
+	if (!tsock)
+		return TIPC_ERR_NO_PORT;
+
+	/* Reject message if it is wrong sort of message for socket */
+
+	/*
+	 * WOULD IT BE BETTER TO JUST DISCARD THESE MESSAGES INSTEAD?
+	 * "NO PORT" ISN'T REALLY THE RIGHT ERROR CODE, AND THERE MAY
+	 * BE SECURITY IMPLICATIONS INHERENT IN REJECTING INVALID TRAFFIC
+	 */
+	sock = tsock->sk.sk_socket;
+	if (sock->state == SS_READY) {
+		if (msg_connected(msg)) {
+			msg_dbg(msg, "dispatch filter 1\n");
+			return TIPC_ERR_NO_PORT;
+		}
+	} else {
+		if (msg_mcast(msg)) {
+			msg_dbg(msg, "dispatch filter 2\n");
+			return TIPC_ERR_NO_PORT;
+		}
+		if (sock->state == SS_CONNECTED) {
+			if (!msg_connected(msg)) {
+				msg_dbg(msg, "dispatch filter 3\n");
+				return TIPC_ERR_NO_PORT;
+			}
+		}
+		else if (sock->state == SS_CONNECTING) {
+			if (!msg_connected(msg) && (msg_errcode(msg) == 0)) {
+				msg_dbg(msg, "dispatch filter 4\n");
+				return TIPC_ERR_NO_PORT;
+			}
+		} 
+		else if (sock->state == SS_LISTENING) {
+			if (msg_connected(msg) || msg_errcode(msg)) {
+				msg_dbg(msg, "dispatch filter 5\n");
+				return TIPC_ERR_NO_PORT;
+			}
+		} 
+		else if (sock->state == SS_DISCONNECTING) {
+			msg_dbg(msg, "dispatch filter 6\n");
+			return TIPC_ERR_NO_PORT;
+		}
+		else /* (sock->state == SS_UNCONNECTED) */ {
+			if (msg_connected(msg) || msg_errcode(msg)) {
+				msg_dbg(msg, "dispatch filter 7\n");
+				return TIPC_ERR_NO_PORT;
+			}
+		}
+	}
+
+	/* Reject message if there isn't room to queue it */
+
+	if (unlikely((u32)atomic_read(&tipc_queue_size) > 
+		     OVERLOAD_LIMIT_BASE)) {
+		if (queue_overloaded(atomic_read(&tipc_queue_size), 
+				     OVERLOAD_LIMIT_BASE, msg))
+			return TIPC_ERR_OVERLOAD;
+	}
+	recv_q_len = skb_queue_len(&tsock->sk.sk_receive_queue);
+	if (unlikely(recv_q_len > (OVERLOAD_LIMIT_BASE / 2))) {
+		if (queue_overloaded(recv_q_len, 
+				     OVERLOAD_LIMIT_BASE / 2, msg)) 
+			return TIPC_ERR_OVERLOAD;
+	}
+
+	/* Initiate connection termination for an incoming 'FIN' */
+
+	if (unlikely(msg_errcode(msg) && (sock->state == SS_CONNECTED))) {
+		sock->state = SS_DISCONNECTING;
+		/* Note: Use signal since port lock is already taken! */
+		k_signal((Handler)async_disconnect, tport->ref);
+	}
+
+	/* Enqueue message (finally!) */
+
+	msg_dbg(msg,"<DISP<: ");
+	TIPC_SKB_CB(buf)->handle = msg_data(msg);
+	atomic_inc(&tipc_queue_size);
+	skb_queue_tail(&sock->sk->sk_receive_queue, buf);
+
+	wake_up_interruptible(sock->sk->sk_sleep);
+	return TIPC_OK;
+}
+
+/** 
+ * wakeupdispatch - wake up port after congestion
+ * @tport: port to wakeup
+ * 
+ * Called with port lock on.
+ */
+
+static void wakeupdispatch(struct tipc_port *tport)
+{
+	struct tipc_sock *tsock = (struct tipc_sock *)tport->usr_handle;
+
+	wake_up_interruptible(tsock->sk.sk_sleep);
+}
+
+/**
+ * connect - establish a connection to another TIPC port
+ * @sock: socket structure
+ * @dest: socket address for destination port
+ * @destlen: size of socket address data structure
+ * @flags: (unused)
+ *
+ * Returns 0 on success, errno otherwise
+ */
+
+static int connect(struct socket *sock, struct sockaddr *dest, int destlen, 
+		   int flags)
+{
+	struct tipc_sock *tsock = tipc_sk(sock->sk);
+	struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest;
+	struct msghdr m = {0,};
+	struct sk_buff *buf;
+	struct tipc_msg *msg;
+	int res;
+
+	/* For now, TIPC does not allow use of connect() with DGRAM or RDM types */
+
+	if (sock->state == SS_READY)
+		return -EOPNOTSUPP;
+
+	/* MOVE THE REST OF THIS ERROR CHECKING TO send_msg()? */
+	if (sock->state == SS_LISTENING)
+		return -EOPNOTSUPP;
+	if (sock->state == SS_CONNECTING)
+		return -EALREADY;
+	if (sock->state != SS_UNCONNECTED)
+		return -EISCONN;
+
+	if ((dst->family != AF_TIPC) ||
+	    ((dst->addrtype != TIPC_ADDR_NAME) &&
+	     (dst->addrtype != TIPC_ADDR_ID)))
+		return -EINVAL;
+
+	/* Send a 'SYN-' to destination */
+
+	m.msg_name = dest;
+	if ((res = send_msg(0, sock, &m, 0)) < 0) {
+		sock->state = SS_DISCONNECTING;
+		return res;
+	}
+
+	if (down_interruptible(&tsock->sem))
+		return -ERESTARTSYS;
+
+	/* Wait for destination's 'ACK' response */
+
+	res = wait_event_interruptible_timeout(*sock->sk->sk_sleep,
+			skb_queue_len(&sock->sk->sk_receive_queue),
+			sock->sk->sk_rcvtimeo);
+	buf = skb_peek(&sock->sk->sk_receive_queue);
+	if (res > 0) {
+		msg = buf_msg(buf);
+		res = auto_connect(sock, tsock, msg);
+		if (!res) {
+			if (dst->addrtype == TIPC_ADDR_NAME) {
+				tsock->p->conn_type = dst->addr.name.name.type;
+				tsock->p->conn_instance = dst->addr.name.name.instance;
+			}
+			if (!msg_data_sz(msg))
+				advance_queue(tsock);
+		}
+	} else {
+		if (res == 0)
+			res = -ETIMEDOUT;
+		/* else leave "res" unchanged on signal */
+		sock->state = SS_DISCONNECTING;
+	}
+
+	up(&tsock->sem);
+	return res;
+}
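+
+/*
+ * Illustrative user-space sketch (not part of this patch): connecting
+ * a SOCK_SEQPACKET socket to a named TIPC port.  The name values
+ * (1000, 99) and the descriptor "sd" are hypothetical.
+ *
+ *	struct sockaddr_tipc peer;
+ *
+ *	memset(&peer, 0, sizeof(peer));
+ *	peer.family = AF_TIPC;
+ *	peer.addrtype = TIPC_ADDR_NAME;
+ *	peer.addr.name.name.type = 1000;
+ *	peer.addr.name.name.instance = 99;
+ *	if (connect(sd, (struct sockaddr *)&peer, sizeof(peer)) < 0)
+ *		perror("connect");	(e.g. ETIMEDOUT if no 'ACK' arrives)
+ */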
+
+/** 
+ * listen - allow socket to listen for incoming connections
+ * @sock: socket structure
+ * @len: (unused)
+ * 
+ * Returns 0 on success, errno otherwise
+ */
+
+static int listen(struct socket *sock, int len)
+{
+	/* REQUIRES SOCKET LOCKING OF SOME SORT? */
+
+	if (sock->state == SS_READY)
+		return -EOPNOTSUPP;
+	if (sock->state != SS_UNCONNECTED)
+		return -EINVAL;
+	sock->state = SS_LISTENING;
+	return 0;
+}
+
+/** 
+ * accept - wait for connection request
+ * @sock: listening socket
+ * @newsock: new socket that is to be connected
+ * @flags: file-related flags associated with socket
+ * 
+ * Returns 0 on success, errno otherwise
+ */
+
+static int accept(struct socket *sock, struct socket *newsock, int flags)
+{
+	struct tipc_sock *tsock = tipc_sk(sock->sk);
+	struct sk_buff *buf;
+	int res = -EFAULT;
+
+	if (sock->state == SS_READY)
+		return -EOPNOTSUPP;
+	if (sock->state != SS_LISTENING)
+		return -EINVAL;
+	
+	if (unlikely((skb_queue_len(&sock->sk->sk_receive_queue) == 0) && 
+		     (flags & O_NONBLOCK)))
+		return -EWOULDBLOCK;
+
+	if (down_interruptible(&tsock->sem))
+		return -ERESTARTSYS;
+
+	if (wait_event_interruptible(*sock->sk->sk_sleep, 
+				     skb_queue_len(&sock->sk->sk_receive_queue))) {
+		res = -ERESTARTSYS;
+		goto exit;
+	}
+	buf = skb_peek(&sock->sk->sk_receive_queue);
+
+	res = tipc_create(newsock, 0);
+	if (!res) {
+		struct tipc_sock *new_tsock = tipc_sk(newsock->sk);
+		struct tipc_portid id;
+		struct tipc_msg *msg = buf_msg(buf);
+		u32 new_ref = new_tsock->p->ref;
+
+		id.ref = msg_origport(msg);
+		id.node = msg_orignode(msg);
+		tipc_connect2port(new_ref, &id);
+		newsock->state = SS_CONNECTED;
+
+		tipc_set_portimportance(new_ref, msg_importance(msg));
+		if (msg_named(msg)) {
+			new_tsock->p->conn_type = msg_nametype(msg);
+			new_tsock->p->conn_instance = msg_nameinst(msg);
+		}
+
+		/*
+		 * Respond to 'SYN-' by discarding it & returning an 'ACK-'.
+		 * Respond to 'SYN+' by queuing it on the new socket.
+		 */
+
+		msg_dbg(msg, "<ACC<: ");
+		if (!msg_data_sz(msg)) {
+			struct msghdr m = {0,};
+
+			send_packet(0, newsock, &m, 0);
+			advance_queue(tsock);
+		} else {
+			sock_lock(tsock);
+			skb_dequeue(&sock->sk->sk_receive_queue);
+			sock_unlock(tsock);
+			skb_queue_head(&newsock->sk->sk_receive_queue, buf);
+		}
+	}
+exit:
+	up(&tsock->sem);
+	return res;
+}
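+
+/*
+ * Illustrative user-space sketch (not part of this patch): the
+ * passive side of connection setup.  "sd" is assumed to be a socket
+ * already bound to a name; error handling is omitted.
+ *
+ *	int peer_sd;
+ *
+ *	listen(sd, 0);			(backlog argument is unused)
+ *	peer_sd = accept(sd, 0, 0);	(blocks until a 'SYN' arrives)
+ *	... peer_sd is now connected and usable with send()/recv()
+ */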
+
+/**
+ * shutdown - shutdown socket connection
+ * @sock: socket structure
+ * @how: direction to close (always treated as read + write)
+ *
+ * Terminates connection (if necessary), then purges socket's receive queue.
+ * 
+ * Returns 0 on success, errno otherwise
+ */
+
+static int shutdown(struct socket *sock, int how)
+{
+	struct tipc_sock* tsock = tipc_sk(sock->sk);
+	struct sk_buff *buf;
+	int res;
+
+	/* Could return -EINVAL for an invalid "how", but why bother? */
+
+	if (down_interruptible(&tsock->sem))
+		return -ERESTARTSYS;
+
+	sock_lock(tsock);
+
+	switch (sock->state) {
+	case SS_CONNECTED:
+
+		/* Send 'FIN+' or 'FIN-' message to peer */
+
+		sock_unlock(tsock);
+restart:
+		if ((buf = skb_dequeue(&sock->sk->sk_receive_queue))) {
+			atomic_dec(&tipc_queue_size);
+			if (TIPC_SKB_CB(buf)->handle != msg_data(buf_msg(buf))) {
+				buf_discard(buf);
+				goto restart;
+			}
+			tipc_reject_msg(buf, TIPC_CONN_SHUTDOWN);
+		}
+		else {
+			tipc_shutdown(tsock->p->ref);
+		}
+		sock_lock(tsock);
+
+		/* fall through */
+
+	case SS_DISCONNECTING:
+
+		/* Discard any unreceived messages */
+
+		while ((buf = skb_dequeue(&sock->sk->sk_receive_queue))) {
+			atomic_dec(&tipc_queue_size);
+			buf_discard(buf);
+		}
+		tsock->p->conn_unacked = 0;
+
+		/* fall through */
+
+	case SS_CONNECTING:
+		sock->state = SS_DISCONNECTING;
+		res = 0;
+		break;
+
+	default:
+		res = -ENOTCONN;
+	}
+
+	sock_unlock(tsock);
+
+	up(&tsock->sem);
+	return res;
+}
+
+/**
+ * setsockopt - set socket option
+ * @sock: socket structure
+ * @lvl: option level
+ * @opt: option identifier
+ * @ov: pointer to new option value
+ * @ol: length of option value
+ * 
+ * For stream sockets only, accepts and ignores all IPPROTO_TCP options 
+ * (to ease compatibility).
+ * 
+ * Returns 0 on success, errno otherwise
+ */
+
+static int setsockopt(struct socket *sock, int lvl, int opt, char *ov, int ol)
+{
+	struct tipc_sock *tsock = tipc_sk(sock->sk);
+	u32 value;
+	int res;
+
+	if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
+		return 0;
+	if (lvl != SOL_TIPC)
+		return -ENOPROTOOPT;
+	if (ol < sizeof(value))
+		return -EINVAL;
+	if ((res = get_user(value, (u32 *)ov)))
+		return res;
+
+	if (down_interruptible(&tsock->sem))
+		return -ERESTARTSYS;
+	
+	switch (opt) {
+	case TIPC_IMPORTANCE:
+		res = tipc_set_portimportance(tsock->p->ref, value);
+		break;
+	case TIPC_SRC_DROPPABLE:
+		if (sock->type != SOCK_STREAM)
+			res = tipc_set_portunreliable(tsock->p->ref, value);
+		else 
+			res = -ENOPROTOOPT;
+		break;
+	case TIPC_DEST_DROPPABLE:
+		res = tipc_set_portunreturnable(tsock->p->ref, value);
+		break;
+	case TIPC_CONN_TIMEOUT:
+		sock->sk->sk_rcvtimeo = (value * HZ / 1000);
+		break;
+	default:
+		res = -EINVAL;
+	}
+
+	up(&tsock->sem);
+	return res;
+}
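+
+/*
+ * Illustrative user-space sketch (not part of this patch): setting
+ * the connect() timeout through TIPC_CONN_TIMEOUT.  The value is in
+ * milliseconds (converted to jiffies above); "sd" and the 10 second
+ * timeout are hypothetical.
+ *
+ *	__u32 timeout = 10000;
+ *
+ *	setsockopt(sd, SOL_TIPC, TIPC_CONN_TIMEOUT,
+ *		   &timeout, sizeof(timeout));
+ */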
+
+/**
+ * getsockopt - get socket option
+ * @sock: socket structure
+ * @lvl: option level
+ * @opt: option identifier
+ * @ov: receptacle for option value
+ * @ol: receptacle for length of option value
+ * 
+ * For stream sockets only, returns 0 length result for all IPPROTO_TCP options 
+ * (to ease compatibility).
+ * 
+ * Returns 0 on success, errno otherwise
+ */
+
+static int getsockopt(struct socket *sock, int lvl, int opt, char *ov, int *ol)
+{
+	struct tipc_sock *tsock = tipc_sk(sock->sk);
+	int len;
+	u32 value;
+	int res;
+
+	if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
+		return put_user(0, ol);
+	if (lvl != SOL_TIPC)
+		return -ENOPROTOOPT;
+	if ((res = get_user(len, ol)))
+		return res;
+
+	if (down_interruptible(&tsock->sem))
+		return -ERESTARTSYS;
+
+	switch (opt) {
+	case TIPC_IMPORTANCE:
+		res = tipc_portimportance(tsock->p->ref, &value);
+		break;
+	case TIPC_SRC_DROPPABLE:
+		res = tipc_portunreliable(tsock->p->ref, &value);
+		break;
+	case TIPC_DEST_DROPPABLE:
+		res = tipc_portunreturnable(tsock->p->ref, &value);
+		break;
+	case TIPC_CONN_TIMEOUT:
+		value = (sock->sk->sk_rcvtimeo * 1000) / HZ;
+		break;
+	default:
+		res = -EINVAL;
+	}
+
+	if (res) {
+		/* "get" failed */
+	} else if (len < sizeof(value)) {
+		res = -EINVAL;
+	} else if (copy_to_user(ov, &value, sizeof(value))) {
+		/* couldn't return value */
+		res = -EFAULT;
+	} else {
+		res = put_user(sizeof(value), ol);
+	}
+
+	up(&tsock->sem);
+	return res;
+}
+
+/**
+ * Placeholders for non-implemented functionality
+ * 
+ * Returns error code (POSIX-compliant where defined)
+ */
+
+static int ioctl(struct socket *s, u32 cmd, unsigned long arg)
+{
+	return -EINVAL;
+}
+
+static int no_mmap(struct file *file, struct socket *sock,
+		   struct vm_area_struct *vma)
+{
+	return -EINVAL;
+}
+
+static ssize_t no_sendpage(struct socket *sock, struct page *page,
+			   int offset, size_t size, int flags)
+{
+	return -EINVAL;
+}
+
+static int no_skpair(struct socket *s1, struct socket *s2)
+{
+	return -EOPNOTSUPP;
+}
+
+/**
+ * Protocol switches for the various types of TIPC sockets
+ */
+
+static struct proto_ops msg_ops = {
+	.owner 		= THIS_MODULE,
+	.family		= AF_TIPC,
+	.release	= release,
+	.bind		= bind,
+	.connect	= connect,
+	.socketpair	= no_skpair,
+	.accept		= accept,
+	.getname	= get_name,
+	.poll		= poll,
+	.ioctl		= ioctl,
+	.listen		= listen,
+	.shutdown	= shutdown,
+	.setsockopt	= setsockopt,
+	.getsockopt	= getsockopt,
+	.sendmsg	= send_msg,
+	.recvmsg	= recv_msg,
+	.mmap		= no_mmap,
+	.sendpage	= no_sendpage
+};
+
+static struct proto_ops packet_ops = {
+	.owner 		= THIS_MODULE,
+	.family		= AF_TIPC,
+	.release	= release,
+	.bind		= bind,
+	.connect	= connect,
+	.socketpair	= no_skpair,
+	.accept		= accept,
+	.getname	= get_name,
+	.poll		= poll,
+	.ioctl		= ioctl,
+	.listen		= listen,
+	.shutdown	= shutdown,
+	.setsockopt	= setsockopt,
+	.getsockopt	= getsockopt,
+	.sendmsg	= send_packet,
+	.recvmsg	= recv_msg,
+	.mmap		= no_mmap,
+	.sendpage	= no_sendpage
+};
+
+static struct proto_ops stream_ops = {
+	.owner 		= THIS_MODULE,
+	.family		= AF_TIPC,
+	.release	= release,
+	.bind		= bind,
+	.connect	= connect,
+	.socketpair	= no_skpair,
+	.accept		= accept,
+	.getname	= get_name,
+	.poll		= poll,
+	.ioctl		= ioctl,
+	.listen		= listen,
+	.shutdown	= shutdown,
+	.setsockopt	= setsockopt,
+	.getsockopt	= getsockopt,
+	.sendmsg	= send_stream,
+	.recvmsg	= recv_stream,
+	.mmap		= no_mmap,
+	.sendpage	= no_sendpage
+};
+
+static struct net_proto_family tipc_family_ops = {
+	.owner 		= THIS_MODULE,
+	.family		= AF_TIPC,
+	.create		= tipc_create
+};
+
+static struct proto tipc_proto = {
+	.name		= "TIPC",
+	.owner		= THIS_MODULE,
+	.obj_size	= sizeof(struct tipc_sock)
+};
+
+/**
+ * socket_init - initialize TIPC socket interface
+ * 
+ * Returns 0 on success, errno otherwise
+ */
+int socket_init(void)
+{
+	int res;
+
+	res = proto_register(&tipc_proto, 1);
+	if (res) {
+		err("Unable to register TIPC protocol type\n");
+		goto out;
+	}
+
+	res = sock_register(&tipc_family_ops);
+	if (res) {
+		err("Unable to register TIPC socket type\n");
+		proto_unregister(&tipc_proto);
+		goto out;
+	}
+
+	sockets_enabled = 1;
+ out:
+	return res;
+}
+
+/**
+ * socket_stop - stop TIPC socket interface
+ */
+void socket_stop(void)
+{
+	if (!sockets_enabled)
+		return;
+
+	sockets_enabled = 0;
+	sock_unregister(tipc_family_ops.family);
+	proto_unregister(&tipc_proto);
+}
+
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
new file mode 100644
index 0000000..6f651a2
--- /dev/null
+++ b/net/tipc/subscr.c
@@ -0,0 +1,522 @@
+/*
+ * net/tipc/subscr.c: TIPC subscription service
+ * 
+ * Copyright (c) 2003-2005, Ericsson Research Canada
+ * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005-2006, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without 
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this 
+ * list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice, 
+ * this list of conditions and the following disclaimer in the documentation 
+ * and/or other materials provided with the distribution.
+ * Neither the names of the copyright holders nor the names of its 
+ * contributors may be used to endorse or promote products derived from this 
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "dbg.h"
+#include "subscr.h"
+#include "name_table.h"
+#include "ref.h"
+
+/**
+ * struct subscriber - TIPC network topology subscriber
+ * @ref: object reference to subscriber object itself
+ * @lock: pointer to spinlock controlling access to subscriber object
+ * @subscriber_list: adjacent subscribers in top. server's list of subscribers
+ * @subscription_list: list of subscription objects for this subscriber
+ * @port_ref: object reference to port used to communicate with subscriber
+ * @swap: indicates if subscriber uses opposite endianness in its messages
+ */
+ 
+struct subscriber {
+	u32 ref;
+	spinlock_t *lock;
+	struct list_head subscriber_list;
+	struct list_head subscription_list;
+	u32 port_ref;
+	int swap;
+};
+
+/**
+ * struct top_srv - TIPC network topology subscription service
+ * @user_ref: TIPC userid of subscription service
+ * @setup_port: reference to TIPC port that handles subscription requests
+ * @subscription_count: number of active subscriptions (not subscribers!)
+ * @subscriber_list: list of ports subscribing to service
+ * @lock: spinlock governing access to subscriber list
+ */
+
+struct top_srv {
+	u32 user_ref;
+	u32 setup_port;
+	atomic_t subscription_count;
+	struct list_head subscriber_list;
+	spinlock_t lock;
+};
+
+static struct top_srv topsrv = { 0 };
+
+/**
+ * htohl - convert value to endianness used by destination
+ * @in: value to convert
+ * @swap: non-zero if endianness must be reversed
+ * 
+ * Returns converted value
+ */
+
+static inline u32 htohl(u32 in, int swap)
+{
+	return swap ? swab32(in) : in;
+}
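+
+/*
+ * Worked example: with @swap non-zero, htohl(0x01020304, 1) returns
+ * 0x04030201; with @swap zero the value is returned unchanged.
+ */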
+
+/**
+ * subscr_send_event - send a message containing a tipc_event to the subscriber
+ */
+
+static void subscr_send_event(struct subscription *sub, 
+			      u32 found_lower, 
+			      u32 found_upper,
+			      u32 event, 
+			      u32 port_ref, 
+			      u32 node)
+{
+	struct iovec msg_sect;
+
+	msg_sect.iov_base = (void *)&sub->evt;
+	msg_sect.iov_len = sizeof(struct tipc_event);
+
+	sub->evt.event = htohl(event, sub->owner->swap);
+	sub->evt.found_lower = htohl(found_lower, sub->owner->swap);
+	sub->evt.found_upper = htohl(found_upper, sub->owner->swap);
+	sub->evt.port.ref = htohl(port_ref, sub->owner->swap);
+	sub->evt.port.node = htohl(node, sub->owner->swap);
+	tipc_send(sub->owner->port_ref, 1, &msg_sect);
+}
+
+/**
+ * subscr_overlap - test for subscription overlap with the given values
+ *
+ * Returns 1 if there is overlap, otherwise 0.
+ */
+
+int subscr_overlap(struct subscription *sub, 
+		   u32 found_lower, 
+		   u32 found_upper)
+
+{
+	if (found_lower < sub->seq.lower)
+		found_lower = sub->seq.lower;
+	if (found_upper > sub->seq.upper)
+		found_upper = sub->seq.upper;
+	if (found_lower > found_upper)
+		return 0;
+	return 1;
+}
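+
+/*
+ * Worked example: a subscription covering instances 100-200 overlaps
+ * a published range 150-300 (the range is clipped to 150-200, which
+ * is non-empty), but not a range 201-300 (the clipped lower bound 201
+ * exceeds the upper bound 200, so 0 is returned).
+ */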
+
+/**
+ * subscr_report_overlap - issue event if there is subscription overlap
+ * 
+ * Protected by nameseq.lock in name_table.c
+ */
+
+void subscr_report_overlap(struct subscription *sub, 
+			   u32 found_lower, 
+			   u32 found_upper,
+			   u32 event, 
+			   u32 port_ref, 
+			   u32 node,
+			   int must)
+{
+	dbg("Rep overlap %u:%u,%u<->%u,%u\n", sub->seq.type, sub->seq.lower,
+	    sub->seq.upper, found_lower, found_upper);
+	if (!subscr_overlap(sub, found_lower, found_upper))
+		return;
+	if (!must && (sub->filter != TIPC_SUB_PORTS))
+		return;
+	subscr_send_event(sub, found_lower, found_upper, event, port_ref, node);
+}
+
+/**
+ * subscr_timeout - subscription timeout has occurred
+ */
+
+static void subscr_timeout(struct subscription *sub)
+{
+	struct subscriber *subscriber;
+	u32 subscriber_ref;
+
+	/* Validate subscriber reference (in case subscriber is terminating) */
+
+	subscriber_ref = sub->owner->ref;
+	subscriber = (struct subscriber *)ref_lock(subscriber_ref);
+	if (subscriber == NULL)
+		return;
+
+	/* Unlink subscription from name table */
+
+	nametbl_unsubscribe(sub);
+
+	/* Notify subscriber of timeout, then unlink subscription */
+
+	subscr_send_event(sub, 
+			  sub->evt.s.seq.lower, 
+			  sub->evt.s.seq.upper,
+			  TIPC_SUBSCR_TIMEOUT, 
+			  0, 
+			  0);
+	list_del(&sub->subscription_list);
+
+	/* Now destroy subscription */
+
+	ref_unlock(subscriber_ref);
+	k_term_timer(&sub->timer);
+	kfree(sub);
+	atomic_dec(&topsrv.subscription_count);
+}
+
+/**
+ * subscr_terminate - terminate communication with a subscriber
+ * 
+ * Called with subscriber locked.  Routine must temporarily release this lock
+ * to enable subscription timeout routine(s) to finish without deadlocking; 
+ * the lock is then reclaimed to allow caller to release it upon return.
+ * (This should work even in the unlikely event some other thread creates 
+ * a new object reference in the interim that uses this lock; this routine will
+ * simply wait for it to be released, then claim it.)
+ */
+
+static void subscr_terminate(struct subscriber *subscriber)
+{
+	struct subscription *sub;
+	struct subscription *sub_temp;
+
+	/* Invalidate subscriber reference */
+
+	ref_discard(subscriber->ref);
+	spin_unlock_bh(subscriber->lock);
+
+	/* Destroy any existing subscriptions for subscriber */
+	
+	list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list,
+				 subscription_list) {
+		if (sub->timeout != TIPC_WAIT_FOREVER) {
+			k_cancel_timer(&sub->timer);
+			k_term_timer(&sub->timer);
+		}
+		nametbl_unsubscribe(sub);
+		list_del(&sub->subscription_list);
+		dbg("Term: Removed sub %u,%u,%u from subscriber %x list\n",
+		    sub->seq.type, sub->seq.lower, sub->seq.upper, subscriber);
+		kfree(sub);
+		atomic_dec(&topsrv.subscription_count);
+	}
+
+	/* Sever connection to subscriber */
+
+	tipc_shutdown(subscriber->port_ref);
+	tipc_deleteport(subscriber->port_ref);
+
+	/* Remove subscriber from topology server's subscriber list */
+
+	spin_lock_bh(&topsrv.lock);
+	list_del(&subscriber->subscriber_list);
+	spin_unlock_bh(&topsrv.lock);
+
+	/* Now destroy subscriber */
+
+	spin_lock_bh(subscriber->lock);
+	kfree(subscriber);
+}
+
+/**
+ * subscr_subscribe - create subscription for subscriber
+ * 
+ * Called with subscriber locked
+ */
+
+static void subscr_subscribe(struct tipc_subscr *s,
+			     struct subscriber *subscriber)
+{
+	struct subscription *sub;
+
+	/* Refuse subscription if global limit exceeded */
+
+	if (atomic_read(&topsrv.subscription_count) >= tipc_max_subscriptions) {
+		warn("Failed: max %u subscriptions\n", tipc_max_subscriptions);
+		subscr_terminate(subscriber);
+		return;
+	}
+
+	/* Allocate subscription object */
+
+	sub = kmalloc(sizeof(*sub), GFP_ATOMIC);
+	if (sub == NULL) {
+		warn("Memory squeeze; ignoring subscription\n");
+		subscr_terminate(subscriber);
+		return;
+	}
+
+	/* Determine/update subscriber's endianness */
+
+	if ((s->filter == TIPC_SUB_PORTS) || (s->filter == TIPC_SUB_SERVICE))
+		subscriber->swap = 0;
+	else
+		subscriber->swap = 1;
+
+	/* Initialize subscription object */
+
+	memset(sub, 0, sizeof(*sub));
+	sub->seq.type = htohl(s->seq.type, subscriber->swap);
+	sub->seq.lower = htohl(s->seq.lower, subscriber->swap);
+	sub->seq.upper = htohl(s->seq.upper, subscriber->swap);
+	sub->timeout = htohl(s->timeout, subscriber->swap);
+	sub->filter = htohl(s->filter, subscriber->swap);
+	if (((sub->filter != TIPC_SUB_PORTS) &&
+	     (sub->filter != TIPC_SUB_SERVICE)) ||
+	    (sub->seq.lower > sub->seq.upper)) {
+		warn("Rejecting illegal subscription %u,%u,%u\n",
+		     sub->seq.type, sub->seq.lower, sub->seq.upper);
+		kfree(sub);
+		subscr_terminate(subscriber);
+		return;
+	}
+	memcpy(&sub->evt.s, s, sizeof(struct tipc_subscr));
+	INIT_LIST_HEAD(&sub->subscription_list);
+	INIT_LIST_HEAD(&sub->nameseq_list);
+	list_add(&sub->subscription_list, &subscriber->subscription_list);
+	atomic_inc(&topsrv.subscription_count);
+	if (sub->timeout != TIPC_WAIT_FOREVER) {
+		k_init_timer(&sub->timer,
+			     (Handler)subscr_timeout, (unsigned long)sub);
+		k_start_timer(&sub->timer, sub->timeout);
+	}
+	sub->owner = subscriber;
+	nametbl_subscribe(sub);
+}
+
+/**
+ * subscr_conn_shutdown_event - handle termination request from subscriber
+ */
+
+static void subscr_conn_shutdown_event(void *usr_handle,
+				       u32 portref,
+				       struct sk_buff **buf,
+				       unsigned char const *data,
+				       unsigned int size,
+				       int reason)
+{
+	struct subscriber *subscriber = ref_lock((u32)usr_handle);
+	spinlock_t *subscriber_lock;
+
+	if (subscriber == NULL)
+		return;
+
+	subscriber_lock = subscriber->lock;
+	subscr_terminate(subscriber);
+	spin_unlock_bh(subscriber_lock);
+}
+
+/**
+ * subscr_conn_msg_event - handle new subscription request from subscriber
+ */
+
+static void subscr_conn_msg_event(void *usr_handle,
+				  u32 port_ref,
+				  struct sk_buff **buf,
+				  const unchar *data,
+				  u32 size)
+{
+	struct subscriber *subscriber = ref_lock((u32)usr_handle);
+	spinlock_t *subscriber_lock;
+
+	if (subscriber == NULL)
+		return;
+
+	subscriber_lock = subscriber->lock;
+	if (size != sizeof(struct tipc_subscr))
+		subscr_terminate(subscriber);
+	else
+		subscr_subscribe((struct tipc_subscr *)data, subscriber);
+	
+	spin_unlock_bh(subscriber_lock);
+}
+
+/**
+ * subscr_named_msg_event - handle request to establish a new subscriber
+ */
+
+static void subscr_named_msg_event(void *usr_handle,
+				   u32 port_ref,
+				   struct sk_buff **buf,
+				   const unchar *data,
+				   u32 size,
+				   u32 importance, 
+				   struct tipc_portid const *orig,
+				   struct tipc_name_seq const *dest)
+{
+	struct subscriber *subscriber;
+	struct iovec msg_sect = {0, 0};
+	spinlock_t *subscriber_lock;
+
+	dbg("subscr_named_msg_event: orig = %x own = %x,\n",
+	    orig->node, tipc_own_addr);
+	if (size && (size != sizeof(struct tipc_subscr))) {
+		warn("Received tipc_subscr of invalid size\n");
+		return;
+	}
+
+	/* Create subscriber object */
+
+	subscriber = kmalloc(sizeof(struct subscriber), GFP_ATOMIC);
+	if (subscriber == NULL) {
+		warn("Memory squeeze; ignoring subscriber setup\n");
+		return;
+	}
+	memset(subscriber, 0, sizeof(struct subscriber));
+	INIT_LIST_HEAD(&subscriber->subscription_list);
+	INIT_LIST_HEAD(&subscriber->subscriber_list);
+	subscriber->ref = ref_acquire(subscriber, &subscriber->lock);
+	if (subscriber->ref == 0) {
+		warn("Failed to acquire subscriber reference\n");
+		kfree(subscriber);
+		return;
+	}
+
+	/* Establish a connection to subscriber */
+
+	tipc_createport(topsrv.user_ref,
+			(void *)subscriber->ref,
+			importance,
+			0,
+			0,
+			subscr_conn_shutdown_event,
+			0,
+			0,
+			subscr_conn_msg_event,
+			0,
+			&subscriber->port_ref);
+	if (subscriber->port_ref == 0) {
+		warn("Memory squeeze; failed to create subscription port\n");
+		ref_discard(subscriber->ref);
+		kfree(subscriber);
+		return;
+	}
+	tipc_connect2port(subscriber->port_ref, orig);
+
+	/* Add subscriber to topology server's subscriber list */
+
+	ref_lock(subscriber->ref);
+	spin_lock_bh(&topsrv.lock);
+	list_add(&subscriber->subscriber_list, &topsrv.subscriber_list);
+	spin_unlock_bh(&topsrv.lock);
+
+	/*
+	 * Subscribe now if message contains a subscription,
+	 * otherwise send an empty response to complete connection handshaking
+	 */
+
+	subscriber_lock = subscriber->lock;
+	if (size)
+		subscr_subscribe((struct tipc_subscr *)data, subscriber);
+	else
+		tipc_send(subscriber->port_ref, 1, &msg_sect);
+
+	spin_unlock_bh(subscriber_lock);
+}
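+
+/*
+ * Illustrative user-space sketch (not part of this patch): requesting
+ * a topology subscription.  The application connects a socket "sd" to
+ * the topology server's reserved name {TIPC_TOP_SRV, TIPC_TOP_SRV},
+ * then sends a struct tipc_subscr; matching struct tipc_event
+ * messages arrive back on the same connection.  The watched range
+ * (type 1000, instances 0-99) is hypothetical.
+ *
+ *	struct tipc_subscr subscr;
+ *
+ *	memset(&subscr, 0, sizeof(subscr));
+ *	subscr.seq.type = 1000;
+ *	subscr.seq.lower = 0;
+ *	subscr.seq.upper = 99;
+ *	subscr.timeout = TIPC_WAIT_FOREVER;
+ *	subscr.filter = TIPC_SUB_PORTS;
+ *	send(sd, &subscr, sizeof(subscr), 0);
+ */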
+
+int subscr_start(void)
+{
+	struct tipc_name_seq seq = {TIPC_TOP_SRV, TIPC_TOP_SRV, TIPC_TOP_SRV};
+	int res = -1;
+
+	memset(&topsrv, 0, sizeof (topsrv));
+	topsrv.lock = SPIN_LOCK_UNLOCKED;
+	INIT_LIST_HEAD(&topsrv.subscriber_list);
+
+	spin_lock_bh(&topsrv.lock);
+	res = tipc_attach(&topsrv.user_ref, 0, 0);
+	if (res) {
+		spin_unlock_bh(&topsrv.lock);
+		return res;
+	}
+
+	res = tipc_createport(topsrv.user_ref,
+			      0,
+			      TIPC_CRITICAL_IMPORTANCE,
+			      0,
+			      0,
+			      0,
+			      0,
+			      subscr_named_msg_event,
+			      0,
+			      0,
+			      &topsrv.setup_port);
+	if (res)
+		goto failed;
+
+	res = nametbl_publish_rsv(topsrv.setup_port, TIPC_NODE_SCOPE, &seq);
+	if (res)
+		goto failed;
+
+	spin_unlock_bh(&topsrv.lock);
+	return 0;
+
+failed:
+	err("Unable to create subscription service\n");
+	tipc_detach(topsrv.user_ref);
+	topsrv.user_ref = 0;
+	spin_unlock_bh(&topsrv.lock);
+	return res;
+}
+
+void subscr_stop(void)
+{
+	struct subscriber *subscriber;
+	struct subscriber *subscriber_temp;
+	spinlock_t *subscriber_lock;
+
+	if (topsrv.user_ref) {
+		tipc_deleteport(topsrv.setup_port);
+		list_for_each_entry_safe(subscriber, subscriber_temp, 
+					 &topsrv.subscriber_list,
+					 subscriber_list) {
+			ref_lock(subscriber->ref);
+			subscriber_lock = subscriber->lock;
+			subscr_terminate(subscriber);
+			spin_unlock_bh(subscriber_lock);
+		}
+		tipc_detach(topsrv.user_ref);
+		topsrv.user_ref = 0;
+	}
+}
+
+int tipc_ispublished(struct tipc_name const *name)
+{
+	u32 domain = 0;
+
+	return (nametbl_translate(name->type, name->instance, &domain) != 0);
+}
+
diff --git a/net/tipc/subscr.h b/net/tipc/subscr.h
new file mode 100644
index 0000000..e6cb9c3
--- /dev/null
+++ b/net/tipc/subscr.h
@@ -0,0 +1,77 @@
+/*
+ * net/tipc/subscr.h: Include file for TIPC subscription service
+ * 
+ * Copyright (c) 2003-2005, Ericsson Research Canada
+ * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005-2006, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without 
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this 
+ * list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice, 
+ * this list of conditions and the following disclaimer in the documentation 
+ * and/or other materials provided with the distribution.
+ * Neither the names of the copyright holders nor the names of its 
+ * contributors may be used to endorse or promote products derived from this 
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_SUBSCR_H
+#define _TIPC_SUBSCR_H
+
+/**
+ * struct subscription - TIPC network topology subscription object
+ * @seq: name sequence associated with subscription
+ * @timeout: duration of subscription (in ms)
+ * @filter: event filtering to be done for subscription
+ * @evt: template for events generated by subscription
+ * @subscription_list: adjacent subscriptions in subscriber's subscription list
+ * @nameseq_list: adjacent subscriptions in name sequence's subscription list
+ * @timer: timer governing subscription duration (unused if timeout is TIPC_WAIT_FOREVER)
+ * @owner: pointer to subscriber object associated with this subscription
+ */
+ 
+struct subscription {
+	struct tipc_name_seq seq;
+	u32 timeout;
+	u32 filter;
+	struct tipc_event evt;
+	struct list_head subscription_list;
+	struct list_head nameseq_list;
+	struct timer_list timer;
+	struct subscriber *owner;
+};
+
+int subscr_overlap(struct subscription *sub,
+		   u32 found_lower, 
+		   u32 found_upper);
+
+void subscr_report_overlap(struct subscription *sub,
+			   u32 found_lower, 
+			   u32 found_upper,
+			   u32 event, 
+			   u32 port_ref, 
+			   u32 node,
+			   int must_report);
+
+int subscr_start(void);
+
+void subscr_stop(void);
+
+#endif
diff --git a/net/tipc/user_reg.c b/net/tipc/user_reg.c
new file mode 100644
index 0000000..e4da5be
--- /dev/null
+++ b/net/tipc/user_reg.c
@@ -0,0 +1,262 @@
+/*
+ * net/tipc/user_reg.c: TIPC user registry code
+ * 
+ * Copyright (c) 2003-2005, Ericsson Research Canada
+ * Copyright (c) 2004-2005, Wind River Systems
+ * Copyright (c) 2005-2006, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * 
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * Neither the names of the copyright holders nor the names of its 
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "user_reg.h"
+
+/*
+ * TIPC user registry keeps track of users of the tipc_port interface.
+ *
+ * The registry utilizes an array of "TIPC user" entries; 
+ * a user's ID is the index of their associated array entry.
+ * Array entry 0 is not used, so userid 0 is not valid;
+ * TIPC sometimes uses this value to denote an anonymous user.
+ * The list of free entries is initially chained from last entry to entry 1.
+ */
+
+/**
+ * struct tipc_user - registered TIPC user info
+ * @next: index of next free registry entry (or -1 for an allocated entry)
+ * @callback: ptr to routine to call when TIPC mode changes (NULL if none)
+ * @usr_handle: user-defined value passed to callback routine 
+ * @ports: list of user ports owned by the user
+ */
+
+struct tipc_user {
+	int next;
+	tipc_mode_event callback;
+	void *usr_handle;
+	struct list_head ports;
+};
+
+#define MAX_USERID 64
+#define USER_LIST_SIZE ((MAX_USERID + 1) * sizeof(struct tipc_user))
+
+static struct tipc_user *users = 0;
+static u32 next_free_user = MAX_USERID + 1;
+static spinlock_t reg_lock = SPIN_LOCK_UNLOCKED;
+
+/**
+ * reg_init - create TIPC user registry (but don't activate it)
+ * 
+ * If registry has been pre-initialized it is left "as is".
+ * NOTE: This routine may be called when TIPC is inactive.
+ */
+
+static int reg_init(void)
+{
+	u32 i;
+	
+	spin_lock_bh(&reg_lock);
+	if (!users) {
+		users = (struct tipc_user *)kmalloc(USER_LIST_SIZE, GFP_ATOMIC);
+		if (users) {
+			memset(users, 0, USER_LIST_SIZE);
+			for (i = 1; i <= MAX_USERID; i++) {
+				users[i].next = i - 1;
+			}
+			next_free_user = MAX_USERID;
+		}
+	}
+	spin_unlock_bh(&reg_lock);
+	return users ? TIPC_OK : -ENOMEM;
+}
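+
+/*
+ * Worked example of the free list built above: users[64].next is 63,
+ * users[63].next is 62, ..., users[1].next is 0, and next_free_user
+ * is 64 (MAX_USERID).  tipc_attach() therefore hands out userids
+ * 64, 63, ... downwards, and userid 0 is never allocated.
+ */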
+
+/**
+ * reg_callback - inform TIPC user about current operating mode
+ */
+
+static void reg_callback(struct tipc_user *user_ptr)
+{
+	tipc_mode_event cb;
+	void *arg;
+
+	spin_lock_bh(&reg_lock);
+	cb = user_ptr->callback;
+	arg = user_ptr->usr_handle;
+	spin_unlock_bh(&reg_lock);
+
+	if (cb)
+		cb(arg, tipc_mode, tipc_own_addr);
+}
+
+/**
+ * reg_start - activate TIPC user registry
+ */
+
+int reg_start(void)
+{
+	u32 u;
+	int res;
+
+	if ((res = reg_init()))
+		return res;
+
+	for (u = 1; u <= MAX_USERID; u++) {
+		if (users[u].callback)
+			k_signal((Handler)reg_callback,
+				 (unsigned long)&users[u]);
+	}
+	return TIPC_OK;
+}
+
+/**
+ * reg_stop - shut down & delete TIPC user registry
+ */
+
+void reg_stop(void)
+{
+	int id;
+
+	if (!users)
+		return;
+
+	for (id = 1; id <= MAX_USERID; id++) {
+		if (users[id].callback)
+			reg_callback(&users[id]);
+	}
+	kfree(users);
+	users = 0;
+}
+
+/**
+ * tipc_attach - register a TIPC user
+ *
+ * NOTE: This routine may be called when TIPC is inactive.
+ */
+
+int tipc_attach(u32 *userid, tipc_mode_event cb, void *usr_handle)
+{
+	struct tipc_user *user_ptr;
+
+	if ((tipc_mode == TIPC_NOT_RUNNING) && !cb)
+		return -ENOPROTOOPT;
+	if (!users)
+		reg_init();
+
+	spin_lock_bh(&reg_lock);
+	if (!next_free_user) {
+		spin_unlock_bh(&reg_lock);
+		return -EBUSY;
+	}
+	user_ptr = &users[next_free_user];
+	*userid = next_free_user;
+	next_free_user = user_ptr->next;
+	user_ptr->next = -1; 
+	spin_unlock_bh(&reg_lock);
+
+	user_ptr->callback = cb;
+	user_ptr->usr_handle = usr_handle;
+	INIT_LIST_HEAD(&user_ptr->ports);
+	atomic_inc(&tipc_user_count);
+	
+	if (cb && (tipc_mode != TIPC_NOT_RUNNING))
+		k_signal((Handler)reg_callback, (unsigned long)user_ptr);
+	return TIPC_OK;
+}
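+
+/*
+ * Illustrative in-kernel sketch (not part of this patch): registering
+ * as a TIPC user with a mode-change callback.  The callback name and
+ * body are hypothetical; its arguments follow reg_callback() above.
+ *
+ *	static void my_mode_event(void *handle, int mode, u32 addr)
+ *	{
+ *		... react to TIPC entering or leaving network mode
+ *	}
+ *
+ *	u32 my_userid;
+ *
+ *	if (tipc_attach(&my_userid, my_mode_event, 0) != TIPC_OK)
+ *		... registration failed
+ */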
+
+/**
+ * tipc_detach - deregister a TIPC user
+ */
+
+void tipc_detach(u32 userid)
+{
+	struct tipc_user *user_ptr;
+	struct list_head ports_temp;
+	struct user_port *up_ptr, *temp_up_ptr;
+
+	if ((userid == 0) || (userid > MAX_USERID))
+		return;
+
+	spin_lock_bh(&reg_lock);
+	if ((!users) || (users[userid].next >= 0)) {
+		spin_unlock_bh(&reg_lock);
+		return;
+	}
+
+	user_ptr = &users[userid];
+	user_ptr->callback = NULL;
+	INIT_LIST_HEAD(&ports_temp);
+	list_splice(&user_ptr->ports, &ports_temp);
+	user_ptr->next = next_free_user;
+	next_free_user = userid;
+	spin_unlock_bh(&reg_lock);
+
+	atomic_dec(&tipc_user_count);
+
+	list_for_each_entry_safe(up_ptr, temp_up_ptr, &ports_temp, uport_list) {
+		tipc_deleteport(up_ptr->ref);
+	}
+}
+
+/**
+ * reg_add_port - register a user's driver port
+ */
+
+int reg_add_port(struct user_port *up_ptr)
+{
+	struct tipc_user *user_ptr;
+
+	if (up_ptr->user_ref == 0)
+		return TIPC_OK;
+	if (up_ptr->user_ref > MAX_USERID)
+		return -EINVAL;
+	if ((tipc_mode == TIPC_NOT_RUNNING) || !users)
+		return -ENOPROTOOPT;
+
+	spin_lock_bh(&reg_lock);
+	user_ptr = &users[up_ptr->user_ref];
+	list_add(&up_ptr->uport_list, &user_ptr->ports);
+	spin_unlock_bh(&reg_lock);
+	return TIPC_OK;
+}
+
+/**
+ * reg_remove_port - deregister a user's driver port
+ */
+
+int reg_remove_port(struct user_port *up_ptr)
+{
+	if (up_ptr->user_ref == 0)
+		return TIPC_OK;
+	if (up_ptr->user_ref > MAX_USERID)
+		return -EINVAL;
+	if (!users)
+		return -ENOPROTOOPT;
+
+	spin_lock_bh(&reg_lock);
+	list_del_init(&up_ptr->uport_list);
+	spin_unlock_bh(&reg_lock);
+	return TIPC_OK;
+}
+
diff --git a/net/tipc/user_reg.h b/net/tipc/user_reg.h
new file mode 100644
index 0000000..44f2194
--- /dev/null
+++ b/net/tipc/user_reg.h
@@ -0,0 +1,45 @@
+/*
+ * net/tipc/user_reg.h: Include file for TIPC user registry code
+ * 
+ * Copyright (c) 2003-2005, Ericsson Research Canada
+ * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005-2006, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without 
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this 
+ * list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice, 
+ * this list of conditions and the following disclaimer in the documentation 
+ * and/or other materials provided with the distribution.
+ * Neither the names of the copyright holders nor the names of its 
+ * contributors may be used to endorse or promote products derived from this 
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_USER_REG_H
+#define _TIPC_USER_REG_H
+
+#include "port.h"
+
+int reg_start(void);
+void reg_stop(void);
+
+int reg_add_port(struct user_port *up_ptr);
+int reg_remove_port(struct user_port *up_ptr);
+
+#endif
diff --git a/net/tipc/zone.c b/net/tipc/zone.c
new file mode 100644
index 0000000..336eebb7
--- /dev/null
+++ b/net/tipc/zone.c
@@ -0,0 +1,166 @@
+/*
+ * net/tipc/zone.c: TIPC zone management routines
+ * 
+ * Copyright (c) 2003-2005, Ericsson Research Canada
+ * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005-2006, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * Neither the names of the copyright holders nor the names of its 
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "zone.h"
+#include "net.h"
+#include "addr.h"
+#include "node_subscr.h"
+#include "cluster.h"
+#include "node.h"
+
+struct _zone *zone_create(u32 addr)
+{
+	struct _zone *z_ptr = 0;
+	u32 z_num;
+
+	if (!addr_domain_valid(addr))
+		return 0;
+
+	z_ptr = (struct _zone *)kmalloc(sizeof(*z_ptr), GFP_ATOMIC);
+	if (z_ptr != NULL) {
+		memset(z_ptr, 0, sizeof(*z_ptr));
+		z_num = tipc_zone(addr);
+		z_ptr->addr = tipc_addr(z_num, 0, 0);
+		net.zones[z_num] = z_ptr;
+	}
+	return z_ptr;
+}
+
+void zone_delete(struct _zone *z_ptr)
+{
+	u32 c_num;
+
+	if (!z_ptr)
+		return;
+	for (c_num = 1; c_num <= tipc_max_clusters; c_num++) {
+		cluster_delete(z_ptr->clusters[c_num]);
+	}
+	kfree(z_ptr);
+}
+
+void zone_attach_cluster(struct _zone *z_ptr, struct cluster *c_ptr)
+{
+	u32 c_num = tipc_cluster(c_ptr->addr);
+
+	assert(c_ptr->addr);
+	assert(c_num <= tipc_max_clusters);
+	assert(z_ptr->clusters[c_num] == 0);
+	z_ptr->clusters[c_num] = c_ptr;
+}
+
+void zone_remove_as_router(struct _zone *z_ptr, u32 router)
+{
+	u32 c_num;
+
+	for (c_num = 1; c_num <= tipc_max_clusters; c_num++) {
+		if (z_ptr->clusters[c_num]) {
+			cluster_remove_as_router(z_ptr->clusters[c_num], 
+						 router);
+		}
+	}
+}
+
+void zone_send_external_routes(struct _zone *z_ptr, u32 dest)
+{
+	u32 c_num;
+
+	for (c_num = 1; c_num <= tipc_max_clusters; c_num++) {
+		if (z_ptr->clusters[c_num]) {
+			if (in_own_cluster(z_ptr->addr))
+				continue;
+			cluster_send_ext_routes(z_ptr->clusters[c_num], dest);
+		}
+	}
+}
+
+struct node *zone_select_remote_node(struct _zone *z_ptr, u32 addr, u32 ref)
+{
+	struct cluster *c_ptr;
+	struct node *n_ptr;
+	u32 c_num;
+
+	if (!z_ptr)
+		return 0;
+	c_ptr = z_ptr->clusters[tipc_cluster(addr)];
+	if (!c_ptr)
+		return 0;
+	n_ptr = cluster_select_node(c_ptr, ref);
+	if (n_ptr)
+		return n_ptr;
+
+	/* Links to any other clusters within this zone? */
+	for (c_num = 1; c_num <= tipc_max_clusters; c_num++) {
+		c_ptr = z_ptr->clusters[c_num];
+		if (!c_ptr)
+			return 0;
+		n_ptr = cluster_select_node(c_ptr, ref);
+		if (n_ptr)
+			return n_ptr;
+	}
+	return 0;
+}
+
+u32 zone_select_router(struct _zone *z_ptr, u32 addr, u32 ref)
+{
+	struct cluster *c_ptr;
+	u32 c_num;
+	u32 router;
+
+	if (!z_ptr)
+		return 0;
+	c_ptr = z_ptr->clusters[tipc_cluster(addr)];
+	router = c_ptr ? cluster_select_router(c_ptr, ref) : 0;
+	if (router)
+		return router;
+
+	/* Links to any other clusters within the zone? */
+	for (c_num = 1; c_num <= tipc_max_clusters; c_num++) {
+		c_ptr = z_ptr->clusters[c_num];
+		router = c_ptr ? cluster_select_router(c_ptr, ref) : 0;
+		if (router)
+			return router;
+	}
+	return 0;
+}
+
+u32 zone_next_node(u32 addr)
+{
+	struct cluster *c_ptr = cluster_find(addr);
+
+	if (c_ptr)
+		return cluster_next_node(c_ptr, addr);
+	return 0;
+}
+
diff --git a/net/tipc/zone.h b/net/tipc/zone.h
new file mode 100644
index 0000000..19fd51a
--- /dev/null
+++ b/net/tipc/zone.h
@@ -0,0 +1,68 @@
+/*
+ * net/tipc/zone.h: Include file for TIPC zone management routines
+ * 
+ * Copyright (c) 2003-2005, Ericsson Research Canada
+ * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005-2006, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * Neither the names of the copyright holders nor the names of its 
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_ZONE_H
+#define _TIPC_ZONE_H
+
+#include "node_subscr.h"
+#include "net.h"
+
+
+/**
+ * struct _zone - TIPC zone structure
+ * @addr: network address of zone
+ * @clusters: array of pointers to all clusters within zone
+ * @links: (used for inter-zone communication)
+ */
+ 
+struct _zone {
+	u32 addr;
+	struct cluster *clusters[2]; /* currently limited to just 1 cluster */
+	u32 links;
+};
+
+struct node *zone_select_remote_node(struct _zone *z_ptr, u32 addr, u32 ref);
+u32 zone_select_router(struct _zone *z_ptr, u32 addr, u32 ref);
+void zone_remove_as_router(struct _zone *z_ptr, u32 router);
+void zone_send_external_routes(struct _zone *z_ptr, u32 dest);
+struct _zone *zone_create(u32 addr);
+void zone_delete(struct _zone *z_ptr);
+void zone_attach_cluster(struct _zone *z_ptr, struct cluster *c_ptr);
+u32 zone_next_node(u32 addr);
+
+static inline struct _zone *zone_find(u32 addr)
+{
+	return net.zones[tipc_zone(addr)];
+}
+
+#endif