
Add totempg_threaded_mode_enable() API

This API allows totem to operate as a multithreaded library.  Performance is
better without threads, but some library users may only have multithreaded
systems.  In the corosync case, where threads have been removed, this change
reduces CPU utilization by roughly 10% by eliminating about 50% of the mutex
lock and unlock calls that occur during typical operation.  Since the latest
corosync is nearly thread-free, there is no need for the mutex operations.
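
The locking change follows one pattern throughout: every pthread_mutex_lock/unlock
pair is guarded by a runtime flag, so a single-threaded caller pays no locking cost
at all. A minimal standalone sketch of that guard pattern, using illustrative names
that are not part of corosync:

#include <pthread.h>

struct guarded_counter {
	pthread_mutex_t mutex;
	int threaded_mode_enabled;	/* 0 = single-threaded caller, skip all locking */
	long value;
};

static inline void guarded_counter_init (struct guarded_counter *c, int threaded_mode_enabled)
{
	c->threaded_mode_enabled = threaded_mode_enabled;
	c->value = 0;
	if (c->threaded_mode_enabled) {
		pthread_mutex_init (&c->mutex, NULL);
	}
}

static inline void guarded_counter_add (struct guarded_counter *c, long delta)
{
	if (c->threaded_mode_enabled) {
		pthread_mutex_lock (&c->mutex);
	}
	c->value += delta;
	if (c->threaded_mode_enabled) {
		pthread_mutex_unlock (&c->mutex);
	}
}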

Signed-off-by: Steven Dake <sdake@redhat.com>
Reviewed-by: Angus Salkeld <asalkeld@redhat.com>
Steven Dake committed 14 years ago
Commit 71f044bfe7
6 changed files with 223 additions and 66 deletions
  1. exec/cs_queue.h    +92 -30
  2. exec/totemmrp.c    +9 -0
  3. exec/totemmrp.h    +2 -0
  4. exec/totempg.c     +106 -34
  5. exec/totemsrp.c    +11 -2
  6. exec/totemsrp.h    +3 -0

+ 92 - 30
exec/cs_queue.h

@@ -50,58 +50,76 @@ struct cs_queue {
 	int size_per_item;
 	int iterator;
 	pthread_mutex_t mutex;
+	int threaded_mode_enabled;
 };
 
-static inline int cs_queue_init (struct cs_queue *cs_queue, int cs_queue_items, int size_per_item) {
+static inline int cs_queue_init (struct cs_queue *cs_queue, int cs_queue_items, int size_per_item, int threaded_mode_enabled) {
 	cs_queue->head = 0;
 	cs_queue->tail = cs_queue_items - 1;
 	cs_queue->used = 0;
 	cs_queue->usedhw = 0;
 	cs_queue->size = cs_queue_items;
 	cs_queue->size_per_item = size_per_item;
+	cs_queue->threaded_mode_enabled = threaded_mode_enabled;
 
 	cs_queue->items = malloc (cs_queue_items * size_per_item);
 	if (cs_queue->items == 0) {
 		return (-ENOMEM);
 	}
 	memset (cs_queue->items, 0, cs_queue_items * size_per_item);
-	pthread_mutex_init (&cs_queue->mutex, NULL);
+	if (cs_queue->threaded_mode_enabled) {
+		pthread_mutex_init (&cs_queue->mutex, NULL);
+	}
 	return (0);
 }
 
 static inline int cs_queue_reinit (struct cs_queue *cs_queue)
 {
-	pthread_mutex_lock (&cs_queue->mutex);
+	if (cs_queue->threaded_mode_enabled) {
+		pthread_mutex_lock (&cs_queue->mutex);
+	}
 	cs_queue->head = 0;
 	cs_queue->tail = cs_queue->size - 1;
 	cs_queue->used = 0;
 	cs_queue->usedhw = 0;
 
 	memset (cs_queue->items, 0, cs_queue->size * cs_queue->size_per_item);
-	pthread_mutex_unlock (&cs_queue->mutex);
+	if (cs_queue->threaded_mode_enabled) {
+		pthread_mutex_unlock (&cs_queue->mutex);
+	}
 	return (0);
 }
 
 static inline void cs_queue_free (struct cs_queue *cs_queue) {
-	pthread_mutex_destroy (&cs_queue->mutex);
+	if (cs_queue->threaded_mode_enabled) {
+		pthread_mutex_destroy (&cs_queue->mutex);
+	}
 	free (cs_queue->items);
 }
 
 static inline int cs_queue_is_full (struct cs_queue *cs_queue) {
 	int full;
 
-	pthread_mutex_lock (&cs_queue->mutex);
+	if (cs_queue->threaded_mode_enabled) {
+		pthread_mutex_lock (&cs_queue->mutex);
+	}
 	full = ((cs_queue->size - 1) == cs_queue->used);
-	pthread_mutex_unlock (&cs_queue->mutex);
+	if (cs_queue->threaded_mode_enabled) {
+		pthread_mutex_unlock (&cs_queue->mutex);
+	}
 	return (full);
 }
 
 static inline int cs_queue_is_empty (struct cs_queue *cs_queue) {
 	int empty;
 
-	pthread_mutex_lock (&cs_queue->mutex);
+	if (cs_queue->threaded_mode_enabled) {
+		pthread_mutex_lock (&cs_queue->mutex);
+	}
 	empty = (cs_queue->used == 0);
-	pthread_mutex_unlock (&cs_queue->mutex);
+	if (cs_queue->threaded_mode_enabled) {
+		pthread_mutex_unlock (&cs_queue->mutex);
+	}
 	return (empty);
 }
 
@@ -110,7 +128,9 @@ static inline void cs_queue_item_add (struct cs_queue *cs_queue, void *item)
 	char *cs_queue_item;
 	int cs_queue_position;
 
-	pthread_mutex_lock (&cs_queue->mutex);
+	if (cs_queue->threaded_mode_enabled) {
+		pthread_mutex_lock (&cs_queue->mutex);
+	}
 	cs_queue_position = cs_queue->head;
 	cs_queue_item = cs_queue->items;
 	cs_queue_item += cs_queue_position * cs_queue->size_per_item;
@@ -123,7 +143,9 @@ static inline void cs_queue_item_add (struct cs_queue *cs_queue, void *item)
 	if (cs_queue->used > cs_queue->usedhw) {
 		cs_queue->usedhw = cs_queue->used;
 	}
-	pthread_mutex_unlock (&cs_queue->mutex);
+	if (cs_queue->threaded_mode_enabled) {
+		pthread_mutex_unlock (&cs_queue->mutex);
+	}
 }
 
 static inline void *cs_queue_item_get (struct cs_queue *cs_queue)
@@ -131,42 +153,58 @@ static inline void *cs_queue_item_get (struct cs_queue *cs_queue)
 	char *cs_queue_item;
 	int cs_queue_position;
 
-	pthread_mutex_lock (&cs_queue->mutex);
+	if (cs_queue->threaded_mode_enabled) {
+		pthread_mutex_lock (&cs_queue->mutex);
+	}
 	cs_queue_position = (cs_queue->tail + 1) % cs_queue->size;
 	cs_queue_item = cs_queue->items;
 	cs_queue_item += cs_queue_position * cs_queue->size_per_item;
-	pthread_mutex_unlock (&cs_queue->mutex);
+	if (cs_queue->threaded_mode_enabled) {
+		pthread_mutex_unlock (&cs_queue->mutex);
+	}
 	return ((void *)cs_queue_item);
 }
 
 static inline void cs_queue_item_remove (struct cs_queue *cs_queue) {
-	pthread_mutex_lock (&cs_queue->mutex);
+	if (cs_queue->threaded_mode_enabled) {
+		pthread_mutex_lock (&cs_queue->mutex);
+	}
 	cs_queue->tail = (cs_queue->tail + 1) % cs_queue->size;
 
 	assert (cs_queue->tail != cs_queue->head);
 
 	cs_queue->used--;
 	assert (cs_queue->used >= 0);
-	pthread_mutex_unlock (&cs_queue->mutex);
+	if (cs_queue->threaded_mode_enabled) {
+		pthread_mutex_unlock (&cs_queue->mutex);
+	}
 }
 
 static inline void cs_queue_items_remove (struct cs_queue *cs_queue, int rel_count)
 {
-	pthread_mutex_lock (&cs_queue->mutex);
+	if (cs_queue->threaded_mode_enabled) {
+		pthread_mutex_lock (&cs_queue->mutex);
+	}
 	cs_queue->tail = (cs_queue->tail + rel_count) % cs_queue->size;
 
 	assert (cs_queue->tail != cs_queue->head);
 
 	cs_queue->used -= rel_count;
-	pthread_mutex_unlock (&cs_queue->mutex);
+	if (cs_queue->threaded_mode_enabled) {
+		pthread_mutex_unlock (&cs_queue->mutex);
+	}
 }
 
 
 static inline void cs_queue_item_iterator_init (struct cs_queue *cs_queue)
 {
-	pthread_mutex_lock (&cs_queue->mutex);
+	if (cs_queue->threaded_mode_enabled) {
+		pthread_mutex_lock (&cs_queue->mutex);
+	}
 	cs_queue->iterator = (cs_queue->tail + 1) % cs_queue->size;
-	pthread_mutex_unlock (&cs_queue->mutex);
+	if (cs_queue->threaded_mode_enabled) {
+		pthread_mutex_unlock (&cs_queue->mutex);
+	}
 }
 
 static inline void *cs_queue_item_iterator_get (struct cs_queue *cs_queue)
@@ -174,15 +212,21 @@ static inline void *cs_queue_item_iterator_get (struct cs_queue *cs_queue)
 	char *cs_queue_item;
 	int cs_queue_position;
 
-	pthread_mutex_lock (&cs_queue->mutex);
+	if (cs_queue->threaded_mode_enabled) {
+		pthread_mutex_lock (&cs_queue->mutex);
+	}
 	cs_queue_position = (cs_queue->iterator) % cs_queue->size;
 	if (cs_queue->iterator == cs_queue->head) {
-		pthread_mutex_unlock (&cs_queue->mutex);
+		if (cs_queue->threaded_mode_enabled) {
+			pthread_mutex_unlock (&cs_queue->mutex);
+		}
 		return (0);
 	}
 	cs_queue_item = cs_queue->items;
 	cs_queue_item += cs_queue_position * cs_queue->size_per_item;
-	pthread_mutex_unlock (&cs_queue->mutex);
+	if (cs_queue->threaded_mode_enabled) {
+		pthread_mutex_unlock (&cs_queue->mutex);
+	}
 	return ((void *)cs_queue_item);
 }
 
@@ -190,28 +234,40 @@ static inline int cs_queue_item_iterator_next (struct cs_queue *cs_queue)
 {
 	int next_res;
 
-	pthread_mutex_lock (&cs_queue->mutex);
+	if (cs_queue->threaded_mode_enabled) {
+		pthread_mutex_lock (&cs_queue->mutex);
+	}
 	cs_queue->iterator = (cs_queue->iterator + 1) % cs_queue->size;
 
 	next_res = cs_queue->iterator == cs_queue->head;
-	pthread_mutex_unlock (&cs_queue->mutex);
+	if (cs_queue->threaded_mode_enabled) {
+		pthread_mutex_unlock (&cs_queue->mutex);
+	}
 	return (next_res);
 }
 
 static inline void cs_queue_avail (struct cs_queue *cs_queue, int *avail)
 {
-	pthread_mutex_lock (&cs_queue->mutex);
+	if (cs_queue->threaded_mode_enabled) {
+		pthread_mutex_lock (&cs_queue->mutex);
+	}
 	*avail = cs_queue->size - cs_queue->used - 2;
 	assert (*avail >= 0);
-	pthread_mutex_unlock (&cs_queue->mutex);
+	if (cs_queue->threaded_mode_enabled) {
+		pthread_mutex_unlock (&cs_queue->mutex);
+	}
 }
 
 static inline int cs_queue_used (struct cs_queue *cs_queue) {
 	int used;
 
-	pthread_mutex_lock (&cs_queue->mutex);
+	if (cs_queue->threaded_mode_enabled) {
+		pthread_mutex_lock (&cs_queue->mutex);
+	}
 	used = cs_queue->used;
-	pthread_mutex_unlock (&cs_queue->mutex);
+	if (cs_queue->threaded_mode_enabled) {
+		pthread_mutex_unlock (&cs_queue->mutex);
+	}
 
 	return (used);
 }
@@ -219,9 +275,15 @@ static inline int cs_queue_used (struct cs_queue *cs_queue) {
 static inline int cs_queue_usedhw (struct cs_queue *cs_queue) {
 	int usedhw;
 
-	pthread_mutex_lock (&cs_queue->mutex);
+	if (cs_queue->threaded_mode_enabled) {
+		pthread_mutex_lock (&cs_queue->mutex);
+	}
+
 	usedhw = cs_queue->usedhw;
-	pthread_mutex_unlock (&cs_queue->mutex);
+
+	if (cs_queue->threaded_mode_enabled) {
+		pthread_mutex_unlock (&cs_queue->mutex);
+	}
 
 	return (usedhw);
 }
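
The extra cs_queue_init() argument is the only interface change; a caller that is
known to be single-threaded passes 0 and no mutex is ever created or touched. A small
usage sketch, assuming cs_queue.h pulls in its own dependencies (pthread, string,
assert); the message_item type here is invented for illustration:

#include <stdio.h>

#include "cs_queue.h"

struct message_item {
	int sequence;
};

int main (void)
{
	struct cs_queue queue;
	struct message_item item = { .sequence = 42 };
	struct message_item *oldest;

	/* final argument selects locking: 0 = no mutex (single-threaded), 1 = threaded */
	if (cs_queue_init (&queue, 16, sizeof (struct message_item), 0) != 0) {
		return (1);
	}

	cs_queue_item_add (&queue, &item);
	oldest = (struct message_item *)cs_queue_item_get (&queue);
	printf ("dequeued sequence %d\n", oldest->sequence);
	cs_queue_item_remove (&queue);

	cs_queue_free (&queue);
	return (0);
}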

+ 9 - 0
exec/totemmrp.c

@@ -267,3 +267,12 @@ int totemmrp_member_remove (
 
 	return (res);
 }
+
+void totemmrp_threaded_mode_enable (void)
+{
+	int res;
+
+	totemsrp_threaded_mode_enable (totemsrp_context);
+
+	return (res);
+}
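
As committed, totemmrp_threaded_mode_enable() is declared void but declares an unused
local res and returns it uninitialized, which draws a compiler diagnostic ('return'
with a value, in function returning void). The intended body is presumably just the
forwarding call:

void totemmrp_threaded_mode_enable (void)
{
	totemsrp_threaded_mode_enable (totemsrp_context);
}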

+ 2 - 0
exec/totemmrp.h

@@ -128,4 +128,6 @@ extern int totemmrp_member_remove (
 	const struct totem_ip_address *member,
 	int ring_no);
 
+void totemmrp_threaded_mode_enable (void);
+
 #endif /* TOTEMMRP_H_DEFINED */

+ 106 - 34
exec/totempg.c

@@ -166,6 +166,8 @@ static unsigned int totempg_size_limit;
 
 static totem_queue_level_changed_fn totem_queue_level_changed = NULL;
 
+static uint32_t totempg_threaded_mode = 0;
+
 /*
  * Function and data used to log messages
  */
@@ -689,13 +691,19 @@ int callback_token_received_fn (enum totem_callback_token_type type,
 	struct totempg_mcast mcast;
 	struct iovec iovecs[3];
 
-	pthread_mutex_lock (&mcast_msg_mutex);
+	if (totempg_threaded_mode == 1) {
+		pthread_mutex_lock (&mcast_msg_mutex);
+	}
 	if (mcast_packed_msg_count == 0) {
-		pthread_mutex_unlock (&mcast_msg_mutex);
+		if (totempg_threaded_mode == 1) {
+			pthread_mutex_unlock (&mcast_msg_mutex);
+		}
 		return (0);
 	}
 	if (totemmrp_avail() == 0) {
-		pthread_mutex_unlock (&mcast_msg_mutex);
+		if (totempg_threaded_mode == 1) {
+			pthread_mutex_unlock (&mcast_msg_mutex);
+		}
 		return (0);
 	}
 	mcast.header.version = 0;
@@ -722,7 +730,9 @@ int callback_token_received_fn (enum totem_callback_token_type type,
 	mcast_packed_msg_count = 0;
 	fragment_size = 0;
 
-	pthread_mutex_unlock (&mcast_msg_mutex);
+	if (totempg_threaded_mode == 1) {
+		pthread_mutex_unlock (&mcast_msg_mutex);
+	}
 	return (0);
 }
 
@@ -774,9 +784,13 @@ int totempg_initialize (
 
 void totempg_finalize (void)
 {
-	pthread_mutex_lock (&totempg_mutex);
+	if (totempg_threaded_mode == 1) {
+		pthread_mutex_lock (&totempg_mutex);
+	}
 	totemmrp_finalize ();
-	pthread_mutex_unlock (&totempg_mutex);
+	if (totempg_threaded_mode == 1) {
+		pthread_mutex_unlock (&totempg_mutex);
+	}
 }
 
 /*
@@ -798,7 +812,9 @@ static int mcast_msg (
 	int copy_base = 0;
 	int total_size = 0;
 
-	pthread_mutex_lock (&mcast_msg_mutex);
+	if (totempg_threaded_mode == 1) {
+		pthread_mutex_lock (&mcast_msg_mutex);
+	}
 	totemmrp_event_signal (TOTEM_EVENT_NEW_MSG, 1);
 
 	/*
@@ -828,7 +844,9 @@ static int mcast_msg (
 	if (byte_count_send_ok (total_size + sizeof(unsigned short) *
 		(mcast_packed_msg_count)) == 0) {
 
-		pthread_mutex_unlock (&mcast_msg_mutex);
+		if (totempg_threaded_mode == 1) {
+			pthread_mutex_unlock (&mcast_msg_mutex);
+		}
 		return(-1);
 	}
 
@@ -946,7 +964,9 @@ static int mcast_msg (
 	}
 
 error_exit:
-	pthread_mutex_unlock (&mcast_msg_mutex);
+	if (totempg_threaded_mode == 1) {
+		pthread_mutex_unlock (&mcast_msg_mutex);
+	}
 	return (res);
 }
 
@@ -1004,19 +1024,27 @@ int totempg_callback_token_create (
 	const void *data)
 {
 	unsigned int res;
-	pthread_mutex_lock (&callback_token_mutex);
+	if (totempg_threaded_mode == 1) {
+		pthread_mutex_lock (&callback_token_mutex);
+	}
 	res = totemmrp_callback_token_create (handle_out, type, delete,
 		callback_fn, data);
-	pthread_mutex_unlock (&callback_token_mutex);
+	if (totempg_threaded_mode == 1) {
+		pthread_mutex_unlock (&callback_token_mutex);
+	}
 	return (res);
 }
 
 void totempg_callback_token_destroy (
 	void *handle_out)
 {
-	pthread_mutex_lock (&callback_token_mutex);
+	if (totempg_threaded_mode == 1) {
+		pthread_mutex_lock (&callback_token_mutex);
+	}
 	totemmrp_callback_token_destroy (handle_out);
-	pthread_mutex_unlock (&callback_token_mutex);
+	if (totempg_threaded_mode == 1) {
+		pthread_mutex_unlock (&callback_token_mutex);
+	}
 }
 
 /*
@@ -1042,7 +1070,9 @@ int totempg_groups_initialize (
 	struct totempg_group_instance *instance;
 	unsigned int res;
 
-	pthread_mutex_lock (&totempg_mutex);
+	if (totempg_threaded_mode == 1) {
+		pthread_mutex_lock (&totempg_mutex);
+	}
 	res = hdb_handle_create (&totempg_groups_instance_database,
 		sizeof (struct totempg_group_instance), handle);
 	if (res != 0) {
@@ -1068,13 +1098,17 @@ int totempg_groups_initialize (
 
 	hdb_handle_put (&totempg_groups_instance_database, *handle);
 
-	pthread_mutex_unlock (&totempg_mutex);
+	if (totempg_threaded_mode == 1) {
+		pthread_mutex_unlock (&totempg_mutex);
+	}
 	return (0);
 error_destroy:
 	hdb_handle_destroy (&totempg_groups_instance_database, *handle);
 
 error_exit:
-	pthread_mutex_unlock (&totempg_mutex);
+	if (totempg_threaded_mode == 1) {
+		pthread_mutex_unlock (&totempg_mutex);
+	}
 	return (-1);
 }
 
@@ -1087,7 +1121,10 @@ int totempg_groups_join (
 	struct totempg_group *new_groups;
 	unsigned int res;
 
-	pthread_mutex_lock (&totempg_mutex);
+	if (totempg_threaded_mode == 1) {
+		pthread_mutex_lock (&totempg_mutex);
+	}
+	
 	res = hdb_handle_get (&totempg_groups_instance_database, handle,
 		(void *)&instance);
 	if (res != 0) {
@@ -1109,7 +1146,9 @@ int totempg_groups_join (
 	hdb_handle_put (&totempg_groups_instance_database, handle);
 
 error_exit:
-	pthread_mutex_unlock (&totempg_mutex);
+	if (totempg_threaded_mode == 1) {
+		pthread_mutex_unlock (&totempg_mutex);
+	}
 	return (res);
 }
 
@@ -1121,7 +1160,9 @@ int totempg_groups_leave (
 	struct totempg_group_instance *instance;
 	unsigned int res;
 
-	pthread_mutex_lock (&totempg_mutex);
+	if (totempg_threaded_mode == 1) {
+		pthread_mutex_lock (&totempg_mutex);
+	}
 	res = hdb_handle_get (&totempg_groups_instance_database, handle,
 		(void *)&instance);
 	if (res != 0) {
@@ -1131,7 +1172,9 @@ int totempg_groups_leave (
 	hdb_handle_put (&totempg_groups_instance_database, handle);
 
 error_exit:
-	pthread_mutex_unlock (&totempg_mutex);
+	if (totempg_threaded_mode == 1) {
+		pthread_mutex_unlock (&totempg_mutex);
+	}
 	return (res);
 }
 
@@ -1150,7 +1193,10 @@ int totempg_groups_mcast_joined (
 	int i;
 	unsigned int res;
 
-	pthread_mutex_lock (&totempg_mutex);
+	if (totempg_threaded_mode == 1) {
+		pthread_mutex_lock (&totempg_mutex);
+	}
+	
 	res = hdb_handle_get (&totempg_groups_instance_database, handle,
 		(void *)&instance);
 	if (res != 0) {
@@ -1177,7 +1223,10 @@ int totempg_groups_mcast_joined (
 	hdb_handle_put (&totempg_groups_instance_database, handle);
 
 error_exit:
-	pthread_mutex_unlock (&totempg_mutex);
+	if (totempg_threaded_mode == 1) {
+		pthread_mutex_unlock (&totempg_mutex);
+	}
+	
 	return (res);
 }
 
@@ -1228,8 +1277,10 @@ int totempg_groups_joined_reserve (
 	unsigned int res;
 	unsigned int reserved = 0;
 
-	pthread_mutex_lock (&totempg_mutex);
-	pthread_mutex_lock (&mcast_msg_mutex);
+	if (totempg_threaded_mode == 1) {
+		pthread_mutex_lock (&totempg_mutex);
+		pthread_mutex_lock (&mcast_msg_mutex);
+	}
 	res = hdb_handle_get (&totempg_groups_instance_database, handle,
 		(void *)&instance);
 	if (res != 0) {
@@ -1259,19 +1310,25 @@ error_put:
 	hdb_handle_put (&totempg_groups_instance_database, handle);
 
 error_exit:
-	pthread_mutex_unlock (&mcast_msg_mutex);
-	pthread_mutex_unlock (&totempg_mutex);
+	if (totempg_threaded_mode == 1) {
+		pthread_mutex_unlock (&mcast_msg_mutex);
+		pthread_mutex_unlock (&totempg_mutex);
+	}
 	return (reserved);
 }
 
 
 int totempg_groups_joined_release (int msg_count)
 {
-	pthread_mutex_lock (&totempg_mutex);
-	pthread_mutex_lock (&mcast_msg_mutex);
+	if (totempg_threaded_mode == 1) {
+		pthread_mutex_lock (&totempg_mutex);
+		pthread_mutex_lock (&mcast_msg_mutex);
+	}
 	send_release (msg_count);
-	pthread_mutex_unlock (&mcast_msg_mutex);
-	pthread_mutex_unlock (&totempg_mutex);
+	if (totempg_threaded_mode == 1) {
+		pthread_mutex_unlock (&mcast_msg_mutex);
+		pthread_mutex_unlock (&totempg_mutex);
+	}
 	return 0;
 }
 
@@ -1289,7 +1346,9 @@ int totempg_groups_mcast_groups (
 	int i;
 	unsigned int res;
 
-	pthread_mutex_lock (&totempg_mutex);
+	if (totempg_threaded_mode == 1) {
+		pthread_mutex_lock (&totempg_mutex);
+	}
 	res = hdb_handle_get (&totempg_groups_instance_database, handle,
 		(void *)&instance);
 	if (res != 0) {
@@ -1317,7 +1376,9 @@ int totempg_groups_mcast_groups (
 	hdb_handle_put (&totempg_groups_instance_database, handle);
 
 error_exit:
-	pthread_mutex_unlock (&totempg_mutex);
+	if (totempg_threaded_mode == 1) {
+		pthread_mutex_unlock (&totempg_mutex);
+	}
 	return (res);
 }
 
@@ -1336,7 +1397,9 @@ int totempg_groups_send_ok_groups (
 	unsigned int i;
 	unsigned int res;
 
-	pthread_mutex_lock (&totempg_mutex);
+	if (totempg_threaded_mode == 1) {
+		pthread_mutex_lock (&totempg_mutex);
+	}
 	res = hdb_handle_get (&totempg_groups_instance_database, handle,
 		(void *)&instance);
 	if (res != 0) {
@@ -1354,7 +1417,9 @@ int totempg_groups_send_ok_groups (
 
 	hdb_handle_put (&totempg_groups_instance_database, handle);
 error_exit:
-	pthread_mutex_unlock (&totempg_mutex);
+	if (totempg_threaded_mode == 1) {
+		pthread_mutex_unlock (&totempg_mutex);
+	}
 	return (res);
 }
 
@@ -1457,3 +1522,10 @@ extern int totempg_member_add (
 extern int totempg_member_remove (
 	const struct totem_ip_address *member,
 	int ring_no);
+
+void totempg_threaded_mode_enable (void)
+{
+	totempg_threaded_mode = 1;
+	totemmrp_threaded_mode_enable ();
+}
+

+ 11 - 2
exec/totemsrp.c

@@ -503,6 +503,8 @@ struct totemsrp_instance {
 	totemsrp_stats_t stats;
 
 	uint32_t orf_token_discard;
+
+	uint32_t threaded_mode_enabled;
 	
 	void * token_recv_event_handle;
 	void * token_sent_event_handle;
@@ -881,7 +883,7 @@ int totemsrp_initialize (
 
 
 	cs_queue_init (&instance->retrans_message_queue, RETRANS_MESSAGE_QUEUE_SIZE_MAX,
-		sizeof (struct message_item));
+		sizeof (struct message_item), instance->threaded_mode_enabled);
 
 	sq_init (&instance->regular_sort_queue,
 		QUEUE_RTR_ITEMS_SIZE_MAX, sizeof (struct sort_queue_item), 0);
@@ -942,7 +944,7 @@ int totemsrp_initialize (
 	 */
 	cs_queue_init (&instance->new_message_queue,
 		MESSAGE_QUEUE_MAX,
-		sizeof (struct message_item));
+		sizeof (struct message_item), instance->threaded_mode_enabled);
 
 	totemsrp_callback_token_create (instance,
 		&instance->token_recv_event_handle,
@@ -4491,3 +4493,10 @@ int totemsrp_member_remove (
 
 	return (res);
 }
+
+void totemsrp_threaded_mode_enable (void *context)
+{
+	struct totemsrp_instance *instance = (struct totemsrp_instance *)context;
+
+	instance->threaded_mode_enabled = 1;
+}

+ 3 - 0
exec/totemsrp.h

@@ -133,4 +133,7 @@ extern int totemsrp_member_remove (
 	const struct totem_ip_address *member,
 	int ring_no);
 	
+void totemsrp_threaded_mode_enable (
+	void *srp_context);
+
 #endif /* TOTEMSRP_H_DEFINED */