totempg.c

/*
 * Copyright (c) 2003-2005 MontaVista Software, Inc.
 * Copyright (c) 2005 OSDL.
 * Copyright (c) 2006-2009 Red Hat, Inc.
 *
 * All rights reserved.
 *
 * Author: Steven Dake (sdake@redhat.com)
 * Author: Mark Haverkamp (markh@osdl.org)
 *
 * This software licensed under BSD license, the text of which follows:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * - Redistributions of source code must retain the above copyright notice,
 *   this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright notice,
 *   this list of conditions and the following disclaimer in the documentation
 *   and/or other materials provided with the distribution.
 * - Neither the name of the MontaVista Software, Inc. nor the names of its
 *   contributors may be used to endorse or promote products derived from this
 *   software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * FRAGMENTATION AND PACKING ALGORITHM:
 *
 * Assemble the entire message into one buffer
 * if full fragment
 *     store fragment into lengths list
 *     for each full fragment
 *         multicast fragment
 *         set length and fragment fields of pg message
 *     store remaining multicast into head of fragmentation data and set lens field
 *
 * If a message exceeds the maximum packet size allowed by the totem
 * single ring protocol, the protocol could lose forward progress.
 * Statically calculating the allowed data amount doesn't work because
 * the amount of data allowed depends on the number of fragments in
 * each message.  In this implementation, the maximum fragment size
 * is dynamically calculated for each fragment added to the message.
 *
 * It is possible for a message to be two bytes short of the maximum
 * packet size.  This occurs when a message or collection of
 * messages + the mcast header + the lens are two bytes short of the
 * end of the packet.  Since another len field consumes two bytes, the
 * len field would consume the rest of the packet without room for data.
 *
 * One optimization would be to forgo the final len field and determine
 * it from the size of the udp datagram.  Then this condition would no
 * longer occur.
 */
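/*
 * Worked example of the packing arithmetic (illustrative numbers, not
 * mandated by the protocol): assuming net_mtu = 1500 and
 * sizeof (struct totempg_mcast) = 8, TOTEMPG_PACKET_SIZE is 1492 bytes.
 * Packing three 400-byte messages consumes 3 * 2 bytes of len fields
 * plus 1200 bytes of data.  A fourth 400-byte message no longer fits
 * together with its len field and a spare byte; as much of it as fits
 * is packed, the buffer is multicast with the fragmented flag set, and
 * the remainder is carried over as a continuation in the next buffer.
 */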
/*
 * ASSEMBLY AND UNPACKING ALGORITHM:
 *
 * copy incoming packet into assembly data buffer indexed by current
 * location of end of fragment
 *
 * if not fragmented
 *     deliver all messages in assembly data buffer
 * else
 *     if msg_count > 1 and fragmented
 *         deliver all messages except last message in assembly data buffer
 *         copy last fragmented section to start of assembly data buffer
 *     else
 *         if msg_count = 1 and fragmented
 *             do nothing
 *
 */
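/*
 * Illustrative trace (values assumed): packet 1 arrives with msg_count 2
 * and the fragmented flag set.  The first (complete) message is
 * delivered; the trailing fragment is moved to the start of the
 * assembly buffer and assembly->index is left at its length.  Packet 2
 * arrives with a continuation field matching packet 1's fragment
 * number; its data is appended at assembly->index, completing the
 * message, which is then delivered from the start of the buffer.
 */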
#include <config.h>

#ifdef HAVE_ALLOCA_H
#include <alloca.h>
#endif
#include <netinet/in.h>
#include <sys/uio.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <pthread.h>
#include <errno.h>
#include <limits.h>

#include <corosync/swab.h>
#include <corosync/hdb.h>
#include <corosync/list.h>
#include <corosync/totem/coropoll.h>
#include <corosync/totem/totempg.h>
#define LOGSYS_UTILS_ONLY 1
#include <corosync/engine/logsys.h>

#include "totemmrp.h"
#include "totemsrp.h"
#define min(a,b) ((a) < (b) ? (a) : (b))
struct totempg_mcast_header {
    short version;
    short type;
};

/*
 * totempg_mcast structure
 *
 * header:        Identify the mcast.
 * fragmented:    Set if this message continues into next message
 * continuation:  Set if this message is a continuation from last message
 * msg_count:     Indicates how many packed messages are contained
 *                in the mcast.
 * Also, the size of each packed message and the messages themselves are
 * appended to the end of this structure when sent.
 */
struct totempg_mcast {
    struct totempg_mcast_header header;
    unsigned char fragmented;
    unsigned char continuation;
    unsigned short msg_count;
    /*
     * short msg_len[msg_count];
     */
    /*
     * data for messages
     */
};
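/*
 * On-wire layout of a packed mcast, as assembled from the three iovecs
 * in callback_token_received_fn () and mcast_msg () below (sketch):
 *
 *   +----------------------+-----------------------+----------------+
 *   | struct totempg_mcast | msg_len[msg_count]    | message data   |
 *   | (header, fragmented, | (one unsigned short   | (packed back   |
 *   |  continuation, count)|  per packed message)  |  to back)      |
 *   +----------------------+-----------------------+----------------+
 */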
/*
 * Maximum packet size for totem pg messages
 */
#define TOTEMPG_PACKET_SIZE (totempg_totem_config->net_mtu - \
    sizeof (struct totempg_mcast))

/*
 * Local variables used for packing small messages
 */
static unsigned short mcast_packed_msg_lens[FRAME_SIZE_MAX];

static int mcast_packed_msg_count = 0;

static int totempg_reserved = 1;

static unsigned int totempg_size_limit;

/*
 * Function and data used to log messages
 */
static int totempg_log_level_security;
static int totempg_log_level_error;
static int totempg_log_level_warning;
static int totempg_log_level_notice;
static int totempg_log_level_debug;
static int totempg_subsys_id;
static void (*totempg_log_printf) (
    unsigned int rec_ident,
    const char *function,
    const char *file,
    int line,
    const char *format, ...) __attribute__((format(printf, 5, 6)));

struct totem_config *totempg_totem_config;

static totempg_stats_t totempg_stats;

enum throw_away_mode {
    THROW_AWAY_INACTIVE,
    THROW_AWAY_ACTIVE
};

struct assembly {
    unsigned int nodeid;
    unsigned char data[MESSAGE_SIZE_MAX];
    int index;
    unsigned char last_frag_num;
    enum throw_away_mode throw_away_mode;
    struct list_head list;
};

static void assembly_deref (struct assembly *assembly);

static int callback_token_received_fn (enum totem_callback_token_type type,
    const void *data);

DECLARE_LIST_INIT(assembly_list_inuse);

DECLARE_LIST_INIT(assembly_list_free);
/*
 * Staging buffer for packed messages.  Messages are staged in this buffer
 * before sending.  Multiple messages may fit which cuts down on the
 * number of mcasts sent.  If a message doesn't completely fit, then
 * the mcast header has a fragment bit set that says that there are more
 * data to follow.  fragment_size is an index into the buffer.  It indicates
 * the size of message data and where to place new message data.
 * fragment_continuation indicates whether the first packed message in
 * the buffer is a continuation of a previously packed fragment.
 */
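/*
 * Illustrative state (values assumed): after staging a 100-byte and a
 * 250-byte message via two mcast_msg () calls, fragment_size is 350,
 * mcast_packed_msg_count is 2 and mcast_packed_msg_lens holds
 * { 100, 250 }.  The next token callback multicasts the staged data
 * and resets both counters.
 */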
static unsigned char *fragmentation_data;

static int fragment_size = 0;

static int fragment_continuation = 0;

static struct iovec iov_delv;

static unsigned int totempg_max_handle = 0;

struct totempg_group_instance {
    void (*deliver_fn) (
        unsigned int nodeid,
        const void *msg,
        unsigned int msg_len,
        int endian_conversion_required);

    void (*confchg_fn) (
        enum totem_configuration_type configuration_type,
        const unsigned int *member_list, size_t member_list_entries,
        const unsigned int *left_list, size_t left_list_entries,
        const unsigned int *joined_list, size_t joined_list_entries,
        const struct memb_ring_id *ring_id);

    struct totempg_group *groups;

    int groups_cnt;
};

DECLARE_HDB_DATABASE (totempg_groups_instance_database,NULL);

static unsigned char next_fragment = 1;

static pthread_mutex_t totempg_mutex = PTHREAD_MUTEX_INITIALIZER;

static pthread_mutex_t callback_token_mutex = PTHREAD_MUTEX_INITIALIZER;

static pthread_mutex_t mcast_msg_mutex = PTHREAD_MUTEX_INITIALIZER;

#define log_printf(level, format, args...)          \
do {                                                \
    totempg_log_printf (                            \
        LOGSYS_ENCODE_RECID(level,                  \
            totempg_subsys_id,                      \
            LOGSYS_RECID_LOG),                      \
        __FUNCTION__, __FILE__, __LINE__,           \
        format, ##args);                            \
} while (0);

static int msg_count_send_ok (int msg_count);

static int byte_count_send_ok (int byte_count);
static struct assembly *assembly_ref (unsigned int nodeid)
{
    struct assembly *assembly;
    struct list_head *list;

    /*
     * Search inuse list for node id and return assembly buffer if found
     */
    for (list = assembly_list_inuse.next;
        list != &assembly_list_inuse;
        list = list->next) {

        assembly = list_entry (list, struct assembly, list);

        if (nodeid == assembly->nodeid) {
            return (assembly);
        }
    }

    /*
     * Nothing found in inuse list get one from free list if available
     */
    if (list_empty (&assembly_list_free) == 0) {
        assembly = list_entry (assembly_list_free.next, struct assembly, list);
        list_del (&assembly->list);
        list_add (&assembly->list, &assembly_list_inuse);
        assembly->nodeid = nodeid;
        assembly->index = 0;
        assembly->last_frag_num = 0;
        assembly->throw_away_mode = THROW_AWAY_INACTIVE;
        return (assembly);
    }

    /*
     * Nothing available in inuse or free list, so allocate a new one
     */
    assembly = malloc (sizeof (struct assembly));
    /*
     * TODO handle memory allocation failure here
     */
    assert (assembly);
    assembly->nodeid = nodeid;
    assembly->data[0] = 0;
    assembly->index = 0;
    assembly->last_frag_num = 0;
    assembly->throw_away_mode = THROW_AWAY_INACTIVE;
    list_init (&assembly->list);
    list_add (&assembly->list, &assembly_list_inuse);

    return (assembly);
}

static void assembly_deref (struct assembly *assembly)
{
    list_del (&assembly->list);
    list_add (&assembly->list, &assembly_list_free);
}
static inline void app_confchg_fn (
    enum totem_configuration_type configuration_type,
    const unsigned int *member_list, size_t member_list_entries,
    const unsigned int *left_list, size_t left_list_entries,
    const unsigned int *joined_list, size_t joined_list_entries,
    const struct memb_ring_id *ring_id)
{
    int i;
    struct totempg_group_instance *instance;
    struct assembly *assembly;
    unsigned int res;

    /*
     * For every leaving processor, add to free list.
     * This also has the side effect of clearing out the dataset
     * in the leaving processor's assembly buffer.
     */
    for (i = 0; i < left_list_entries; i++) {
        assembly = assembly_ref (left_list[i]);

        list_del (&assembly->list);
        list_add (&assembly->list, &assembly_list_free);
    }

    for (i = 0; i <= totempg_max_handle; i++) {
        res = hdb_handle_get (&totempg_groups_instance_database,
            hdb_nocheck_convert (i), (void *)&instance);

        if (res == 0) {
            if (instance->confchg_fn) {
                instance->confchg_fn (
                    configuration_type,
                    member_list,
                    member_list_entries,
                    left_list,
                    left_list_entries,
                    joined_list,
                    joined_list_entries,
                    ring_id);
            }

            hdb_handle_put (&totempg_groups_instance_database,
                hdb_nocheck_convert (i));
        }
    }
}
static inline void group_endian_convert (
    void *msg,
    int msg_len)
{
    unsigned short *group_len;
    int i;
    char *aligned_msg;

    /*
     * Align data structure for sparc and ia64
     */
    if ((size_t)msg % 4 != 0) {
        aligned_msg = alloca(msg_len);
        memcpy(aligned_msg, msg, msg_len);
    } else {
        aligned_msg = msg;
    }

    group_len = (unsigned short *)aligned_msg;
    group_len[0] = swab16(group_len[0]);
    for (i = 1; i < group_len[0] + 1; i++) {
        group_len[i] = swab16(group_len[i]);
    }

    if (aligned_msg != msg) {
        memcpy(msg, aligned_msg, msg_len);
    }
}
static inline int group_matches (
    struct iovec *iovec,
    unsigned int iov_len,
    struct totempg_group *groups_b,
    unsigned int group_b_cnt,
    unsigned int *adjust_iovec)
{
    unsigned short *group_len;
    char *group_name;
    int i;
    int j;
    struct iovec iovec_aligned = { NULL, 0 };

    assert (iov_len == 1);

    /*
     * Align data structure for sparc and ia64
     */
    if ((size_t)iovec->iov_base % 4 != 0) {
        iovec_aligned.iov_base = alloca(iovec->iov_len);
        memcpy(iovec_aligned.iov_base, iovec->iov_base, iovec->iov_len);
        iovec_aligned.iov_len = iovec->iov_len;
        iovec = &iovec_aligned;
    }

    group_len = (unsigned short *)iovec->iov_base;
    group_name = ((char *)iovec->iov_base) +
        sizeof (unsigned short) * (group_len[0] + 1);

    /*
     * Calculate amount to adjust the iovec by before delivering to app
     */
    *adjust_iovec = sizeof (unsigned short) * (group_len[0] + 1);
    for (i = 1; i < group_len[0] + 1; i++) {
        *adjust_iovec += group_len[i];
    }

    /*
     * Determine if this message should be delivered to this instance
     */
    for (i = 1; i < group_len[0] + 1; i++) {
        for (j = 0; j < group_b_cnt; j++) {
            if ((group_len[i] == groups_b[j].group_len) &&
                (memcmp (groups_b[j].group, group_name, group_len[i]) == 0)) {
                return (1);
            }
        }
        group_name += group_len[i];
    }
    return (0);
}
static inline void app_deliver_fn (
    unsigned int nodeid,
    void *msg,
    unsigned int msg_len,
    int endian_conversion_required)
{
    int i;
    struct totempg_group_instance *instance;
    struct iovec stripped_iovec;
    unsigned int adjust_iovec;
    unsigned int res;
    struct iovec *iovec;
    struct iovec aligned_iovec = { NULL, 0 };

    if (endian_conversion_required) {
        group_endian_convert (msg, msg_len);
    }

    /*
     * TODO This function needs to be rewritten for proper alignment
     * to avoid 3+ memory copies
     */
    /*
     * Align data structure for sparc and ia64
     */
    aligned_iovec.iov_base = alloca(msg_len);
    aligned_iovec.iov_len = msg_len;
    memcpy(aligned_iovec.iov_base, msg, msg_len);
    iovec = &aligned_iovec;

    for (i = 0; i <= totempg_max_handle; i++) {
        res = hdb_handle_get (&totempg_groups_instance_database,
            hdb_nocheck_convert (i), (void *)&instance);

        if (res == 0) {
            if (group_matches (iovec, 1, instance->groups, instance->groups_cnt, &adjust_iovec)) {
                stripped_iovec.iov_len = iovec->iov_len - adjust_iovec;
                stripped_iovec.iov_base = (char *)iovec->iov_base + adjust_iovec;

                /*
                 * Align data structure for sparc and ia64
                 */
                if (((size_t)iovec->iov_base + adjust_iovec) % 4 != 0) {
                    /*
                     * Deal with misalignment
                     */
                    stripped_iovec.iov_base =
                        alloca (stripped_iovec.iov_len);
                    memcpy (stripped_iovec.iov_base,
                        (char *)iovec->iov_base + adjust_iovec,
                        stripped_iovec.iov_len);
                }
                instance->deliver_fn (
                    nodeid,
                    stripped_iovec.iov_base,
                    stripped_iovec.iov_len,
                    endian_conversion_required);
            }

            hdb_handle_put (&totempg_groups_instance_database, hdb_nocheck_convert(i));
        }
    }
}
static void totempg_confchg_fn (
    enum totem_configuration_type configuration_type,
    const unsigned int *member_list, size_t member_list_entries,
    const unsigned int *left_list, size_t left_list_entries,
    const unsigned int *joined_list, size_t joined_list_entries,
    const struct memb_ring_id *ring_id)
{
    // TODO optimize this
    app_confchg_fn (configuration_type,
        member_list, member_list_entries,
        left_list, left_list_entries,
        joined_list, joined_list_entries,
        ring_id);
}
static void totempg_deliver_fn (
    unsigned int nodeid,
    const void *msg,
    unsigned int msg_len,
    int endian_conversion_required)
{
    struct totempg_mcast *mcast;
    unsigned short *msg_lens;
    int i;
    struct assembly *assembly;
    char header[FRAME_SIZE_MAX];
    int msg_count;
    int continuation;
    int start;
    const char *data;
    int datasize;

    assembly = assembly_ref (nodeid);
    assert (assembly);

    /*
     * Assemble the header into one block of data and
     * assemble the packet contents into one block of data to simplify delivery
     */
    mcast = (struct totempg_mcast *)msg;
    if (endian_conversion_required) {
        mcast->msg_count = swab16 (mcast->msg_count);
    }

    msg_count = mcast->msg_count;
    datasize = sizeof (struct totempg_mcast) +
        msg_count * sizeof (unsigned short);

    memcpy (header, msg, datasize);
    data = msg;

    msg_lens = (unsigned short *) (header + sizeof (struct totempg_mcast));
    if (endian_conversion_required) {
        for (i = 0; i < mcast->msg_count; i++) {
            msg_lens[i] = swab16 (msg_lens[i]);
        }
    }

    memcpy (&assembly->data[assembly->index], &data[datasize],
        msg_len - datasize);

    /*
     * If the last message in the buffer is a fragment, then we
     * can't deliver it.  We'll first deliver the full messages
     * then adjust the assembly buffer so we can add the rest of the
     * fragment when it arrives.
     */
    msg_count = mcast->fragmented ? mcast->msg_count - 1 : mcast->msg_count;
    continuation = mcast->continuation;
    iov_delv.iov_base = (void *)&assembly->data[0];
    iov_delv.iov_len = assembly->index + msg_lens[0];

    /*
     * Make sure that if this message is a continuation, that it
     * matches the sequence number of the previous fragment.
     * Also, if the first packed message is a continuation
     * of a previous message, but the assembly buffer
     * is empty, then we need to discard it since we can't
     * assemble a complete message.  Likewise, if this message isn't a
     * continuation and the assembly buffer is empty, we have to discard
     * the continued message.
     */
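    /*
     * Illustrative recovery path: if a fragmented packet is lost, the
     * next packet's continuation number no longer matches
     * last_frag_num, so the assembly switches to THROW_AWAY_ACTIVE
     * below and partial data is discarded until the fragment numbering
     * indicates a safe restart point, at which point reassembly
     * resumes.
     */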
    start = 0;

    if (assembly->throw_away_mode == THROW_AWAY_ACTIVE) {
        /* Throw away the first msg block */
        if (mcast->fragmented == 0 || mcast->fragmented == 1) {
            assembly->throw_away_mode = THROW_AWAY_INACTIVE;

            assembly->index += msg_lens[0];
            iov_delv.iov_base = (void *)&assembly->data[assembly->index];
            iov_delv.iov_len = msg_lens[1];
            start = 1;
        }
    } else
    if (assembly->throw_away_mode == THROW_AWAY_INACTIVE) {
        if (continuation == assembly->last_frag_num) {
            assembly->last_frag_num = mcast->fragmented;
            for (i = start; i < msg_count; i++) {
                app_deliver_fn(nodeid, iov_delv.iov_base, iov_delv.iov_len,
                    endian_conversion_required);
                assembly->index += msg_lens[i];
                iov_delv.iov_base = (void *)&assembly->data[assembly->index];
                if (i < (msg_count - 1)) {
                    iov_delv.iov_len = msg_lens[i + 1];
                }
            }
        } else {
            assembly->throw_away_mode = THROW_AWAY_ACTIVE;
        }
    }

    if (mcast->fragmented == 0) {
        /*
         * End of messages, dereference assembly struct
         */
        assembly->last_frag_num = 0;
        assembly->index = 0;
        assembly_deref (assembly);
    } else {
        /*
         * Message is fragmented, keep around assembly list
         */
        if (mcast->msg_count > 1) {
            memmove (&assembly->data[0],
                &assembly->data[assembly->index],
                msg_lens[msg_count]);

            assembly->index = 0;
        }
        assembly->index += msg_lens[msg_count];
    }
}
/*
 * Totem Process Group Abstraction
 * depends on poll abstraction, POSIX, IPV4
 */

void *callback_token_received_handle;

int callback_token_received_fn (enum totem_callback_token_type type,
    const void *data)
{
    struct totempg_mcast mcast;
    struct iovec iovecs[3];
    int res;

    pthread_mutex_lock (&mcast_msg_mutex);
    if (mcast_packed_msg_count == 0) {
        pthread_mutex_unlock (&mcast_msg_mutex);
        return (0);
    }
    if (totemmrp_avail() == 0) {
        pthread_mutex_unlock (&mcast_msg_mutex);
        return (0);
    }
    mcast.header.version = 0;
    mcast.fragmented = 0;

    /*
     * Was the first message in this buffer a continuation of a
     * fragmented message?
     */
    mcast.continuation = fragment_continuation;
    fragment_continuation = 0;

    mcast.msg_count = mcast_packed_msg_count;

    iovecs[0].iov_base = (void *)&mcast;
    iovecs[0].iov_len = sizeof (struct totempg_mcast);
    iovecs[1].iov_base = (void *)mcast_packed_msg_lens;
    iovecs[1].iov_len = mcast_packed_msg_count * sizeof (unsigned short);
    iovecs[2].iov_base = (void *)&fragmentation_data[0];
    iovecs[2].iov_len = fragment_size;
    res = totemmrp_mcast (iovecs, 3, 0);

    mcast_packed_msg_count = 0;
    fragment_size = 0;

    pthread_mutex_unlock (&mcast_msg_mutex);
    return (0);
}
/*
 * Initialize the totem process group abstraction
 */
int totempg_initialize (
    hdb_handle_t poll_handle,
    struct totem_config *totem_config)
{
    int res;

    totempg_totem_config = totem_config;
    totempg_log_level_security = totem_config->totem_logging_configuration.log_level_security;
    totempg_log_level_error = totem_config->totem_logging_configuration.log_level_error;
    totempg_log_level_warning = totem_config->totem_logging_configuration.log_level_warning;
    totempg_log_level_notice = totem_config->totem_logging_configuration.log_level_notice;
    totempg_log_level_debug = totem_config->totem_logging_configuration.log_level_debug;
    totempg_log_printf = totem_config->totem_logging_configuration.log_printf;
    totempg_subsys_id = totem_config->totem_logging_configuration.log_subsys_id;

    fragmentation_data = malloc (TOTEMPG_PACKET_SIZE);
    if (fragmentation_data == 0) {
        return (-1);
    }

    totemsrp_net_mtu_adjust (totem_config);

    res = totemmrp_initialize (
        poll_handle,
        totem_config,
        &totempg_stats,
        totempg_deliver_fn,
        totempg_confchg_fn);

    totemmrp_callback_token_create (
        &callback_token_received_handle,
        TOTEM_CALLBACK_TOKEN_RECEIVED,
        0,
        callback_token_received_fn,
        0);
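    /*
     * Upper bound for a single reservable message: one less than the
     * currently available transmit queue slots, times the per-packet
     * payload.  The 16 bytes subtracted per packet appear to be slack
     * for the per-message len fields (an assumption; the constant is
     * not otherwise documented here).
     */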
    totempg_size_limit = (totemmrp_avail() - 1) *
        (totempg_totem_config->net_mtu -
        sizeof (struct totempg_mcast) - 16);

    return (res);
}
void totempg_finalize (void)
{
    pthread_mutex_lock (&totempg_mutex);
    totemmrp_finalize ();
    pthread_mutex_unlock (&totempg_mutex);
}
/*
 * Multicast a message
 */
static int mcast_msg (
    struct iovec *iovec_in,
    unsigned int iov_len,
    int guarantee)
{
    int res = 0;
    struct totempg_mcast mcast;
    struct iovec iovecs[3];
    struct iovec iovec[64];
    int i;
    int dest, src;
    int max_packet_size = 0;
    int copy_len = 0;
    int copy_base = 0;
    int total_size = 0;

    pthread_mutex_lock (&mcast_msg_mutex);
    totemmrp_event_signal (TOTEM_EVENT_NEW_MSG, 1);

    /*
     * Remove zero length iovectors from the list
     */
    assert (iov_len < 64);
    for (dest = 0, src = 0; src < iov_len; src++) {
        if (iovec_in[src].iov_len) {
            memcpy (&iovec[dest++], &iovec_in[src],
                sizeof (struct iovec));
        }
    }
    iov_len = dest;

    max_packet_size = TOTEMPG_PACKET_SIZE -
        (sizeof (unsigned short) * (mcast_packed_msg_count + 1));

    mcast_packed_msg_lens[mcast_packed_msg_count] = 0;

    /*
     * Check if we would overwrite new message queue
     */
    for (i = 0; i < iov_len; i++) {
        total_size += iovec[i].iov_len;
    }

    if (byte_count_send_ok (total_size + sizeof(unsigned short) *
        (mcast_packed_msg_count)) == 0) {

        pthread_mutex_unlock (&mcast_msg_mutex);
        return(-1);
    }

    mcast.header.version = 0;
    for (i = 0; i < iov_len; ) {
        mcast.fragmented = 0;
        mcast.continuation = fragment_continuation;
        copy_len = iovec[i].iov_len - copy_base;

        /*
         * If it all fits with room left over, copy it in.
         * We need to leave at least sizeof(short) + 1 bytes in the
         * fragment_buffer on exit so that max_packet_size + fragment_size
         * doesn't exceed the size of the fragment_buffer on the next call.
         */
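        /*
         * Illustrative numbers (assumed): with max_packet_size 1490 and
         * fragment_size 1200, a 200-byte iovec satisfies
         * 200 + 1200 < 1490 - 2 and is simply staged; a 400-byte iovec
         * does not, so copy_len is clamped to min(400, 1490 - 1200) =
         * 290 bytes, the packet is sent with the fragmented flag set,
         * and the remaining 110 bytes are staged on the next pass of
         * this loop.
         */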
        if ((copy_len + fragment_size) <
            (max_packet_size - sizeof (unsigned short))) {

            memcpy (&fragmentation_data[fragment_size],
                (char *)iovec[i].iov_base + copy_base, copy_len);
            fragment_size += copy_len;
            mcast_packed_msg_lens[mcast_packed_msg_count] += copy_len;
            next_fragment = 1;
            copy_len = 0;
            copy_base = 0;
            i++;
            continue;

        /*
         * If it just fits or is too big, then send out what fits.
         */
        } else {
            unsigned char *data_ptr;

            copy_len = min(copy_len, max_packet_size - fragment_size);
            if (copy_len == max_packet_size) {
                data_ptr = (unsigned char *)iovec[i].iov_base + copy_base;
            } else {
                data_ptr = fragmentation_data;
                memcpy (&fragmentation_data[fragment_size],
                    (unsigned char *)iovec[i].iov_base + copy_base,
                    copy_len);
            }
            mcast_packed_msg_lens[mcast_packed_msg_count] += copy_len;

            /*
             * if we're not on the last iovec or the iovec is too large to
             * fit, then indicate a fragment.  This also means that the next
             * message will have the continuation of this one.
             */
            if ((i < (iov_len - 1)) ||
                ((copy_base + copy_len) < iovec[i].iov_len)) {
                if (!next_fragment) {
                    next_fragment++;
                }
                fragment_continuation = next_fragment;
                mcast.fragmented = next_fragment++;
                assert(fragment_continuation != 0);
                assert(mcast.fragmented != 0);
            } else {
                fragment_continuation = 0;
            }

            /*
             * assemble the message and send it
             */
            mcast.msg_count = ++mcast_packed_msg_count;
            iovecs[0].iov_base = (void *)&mcast;
            iovecs[0].iov_len = sizeof(struct totempg_mcast);
            iovecs[1].iov_base = (void *)mcast_packed_msg_lens;
            iovecs[1].iov_len = mcast_packed_msg_count *
                sizeof(unsigned short);
            iovecs[2].iov_base = (void *)data_ptr;
            iovecs[2].iov_len = max_packet_size;
            assert (totemmrp_avail() > 0);
            res = totemmrp_mcast (iovecs, 3, guarantee);
            if (res == -1) {
                goto error_exit;
            }

            /*
             * Recalculate counts and indexes for the next.
             */
            mcast_packed_msg_lens[0] = 0;
            mcast_packed_msg_count = 0;
            fragment_size = 0;
            max_packet_size = TOTEMPG_PACKET_SIZE - (sizeof(unsigned short));

            /*
             * If the iovec all fit, go to the next iovec
             */
            if ((copy_base + copy_len) == iovec[i].iov_len) {
                copy_len = 0;
                copy_base = 0;
                i++;

            /*
             * Continue with the rest of the current iovec.
             */
            } else {
                copy_base += copy_len;
            }
        }
    }

    /*
     * Bump only if we added message data.  This may be zero if
     * the last buffer just fit into the fragmentation_data buffer
     * and we were at the last iovec.
     */
    if (mcast_packed_msg_lens[mcast_packed_msg_count]) {
        mcast_packed_msg_count++;
    }

error_exit:
    pthread_mutex_unlock (&mcast_msg_mutex);
    return (res);
}
/*
 * Determine if a message of msg_size could be queued
 */
static int msg_count_send_ok (
    int msg_count)
{
    int avail = 0;

    avail = totemmrp_avail ();

    return ((avail - totempg_reserved) > msg_count);
}

static int byte_count_send_ok (
    int byte_count)
{
    unsigned int msg_count = 0;
    int avail = 0;

    avail = totemmrp_avail ();

    msg_count = (byte_count / (totempg_totem_config->net_mtu - sizeof (struct totempg_mcast) - 16)) + 1;

    return (avail >= msg_count);
}

static int send_reserve (
    int msg_size)
{
    unsigned int msg_count = 0;

    msg_count = (msg_size / (totempg_totem_config->net_mtu - sizeof (struct totempg_mcast) - 16)) + 1;
    totempg_reserved += msg_count;

    return (msg_count);
}

static void send_release (
    int msg_count)
{
    totempg_reserved -= msg_count;
}
int totempg_callback_token_create (
    void **handle_out,
    enum totem_callback_token_type type,
    int delete,
    int (*callback_fn) (enum totem_callback_token_type type, const void *),
    const void *data)
{
    unsigned int res;

    pthread_mutex_lock (&callback_token_mutex);
    res = totemmrp_callback_token_create (handle_out, type, delete,
        callback_fn, data);
    pthread_mutex_unlock (&callback_token_mutex);
    return (res);
}

void totempg_callback_token_destroy (
    void *handle_out)
{
    pthread_mutex_lock (&callback_token_mutex);
    totemmrp_callback_token_destroy (handle_out);
    pthread_mutex_unlock (&callback_token_mutex);
}

/*
 * vi: set autoindent tabstop=4 shiftwidth=4 :
 */
int totempg_groups_initialize (
    hdb_handle_t *handle,

    void (*deliver_fn) (
        unsigned int nodeid,
        const void *msg,
        unsigned int msg_len,
        int endian_conversion_required),

    void (*confchg_fn) (
        enum totem_configuration_type configuration_type,
        const unsigned int *member_list, size_t member_list_entries,
        const unsigned int *left_list, size_t left_list_entries,
        const unsigned int *joined_list, size_t joined_list_entries,
        const struct memb_ring_id *ring_id))
{
    struct totempg_group_instance *instance;
    unsigned int res;

    pthread_mutex_lock (&totempg_mutex);
    res = hdb_handle_create (&totempg_groups_instance_database,
        sizeof (struct totempg_group_instance), handle);
    if (res != 0) {
        goto error_exit;
    }

    if (*handle > totempg_max_handle) {
        totempg_max_handle = *handle;
    }

    res = hdb_handle_get (&totempg_groups_instance_database, *handle,
        (void *)&instance);
    if (res != 0) {
        goto error_destroy;
    }

    instance->deliver_fn = deliver_fn;
    instance->confchg_fn = confchg_fn;
    instance->groups = 0;
    instance->groups_cnt = 0;

    hdb_handle_put (&totempg_groups_instance_database, *handle);

    pthread_mutex_unlock (&totempg_mutex);
    return (0);

error_destroy:
    hdb_handle_destroy (&totempg_groups_instance_database, *handle);

error_exit:
    pthread_mutex_unlock (&totempg_mutex);
    return (-1);
}
int totempg_groups_join (
    hdb_handle_t handle,
    const struct totempg_group *groups,
    size_t group_cnt)
{
    struct totempg_group_instance *instance;
    struct totempg_group *new_groups;
    unsigned int res;

    pthread_mutex_lock (&totempg_mutex);
    res = hdb_handle_get (&totempg_groups_instance_database, handle,
        (void *)&instance);
    if (res != 0) {
        goto error_exit;
    }

    new_groups = realloc (instance->groups,
        sizeof (struct totempg_group) *
        (instance->groups_cnt + group_cnt));
    if (new_groups == 0) {
        res = ENOMEM;
        goto error_exit;
    }
    memcpy (&new_groups[instance->groups_cnt],
        groups, group_cnt * sizeof (struct totempg_group));
    instance->groups = new_groups;
    instance->groups_cnt += group_cnt;

    hdb_handle_put (&totempg_groups_instance_database, handle);

error_exit:
    pthread_mutex_unlock (&totempg_mutex);
    return (res);
}

int totempg_groups_leave (
    hdb_handle_t handle,
    const struct totempg_group *groups,
    size_t group_cnt)
{
    struct totempg_group_instance *instance;
    unsigned int res;

    pthread_mutex_lock (&totempg_mutex);
    res = hdb_handle_get (&totempg_groups_instance_database, handle,
        (void *)&instance);
    if (res != 0) {
        goto error_exit;
    }

    hdb_handle_put (&totempg_groups_instance_database, handle);

error_exit:
    pthread_mutex_unlock (&totempg_mutex);
    return (res);
}
#define MAX_IOVECS_FROM_APP 32
#define MAX_GROUPS_PER_MSG 32

int totempg_groups_mcast_joined (
    hdb_handle_t handle,
    const struct iovec *iovec,
    unsigned int iov_len,
    int guarantee)
{
    struct totempg_group_instance *instance;
    unsigned short group_len[MAX_GROUPS_PER_MSG + 1];
    struct iovec iovec_mcast[MAX_GROUPS_PER_MSG + 1 + MAX_IOVECS_FROM_APP];
    int i;
    unsigned int res;

    pthread_mutex_lock (&totempg_mutex);
    res = hdb_handle_get (&totempg_groups_instance_database, handle,
        (void *)&instance);
    if (res != 0) {
        goto error_exit;
    }

    /*
     * Build group_len structure and the iovec_mcast structure
     */
    group_len[0] = instance->groups_cnt;
    for (i = 0; i < instance->groups_cnt; i++) {
        group_len[i + 1] = instance->groups[i].group_len;
        iovec_mcast[i + 1].iov_len = instance->groups[i].group_len;
        iovec_mcast[i + 1].iov_base = (void *) instance->groups[i].group;
    }
    iovec_mcast[0].iov_len = (instance->groups_cnt + 1) * sizeof (unsigned short);
    iovec_mcast[0].iov_base = group_len;
    for (i = 0; i < iov_len; i++) {
        iovec_mcast[i + instance->groups_cnt + 1].iov_len = iovec[i].iov_len;
        iovec_mcast[i + instance->groups_cnt + 1].iov_base = iovec[i].iov_base;
    }

    res = mcast_msg (iovec_mcast, iov_len + instance->groups_cnt + 1, guarantee);
    hdb_handle_put (&totempg_groups_instance_database, handle);

error_exit:
    pthread_mutex_unlock (&totempg_mutex);
    return (res);
}
int totempg_groups_joined_reserve (
    hdb_handle_t handle,
    const struct iovec *iovec,
    unsigned int iov_len)
{
    struct totempg_group_instance *instance;
    unsigned int size = 0;
    unsigned int i;
    unsigned int res;
    unsigned int reserved = 0;

    pthread_mutex_lock (&totempg_mutex);
    pthread_mutex_lock (&mcast_msg_mutex);
    res = hdb_handle_get (&totempg_groups_instance_database, handle,
        (void *)&instance);
    if (res != 0) {
        goto error_exit;
    }

    for (i = 0; i < instance->groups_cnt; i++) {
        size += instance->groups[i].group_len;
    }
    for (i = 0; i < iov_len; i++) {
        size += iovec[i].iov_len;
    }

    if (size >= totempg_size_limit) {
        reserved = -1;
        goto error_put;
    }

    reserved = send_reserve (size);
    if (msg_count_send_ok (reserved) == 0) {
        send_release (reserved);
        reserved = 0;
    }

error_put:
    hdb_handle_put (&totempg_groups_instance_database, handle);

error_exit:
    pthread_mutex_unlock (&mcast_msg_mutex);
    pthread_mutex_unlock (&totempg_mutex);
    return (reserved);
}

int totempg_groups_joined_release (int msg_count)
{
    pthread_mutex_lock (&totempg_mutex);
    pthread_mutex_lock (&mcast_msg_mutex);
    send_release (msg_count);
    pthread_mutex_unlock (&mcast_msg_mutex);
    pthread_mutex_unlock (&totempg_mutex);
    return 0;
}
int totempg_groups_mcast_groups (
    hdb_handle_t handle,
    int guarantee,
    const struct totempg_group *groups,
    size_t groups_cnt,
    const struct iovec *iovec,
    unsigned int iov_len)
{
    struct totempg_group_instance *instance;
    unsigned short group_len[MAX_GROUPS_PER_MSG + 1];
    struct iovec iovec_mcast[MAX_GROUPS_PER_MSG + 1 + MAX_IOVECS_FROM_APP];
    int i;
    unsigned int res;

    pthread_mutex_lock (&totempg_mutex);
    res = hdb_handle_get (&totempg_groups_instance_database, handle,
        (void *)&instance);
    if (res != 0) {
        goto error_exit;
    }

    /*
     * Build group_len structure and the iovec_mcast structure
     */
    group_len[0] = groups_cnt;
    for (i = 0; i < groups_cnt; i++) {
        group_len[i + 1] = groups[i].group_len;
        iovec_mcast[i + 1].iov_len = groups[i].group_len;
        iovec_mcast[i + 1].iov_base = (void *) groups[i].group;
    }
    iovec_mcast[0].iov_len = (groups_cnt + 1) * sizeof (unsigned short);
    iovec_mcast[0].iov_base = group_len;
    for (i = 0; i < iov_len; i++) {
        iovec_mcast[i + groups_cnt + 1].iov_len = iovec[i].iov_len;
        iovec_mcast[i + groups_cnt + 1].iov_base = iovec[i].iov_base;
    }

    res = mcast_msg (iovec_mcast, iov_len + groups_cnt + 1, guarantee);
    hdb_handle_put (&totempg_groups_instance_database, handle);

error_exit:
    pthread_mutex_unlock (&totempg_mutex);
    return (res);
}
/*
 * Returns -1 if error, 0 if can't send, 1 if can send the message
 */
int totempg_groups_send_ok_groups (
    hdb_handle_t handle,
    const struct totempg_group *groups,
    size_t groups_cnt,
    const struct iovec *iovec,
    unsigned int iov_len)
{
    struct totempg_group_instance *instance;
    unsigned int size = 0;
    unsigned int i;
    unsigned int res;

    pthread_mutex_lock (&totempg_mutex);
    res = hdb_handle_get (&totempg_groups_instance_database, handle,
        (void *)&instance);
    if (res != 0) {
        goto error_exit;
    }

    for (i = 0; i < groups_cnt; i++) {
        size += groups[i].group_len;
    }
    for (i = 0; i < iov_len; i++) {
        size += iovec[i].iov_len;
    }

    res = msg_count_send_ok (size);

    hdb_handle_put (&totempg_groups_instance_database, handle);

error_exit:
    pthread_mutex_unlock (&totempg_mutex);
    return (res);
}
int totempg_ifaces_get (
    unsigned int nodeid,
    struct totem_ip_address *interfaces,
    char ***status,
    unsigned int *iface_count)
{
    int res;

    res = totemmrp_ifaces_get (
        nodeid,
        interfaces,
        status,
        iface_count);

    return (res);
}

void totempg_event_signal (enum totem_event_type type, int value)
{
    totemmrp_event_signal (type, value);
}

void* totempg_get_stats (void)
{
    return &totempg_stats;
}

int totempg_crypto_set (
    unsigned int type)
{
    int res;

    res = totemmrp_crypto_set (
        type);

    return (res);
}

int totempg_ring_reenable (void)
{
    int res;

    res = totemmrp_ring_reenable ();

    return (res);
}

const char *totempg_ifaces_print (unsigned int nodeid)
{
    static char iface_string[256 * INTERFACE_MAX];
    char one_iface[64];
    struct totem_ip_address interfaces[INTERFACE_MAX];
    char **status;
    unsigned int iface_count;
    unsigned int i;
    int res;

    iface_string[0] = '\0';

    res = totempg_ifaces_get (nodeid, interfaces, &status, &iface_count);
    if (res == -1) {
        return ("no interface found for nodeid");
    }

    for (i = 0; i < iface_count; i++) {
        sprintf (one_iface, "r(%d) ip(%s) ",
            i, totemip_print (&interfaces[i]));
        strcat (iface_string, one_iface);
    }
    return (iface_string);
}

unsigned int totempg_my_nodeid_get (void)
{
    return (totemmrp_my_nodeid_get());
}

int totempg_my_family_get (void)
{
    return (totemmrp_my_family_get());
}

extern void totempg_service_ready_register (
    void (*totem_service_ready) (void))
{
    totemmrp_service_ready_register (totem_service_ready);
}

extern int totempg_member_add (
    const struct totem_ip_address *member,
    int ring_no);

extern int totempg_member_remove (
    const struct totem_ip_address *member,
    int ring_no);