/*
 * Copyright (c) 2009-2012 Red Hat, Inc.
 *
 * All rights reserved.
 *
 * Author: Steven Dake (sdake@redhat.com)
 *
 * This software licensed under BSD license, the text of which follows:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * - Redistributions of source code must retain the above copyright notice,
 *   this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright notice,
 *   this list of conditions and the following disclaimer in the documentation
 *   and/or other materials provided with the distribution.
 * - Neither the name of the MontaVista Software, Inc. nor the names of its
 *   contributors may be used to endorse or promote products derived from this
 *   software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <config.h>

#include <sys/types.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <sys/ioctl.h>
#include <netinet/in.h>
#include <sys/uio.h>
#include <unistd.h>
#include <fcntl.h>
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>
#include <time.h>
#include <arpa/inet.h>

#include <corosync/corotypes.h>
#include <corosync/swab.h>
#include <corosync/totem/totempg.h>
#include <corosync/totem/totem.h>
#include <corosync/logsys.h>
#include <qb/qbipc_common.h>

#include "schedwrk.h"
#include "quorum.h"
#include "sync.h"

LOGSYS_DECLARE_SUBSYS ("SYNC");
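
/*
 * Message types multicast to the "sync" totempg group; each is stored in the
 * qb_ipc_request_header.id field and dispatched in sync_deliver_fn().
 */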
#define MESSAGE_REQ_SYNC_BARRIER 0
#define MESSAGE_REQ_SYNC_SERVICE_BUILD 1
#define MESSAGE_REQ_SYNC_MEMB_DETERMINE 2

enum sync_process_state {
	INIT,
	PROCESS,
	ACTIVATE
};

enum sync_state {
	SYNC_SERVICELIST_BUILD,
	SYNC_PROCESS,
	SYNC_BARRIER
};

struct service_entry {
	int service_id;
	void (*sync_init) (
		const unsigned int *trans_list,
		size_t trans_list_entries,
		const unsigned int *member_list,
		size_t member_list_entries,
		const struct memb_ring_id *ring_id);
	void (*sync_abort) (void);
	int (*sync_process) (void);
	void (*sync_activate) (void);
	enum sync_process_state state;
	char name[128];
};

struct processor_entry {
	int nodeid;
	int received;
};

struct req_exec_memb_determine_message {
	struct qb_ipc_request_header header __attribute__((aligned(8)));
	struct memb_ring_id ring_id __attribute__((aligned(8)));
};

struct req_exec_service_build_message {
	struct qb_ipc_request_header header __attribute__((aligned(8)));
	struct memb_ring_id ring_id __attribute__((aligned(8)));
	int service_list_entries __attribute__((aligned(8)));
	int service_list[128] __attribute__((aligned(8)));
};

struct req_exec_barrier_message {
	struct qb_ipc_request_header header __attribute__((aligned(8)));
	struct memb_ring_id ring_id __attribute__((aligned(8)));
};

static enum sync_state my_state = SYNC_BARRIER;
static struct memb_ring_id my_ring_id;
static struct memb_ring_id my_memb_determine_ring_id;
static int my_memb_determine = 0;
static unsigned int my_memb_determine_list[PROCESSOR_COUNT_MAX];
static unsigned int my_memb_determine_list_entries = 0;
static int my_processing_idx = 0;
static hdb_handle_t my_schedwrk_handle;
static struct processor_entry my_processor_list[PROCESSOR_COUNT_MAX];
static unsigned int my_member_list[PROCESSOR_COUNT_MAX];
static unsigned int my_trans_list[PROCESSOR_COUNT_MAX];
static size_t my_member_list_entries = 0;
static size_t my_trans_list_entries = 0;
static int my_processor_list_entries = 0;
static struct service_entry my_service_list[128];
static int my_service_list_entries = 0;
static const struct memb_ring_id sync_ring_id;
static struct service_entry my_initial_service_list[PROCESSOR_COUNT_MAX];
static int my_initial_service_list_entries;
static void (*sync_synchronization_completed) (void);

static void sync_deliver_fn (
	unsigned int nodeid,
	const void *msg,
	unsigned int msg_len,
	int endian_conversion_required);

static int schedwrk_processor (const void *context);

static void sync_process_enter (void);

static struct totempg_group sync_group = {
	.group = "sync",
	.group_len = 4
};

static void *sync_group_handle;
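
/*
 * sync_init(): called once at startup.  Joins the "sync" totempg group and
 * records the sync callbacks of every service id (0..63) that provides a
 * sync_init handler, along with the "synchronization completed" callback.
 */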
int sync_init (
	int (*sync_callbacks_retrieve) (
		int service_id,
		struct sync_callbacks *callbacks),
	void (*synchronization_completed) (void))
{
	unsigned int res;
	int i;
	struct sync_callbacks sync_callbacks;

	res = totempg_groups_initialize (
		&sync_group_handle,
		sync_deliver_fn,
		NULL);
	if (res == -1) {
		log_printf (LOGSYS_LEVEL_ERROR,
			"Couldn't initialize groups interface.");
		return (-1);
	}

	res = totempg_groups_join (
		sync_group_handle,
		&sync_group,
		1);
	if (res == -1) {
		log_printf (LOGSYS_LEVEL_ERROR, "Couldn't join group.");
		return (-1);
	}

	sync_synchronization_completed = synchronization_completed;

	for (i = 0; i < 64; i++) {
		res = sync_callbacks_retrieve (i, &sync_callbacks);
		if (res == -1) {
			continue;
		}
		if (sync_callbacks.sync_init == NULL) {
			continue;
		}
		my_initial_service_list[my_initial_service_list_entries].state =
			INIT;
		my_initial_service_list[my_initial_service_list_entries].service_id = i;
		strcpy (my_initial_service_list[my_initial_service_list_entries].name,
			sync_callbacks.name);
		my_initial_service_list[my_initial_service_list_entries].sync_init = sync_callbacks.sync_init;
		my_initial_service_list[my_initial_service_list_entries].sync_process = sync_callbacks.sync_process;
		my_initial_service_list[my_initial_service_list_entries].sync_abort = sync_callbacks.sync_abort;
		my_initial_service_list[my_initial_service_list_entries].sync_activate = sync_callbacks.sync_activate;
		my_initial_service_list_entries += 1;
	}
	return (0);
}
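
/*
 * Handler for MESSAGE_REQ_SYNC_BARRIER.  Marks the sending node as having
 * reached the barrier; once every processor in my_processor_list has
 * reported in, the current service is activated and either the next service
 * is processed or synchronization is reported as completed.
 */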
static void sync_barrier_handler (unsigned int nodeid, const void *msg)
{
	const struct req_exec_barrier_message *req_exec_barrier_message = msg;
	int i;
	int barrier_reached = 1;

	if (memcmp (&my_ring_id, &req_exec_barrier_message->ring_id,
		sizeof (struct memb_ring_id)) != 0) {
		log_printf (LOGSYS_LEVEL_DEBUG, "barrier for old ring - discarding");
		return;
	}
	for (i = 0; i < my_processor_list_entries; i++) {
		if (my_processor_list[i].nodeid == nodeid) {
			my_processor_list[i].received = 1;
		}
	}
	for (i = 0; i < my_processor_list_entries; i++) {
		if (my_processor_list[i].received == 0) {
			barrier_reached = 0;
		}
	}
	if (barrier_reached) {
		log_printf (LOGSYS_LEVEL_DEBUG, "Committing synchronization for %s",
			my_service_list[my_processing_idx].name);
		my_service_list[my_processing_idx].state = ACTIVATE;
		my_service_list[my_processing_idx].sync_activate ();

		my_processing_idx += 1;
		if (my_service_list_entries == my_processing_idx) {
			my_memb_determine_list_entries = 0;
			sync_synchronization_completed ();
		} else {
			sync_process_enter ();
		}
	}
}
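
/*
 * Dummy callbacks assigned to services that appear in another node's service
 * build message but have no local callbacks; they let the process/barrier/
 * activate sequence run without a local implementation.
 */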
static void dummy_sync_init (
	const unsigned int *trans_list,
	size_t trans_list_entries,
	const unsigned int *member_list,
	size_t member_list_entries,
	const struct memb_ring_id *ring_id)
{
}

static void dummy_sync_abort (void)
{
}

static int dummy_sync_process (void)
{
	return (0);
}

static void dummy_sync_activate (void)
{
}
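
/*
 * qsort() comparator ordering service entries by ascending service_id.
 */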
static int service_entry_compare (const void *a, const void *b)
{
	const struct service_entry *service_entry_a = a;
	const struct service_entry *service_entry_b = b;

	/* qsort expects a negative, zero, or positive result */
	return (service_entry_a->service_id - service_entry_b->service_id);
}
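
/*
 * Handler for MESSAGE_REQ_SYNC_MEMB_DETERMINE.  Collects the set of nodes
 * that have multicast a membership-determine message for the current
 * membership-determine ring; sync_start() can then use this list instead of
 * the member list supplied by totem.
 */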
static void sync_memb_determine (unsigned int nodeid, const void *msg)
{
	const struct req_exec_memb_determine_message *req_exec_memb_determine_message = msg;
	int found = 0;
	int i;

	if (memcmp (&req_exec_memb_determine_message->ring_id,
		&my_memb_determine_ring_id, sizeof (struct memb_ring_id)) != 0) {
		log_printf (LOGSYS_LEVEL_DEBUG, "memb determine for old ring - discarding");
		return;
	}

	my_memb_determine = 1;
	for (i = 0; i < my_memb_determine_list_entries; i++) {
		if (my_memb_determine_list[i] == nodeid) {
			found = 1;
		}
	}
	if (found == 0) {
		my_memb_determine_list[my_memb_determine_list_entries] = nodeid;
		my_memb_determine_list_entries += 1;
	}
}
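
/*
 * Handler for MESSAGE_REQ_SYNC_SERVICE_BUILD.  Merges the sender's service
 * list into my_service_list (adding dummy callbacks for services unknown
 * locally), keeps the list sorted by service_id, and treats the message as
 * that node's arrival at the service-build barrier.  Once every processor
 * has sent its list, synchronization processing begins.
 */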
static void sync_service_build_handler (unsigned int nodeid, const void *msg)
{
	const struct req_exec_service_build_message *req_exec_service_build_message = msg;
	int i, j;
	int barrier_reached = 1;
	int found;
	int qsort_trigger = 0;

	if (memcmp (&my_ring_id, &req_exec_service_build_message->ring_id,
		sizeof (struct memb_ring_id)) != 0) {
		log_printf (LOGSYS_LEVEL_DEBUG, "service build for old ring - discarding");
		return;
	}
	for (i = 0; i < req_exec_service_build_message->service_list_entries; i++) {
		found = 0;
		for (j = 0; j < my_service_list_entries; j++) {
			if (req_exec_service_build_message->service_list[i] ==
				my_service_list[j].service_id) {
				found = 1;
				break;
			}
		}
		if (found == 0) {
			my_service_list[my_service_list_entries].state =
				INIT;
			my_service_list[my_service_list_entries].service_id =
				req_exec_service_build_message->service_list[i];
			sprintf (my_service_list[my_service_list_entries].name,
				"External Service (id = %d)",
				req_exec_service_build_message->service_list[i]);
			my_service_list[my_service_list_entries].sync_init =
				dummy_sync_init;
			my_service_list[my_service_list_entries].sync_abort =
				dummy_sync_abort;
			my_service_list[my_service_list_entries].sync_process =
				dummy_sync_process;
			my_service_list[my_service_list_entries].sync_activate =
				dummy_sync_activate;
			my_service_list_entries += 1;
			qsort_trigger = 1;
		}
	}
	if (qsort_trigger) {
		qsort (my_service_list, my_service_list_entries,
			sizeof (struct service_entry), service_entry_compare);
	}
	for (i = 0; i < my_processor_list_entries; i++) {
		if (my_processor_list[i].nodeid == nodeid) {
			my_processor_list[i].received = 1;
		}
	}
	for (i = 0; i < my_processor_list_entries; i++) {
		if (my_processor_list[i].received == 0) {
			barrier_reached = 0;
		}
	}
	if (barrier_reached) {
		sync_process_enter ();
	}
}
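
/*
 * Delivery callback registered with totempg for the "sync" group; dispatches
 * incoming messages to the handler matching the request header id.
 */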
static void sync_deliver_fn (
	unsigned int nodeid,
	const void *msg,
	unsigned int msg_len,
	int endian_conversion_required)
{
	struct qb_ipc_request_header *header = (struct qb_ipc_request_header *)msg;

	switch (header->id) {
	case MESSAGE_REQ_SYNC_BARRIER:
		sync_barrier_handler (nodeid, msg);
		break;
	case MESSAGE_REQ_SYNC_SERVICE_BUILD:
		sync_service_build_handler (nodeid, msg);
		break;
	case MESSAGE_REQ_SYNC_MEMB_DETERMINE:
		sync_memb_determine (nodeid, msg);
		break;
	}
}
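
/*
 * Multicasts a membership-determine message carrying the current
 * membership-determine ring id to the "sync" group.
 */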
static void memb_determine_message_transmit (void)
{
	struct iovec iovec;
	struct req_exec_memb_determine_message req_exec_memb_determine_message;

	req_exec_memb_determine_message.header.size = sizeof (struct req_exec_memb_determine_message);
	req_exec_memb_determine_message.header.id = MESSAGE_REQ_SYNC_MEMB_DETERMINE;

	memcpy (&req_exec_memb_determine_message.ring_id,
		&my_memb_determine_ring_id,
		sizeof (struct memb_ring_id));

	iovec.iov_base = (char *)&req_exec_memb_determine_message;
	iovec.iov_len = sizeof (req_exec_memb_determine_message);

	(void)totempg_groups_mcast_joined (sync_group_handle,
		&iovec, 1, TOTEMPG_AGREED);
}
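
/*
 * Multicasts a barrier message for the current ring to the "sync" group.
 */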
static void barrier_message_transmit (void)
{
	struct iovec iovec;
	struct req_exec_barrier_message req_exec_barrier_message;

	req_exec_barrier_message.header.size = sizeof (struct req_exec_barrier_message);
	req_exec_barrier_message.header.id = MESSAGE_REQ_SYNC_BARRIER;

	memcpy (&req_exec_barrier_message.ring_id, &my_ring_id,
		sizeof (struct memb_ring_id));

	iovec.iov_base = (char *)&req_exec_barrier_message;
	iovec.iov_len = sizeof (req_exec_barrier_message);

	(void)totempg_groups_mcast_joined (sync_group_handle,
		&iovec, 1, TOTEMPG_AGREED);
}
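
/*
 * Fills in the header and ring id of a service build message and multicasts
 * it to the "sync" group.
 */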
static void service_build_message_transmit (struct req_exec_service_build_message *service_build_message)
{
	struct iovec iovec;

	service_build_message->header.size = sizeof (struct req_exec_service_build_message);
	service_build_message->header.id = MESSAGE_REQ_SYNC_SERVICE_BUILD;

	memcpy (&service_build_message->ring_id, &my_ring_id,
		sizeof (struct memb_ring_id));

	iovec.iov_base = (void *)service_build_message;
	iovec.iov_len = sizeof (struct req_exec_service_build_message);

	(void)totempg_groups_mcast_joined (sync_group_handle,
		&iovec, 1, TOTEMPG_AGREED);
}
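
/*
 * Enters the barrier state for the service currently being synchronized and
 * announces that this node has reached the barrier.
 */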
static void sync_barrier_enter (void)
{
	my_state = SYNC_BARRIER;
	barrier_message_transmit ();
}
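
/*
 * Enters the per-service processing state: clears the per-processor barrier
 * flags and schedules schedwrk_processor() to drive the current service's
 * sync_init/sync_process callbacks.  If no services need synchronization,
 * completion is reported immediately.
 */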
static void sync_process_enter (void)
{
	int i;

	my_state = SYNC_PROCESS;

	/*
	 * No sync services
	 */
	if (my_service_list_entries == 0) {
		my_state = SYNC_SERVICELIST_BUILD;
		my_memb_determine_list_entries = 0;
		sync_synchronization_completed ();
		return;
	}
	for (i = 0; i < my_processor_list_entries; i++) {
		my_processor_list[i].received = 0;
	}
	schedwrk_create (&my_schedwrk_handle,
		schedwrk_processor,
		NULL);
}
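
/*
 * Enters the service-list build state for a new membership: records the
 * processor and member lists, resets the processing index, seeds
 * my_service_list from the locally registered services, and multicasts this
 * node's service list so all nodes converge on a common, sorted list.
 */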
static void sync_servicelist_build_enter (
	const unsigned int *member_list,
	size_t member_list_entries,
	const struct memb_ring_id *ring_id)
{
	struct req_exec_service_build_message service_build;
	int i;

	my_state = SYNC_SERVICELIST_BUILD;
	for (i = 0; i < member_list_entries; i++) {
		my_processor_list[i].nodeid = member_list[i];
		my_processor_list[i].received = 0;
	}
	my_processor_list_entries = member_list_entries;

	memcpy (my_member_list, member_list,
		member_list_entries * sizeof (unsigned int));
	my_member_list_entries = member_list_entries;

	my_processing_idx = 0;

	memcpy (my_service_list, my_initial_service_list,
		sizeof (struct service_entry) *
		my_initial_service_list_entries);
	my_service_list_entries = my_initial_service_list_entries;

	for (i = 0; i < my_initial_service_list_entries; i++) {
		service_build.service_list[i] =
			my_initial_service_list[i].service_id;
	}
	service_build.service_list_entries = i;

	service_build_message_transmit (&service_build);
}
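
/*
 * schedwrk work function that drives the service at my_processing_idx.  On
 * the first pass it computes the transitional node list (previous ring
 * members still present in the new member list) and calls the service's
 * sync_init; on each pass it calls sync_process.  A sync_process return of 0
 * means the service is done and the barrier is entered; a non-zero return
 * (passed back here as -1) appears to ask schedwrk to call this function
 * again later.
 */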
static int schedwrk_processor (const void *context)
{
	int res = 0;

	if (my_service_list[my_processing_idx].state == INIT) {
		unsigned int old_trans_list[PROCESSOR_COUNT_MAX];
		size_t old_trans_list_entries = 0;
		int o, m;

		my_service_list[my_processing_idx].state = PROCESS;

		memcpy (old_trans_list, my_trans_list, my_trans_list_entries *
			sizeof (unsigned int));
		old_trans_list_entries = my_trans_list_entries;

		my_trans_list_entries = 0;
		for (o = 0; o < old_trans_list_entries; o++) {
			for (m = 0; m < my_member_list_entries; m++) {
				if (old_trans_list[o] == my_member_list[m]) {
					my_trans_list[my_trans_list_entries] = my_member_list[m];
					my_trans_list_entries++;
					break;
				}
			}
		}
		my_service_list[my_processing_idx].sync_init (my_trans_list,
			my_trans_list_entries, my_member_list,
			my_member_list_entries,
			&my_ring_id);
	}
	if (my_service_list[my_processing_idx].state == PROCESS) {
		my_service_list[my_processing_idx].state = PROCESS;
		res = my_service_list[my_processing_idx].sync_process ();
		if (res == 0) {
			sync_barrier_enter();
		} else {
			return (-1);
		}
	}
	return (0);
}
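
/*
 * Called when a new configuration is delivered.  Records the new ring id and
 * starts service-list building, using either the node list collected through
 * membership-determine messages (if any were received for this transition)
 * or the member list supplied by totem.
 */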
void sync_start (
	const unsigned int *member_list,
	size_t member_list_entries,
	const struct memb_ring_id *ring_id)
{
	ENTER();
	memcpy (&my_ring_id, ring_id, sizeof (struct memb_ring_id));

	if (my_memb_determine) {
		my_memb_determine = 0;
		sync_servicelist_build_enter (my_memb_determine_list,
			my_memb_determine_list_entries, ring_id);
	} else {
		sync_servicelist_build_enter (member_list, member_list_entries,
			ring_id);
	}
}
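
/*
 * Saves the transitional membership (nodes carried over from the previous
 * ring) so schedwrk_processor() can later intersect it with the new member
 * list.
 */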
void sync_save_transitional (
	const unsigned int *member_list,
	size_t member_list_entries,
	const struct memb_ring_id *ring_id)
{
	ENTER();
	memcpy (my_trans_list, member_list, member_list_entries *
		sizeof (unsigned int));
	my_trans_list_entries = member_list_entries;
}
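
/*
 * Aborts an in-progress synchronization, letting the current service clean
 * up, and invalidates my_ring_id so stale messages for the old ring are
 * discarded by the handlers above.
 */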
void sync_abort (void)
{
	ENTER();
	if (my_state == SYNC_PROCESS) {
		schedwrk_destroy (my_schedwrk_handle);
		my_service_list[my_processing_idx].sync_abort ();
	}

	/* This prevents any "old" barrier messages from causing
	 * problems.
	 */
	memset (&my_ring_id, 0, sizeof (struct memb_ring_id));
}
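
/*
 * Records the ring id for an upcoming membership-determine round and
 * multicasts this node's membership-determine message.
 */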
void sync_memb_list_determine (const struct memb_ring_id *ring_id)
{
	ENTER();
	memcpy (&my_memb_determine_ring_id, ring_id,
		sizeof (struct memb_ring_id));

	memb_determine_message_transmit ();
}
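
/*
 * Discards any collected membership-determine state.
 */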
void sync_memb_list_abort (void)
{
	ENTER();
	my_memb_determine_list_entries = 0;
	memset (&my_memb_determine_ring_id, 0, sizeof (struct memb_ring_id));
}