sync.c
/*
 * Copyright (c) 2009-2012 Red Hat, Inc.
 *
 * All rights reserved.
 *
 * Author: Steven Dake (sdake@redhat.com)
 *
 * This software licensed under BSD license, the text of which follows:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * - Redistributions of source code must retain the above copyright notice,
 *   this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright notice,
 *   this list of conditions and the following disclaimer in the documentation
 *   and/or other materials provided with the distribution.
 * - Neither the name of the MontaVista Software, Inc. nor the names of its
 *   contributors may be used to endorse or promote products derived from this
 *   software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <config.h>

#include <sys/types.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <sys/ioctl.h>
#include <netinet/in.h>
#include <sys/uio.h>
#include <unistd.h>
#include <fcntl.h>
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>
#include <time.h>
#include <arpa/inet.h>

#include <corosync/corotypes.h>
#include <corosync/swab.h>
#include <corosync/totem/totempg.h>
#include <corosync/totem/totem.h>
#include <corosync/logsys.h>
#include <qb/qbipc_common.h>

#include "schedwrk.h"
#include "quorum.h"
#include "sync.h"
#include "main.h"

LOGSYS_DECLARE_SUBSYS ("SYNC");
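/*
 * Synchronization is driven by three multicast message types (barrier,
 * service build and membership determine) and two small state machines:
 * sync_state tracks the engine as a whole (build the service list, run each
 * service's sync, wait at the barrier), while sync_process_state tracks the
 * progress of the service currently being synchronized.
 */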
#define MESSAGE_REQ_SYNC_BARRIER 0
#define MESSAGE_REQ_SYNC_SERVICE_BUILD 1
#define MESSAGE_REQ_SYNC_MEMB_DETERMINE 2

enum sync_process_state {
	INIT,
	PROCESS,
	ACTIVATE
};

enum sync_state {
	SYNC_SERVICELIST_BUILD,
	SYNC_PROCESS,
	SYNC_BARRIER
};

struct service_entry {
	int service_id;
	void (*sync_init) (
		const unsigned int *trans_list,
		size_t trans_list_entries,
		const unsigned int *member_list,
		size_t member_list_entries,
		const struct memb_ring_id *ring_id);
	void (*sync_abort) (void);
	int (*sync_process) (void);
	void (*sync_activate) (void);
	enum sync_process_state state;
	char name[128];
};

struct processor_entry {
	int nodeid;
	int received;
};

struct req_exec_memb_determine_message {
	struct qb_ipc_request_header header __attribute__((aligned(8)));
	struct memb_ring_id ring_id __attribute__((aligned(8)));
};

struct req_exec_service_build_message {
	struct qb_ipc_request_header header __attribute__((aligned(8)));
	struct memb_ring_id ring_id __attribute__((aligned(8)));
	int service_list_entries __attribute__((aligned(8)));
	int service_list[128] __attribute__((aligned(8)));
};

struct req_exec_barrier_message {
	struct qb_ipc_request_header header __attribute__((aligned(8)));
	struct memb_ring_id ring_id __attribute__((aligned(8)));
};
static enum sync_state my_state = SYNC_BARRIER;
static struct memb_ring_id my_ring_id;
static struct memb_ring_id my_memb_determine_ring_id;
static int my_memb_determine = 0;
static unsigned int my_memb_determine_list[PROCESSOR_COUNT_MAX];
static unsigned int my_memb_determine_list_entries = 0;
static int my_processing_idx = 0;
static hdb_handle_t my_schedwrk_handle;
static struct processor_entry my_processor_list[PROCESSOR_COUNT_MAX];
static unsigned int my_member_list[PROCESSOR_COUNT_MAX];
static unsigned int my_trans_list[PROCESSOR_COUNT_MAX];
static size_t my_member_list_entries = 0;
static size_t my_trans_list_entries = 0;
static int my_processor_list_entries = 0;
static struct service_entry my_service_list[SERVICES_COUNT_MAX];
static int my_service_list_entries = 0;
static void (*sync_synchronization_completed) (void);

static void sync_deliver_fn (
	unsigned int nodeid,
	const void *msg,
	unsigned int msg_len,
	int endian_conversion_required);

static int schedwrk_processor (const void *context);

static void sync_process_enter (void);

static struct totempg_group sync_group = {
	.group = "sync",
	.group_len = 4
};

static void *sync_group_handle;

int (*my_sync_callbacks_retrieve) (
	int service_id,
	struct sync_callbacks *callbacks);
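/*
 * Register with totempg: create a groups instance for the "sync" group,
 * join it, and remember the per-service callback lookup function and the
 * callback to invoke once all services have synchronized.
 */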
int sync_init (
	int (*sync_callbacks_retrieve) (
		int service_id,
		struct sync_callbacks *callbacks),
	void (*synchronization_completed) (void))
{
	unsigned int res;

	res = totempg_groups_initialize (
		&sync_group_handle,
		sync_deliver_fn,
		NULL);
	if (res == -1) {
		log_printf (LOGSYS_LEVEL_ERROR,
			"Couldn't initialize groups interface.");
		return (-1);
	}

	res = totempg_groups_join (
		sync_group_handle,
		&sync_group,
		1);
	if (res == -1) {
		log_printf (LOGSYS_LEVEL_ERROR, "Couldn't join group.");
		return (-1);
	}

	sync_synchronization_completed = synchronization_completed;
	my_sync_callbacks_retrieve = sync_callbacks_retrieve;

	return (0);
}
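/*
 * Handle a barrier message.  Messages carrying a stale ring id are
 * discarded.  Once every processor in the current membership has been heard
 * from, the service being synchronized is activated and the engine moves on
 * to the next service, or reports completion when none remain.
 */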
static void sync_barrier_handler (unsigned int nodeid, const void *msg)
{
	const struct req_exec_barrier_message *req_exec_barrier_message = msg;
	int i;
	int barrier_reached = 1;

	if (memcmp (&my_ring_id, &req_exec_barrier_message->ring_id,
		sizeof (struct memb_ring_id)) != 0) {
		log_printf (LOGSYS_LEVEL_DEBUG, "barrier for old ring - discarding");
		return;
	}
	for (i = 0; i < my_processor_list_entries; i++) {
		if (my_processor_list[i].nodeid == nodeid) {
			my_processor_list[i].received = 1;
		}
	}
	for (i = 0; i < my_processor_list_entries; i++) {
		if (my_processor_list[i].received == 0) {
			barrier_reached = 0;
		}
	}
	if (barrier_reached) {
		log_printf (LOGSYS_LEVEL_DEBUG, "Committing synchronization for %s",
			my_service_list[my_processing_idx].name);
		my_service_list[my_processing_idx].state = ACTIVATE;

		if (my_sync_callbacks_retrieve(my_service_list[my_processing_idx].service_id, NULL) != -1) {
			my_service_list[my_processing_idx].sync_activate ();
		}

		my_processing_idx += 1;
		if (my_service_list_entries == my_processing_idx) {
			my_memb_determine_list_entries = 0;
			sync_synchronization_completed ();
		} else {
			sync_process_enter ();
		}
	}
}
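/*
 * Placeholder callbacks installed for services that appear in another
 * node's service build message but are not loaded locally, followed by the
 * comparator used to keep the service list sorted by service_id.
 */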
static void dummy_sync_init (
	const unsigned int *trans_list,
	size_t trans_list_entries,
	const unsigned int *member_list,
	size_t member_list_entries,
	const struct memb_ring_id *ring_id)
{
}

static void dummy_sync_abort (void)
{
}

static int dummy_sync_process (void)
{
	return (0);
}

static void dummy_sync_activate (void)
{
}

static int service_entry_compare (const void *a, const void *b)
{
	const struct service_entry *service_entry_a = a;
	const struct service_entry *service_entry_b = b;

	return (service_entry_a->service_id > service_entry_b->service_id);
}
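/*
 * Handle a membership determine message: if it belongs to the current
 * determine ring, note that a determined membership is available and record
 * the sending node in the determine list (once).
 */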
static void sync_memb_determine (unsigned int nodeid, const void *msg)
{
	const struct req_exec_memb_determine_message *req_exec_memb_determine_message = msg;
	int found = 0;
	int i;

	if (memcmp (&req_exec_memb_determine_message->ring_id,
		&my_memb_determine_ring_id, sizeof (struct memb_ring_id)) != 0) {
		log_printf (LOGSYS_LEVEL_DEBUG, "memb determine for old ring - discarding");
		return;
	}

	my_memb_determine = 1;
	for (i = 0; i < my_memb_determine_list_entries; i++) {
		if (my_memb_determine_list[i] == nodeid) {
			found = 1;
		}
	}
	if (found == 0) {
		my_memb_determine_list[my_memb_determine_list_entries] = nodeid;
		my_memb_determine_list_entries += 1;
	}
}
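/*
 * Handle a service build message: merge the sender's service ids into the
 * local list (unknown services get the placeholder callbacks), re-sort the
 * list if it grew, and mark the sender as heard from.  When every processor
 * has contributed its list, synchronization processing starts.
 */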
static void sync_service_build_handler (unsigned int nodeid, const void *msg)
{
	const struct req_exec_service_build_message *req_exec_service_build_message = msg;
	int i, j;
	int barrier_reached = 1;
	int found;
	int qsort_trigger = 0;

	if (memcmp (&my_ring_id, &req_exec_service_build_message->ring_id,
		sizeof (struct memb_ring_id)) != 0) {
		log_printf (LOGSYS_LEVEL_DEBUG, "service build for old ring - discarding");
		return;
	}
	for (i = 0; i < req_exec_service_build_message->service_list_entries; i++) {
		found = 0;
		for (j = 0; j < my_service_list_entries; j++) {
			if (req_exec_service_build_message->service_list[i] ==
				my_service_list[j].service_id) {
				found = 1;
				break;
			}
		}
		if (found == 0) {
			my_service_list[my_service_list_entries].state = INIT;
			my_service_list[my_service_list_entries].service_id =
				req_exec_service_build_message->service_list[i];
			sprintf (my_service_list[my_service_list_entries].name,
				"Unknown External Service (id = %d)\n",
				req_exec_service_build_message->service_list[i]);
			my_service_list[my_service_list_entries].sync_init =
				dummy_sync_init;
			my_service_list[my_service_list_entries].sync_abort =
				dummy_sync_abort;
			my_service_list[my_service_list_entries].sync_process =
				dummy_sync_process;
			my_service_list[my_service_list_entries].sync_activate =
				dummy_sync_activate;
			my_service_list_entries += 1;

			qsort_trigger = 1;
		}
	}
	if (qsort_trigger) {
		qsort (my_service_list, my_service_list_entries,
			sizeof (struct service_entry), service_entry_compare);
	}
	for (i = 0; i < my_processor_list_entries; i++) {
		if (my_processor_list[i].nodeid == nodeid) {
			my_processor_list[i].received = 1;
		}
	}
	for (i = 0; i < my_processor_list_entries; i++) {
		if (my_processor_list[i].received == 0) {
			barrier_reached = 0;
		}
	}
	if (barrier_reached) {
		sync_process_enter ();
	}
}
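/*
 * totempg delivery callback for the "sync" group: dispatch on the message
 * id to the barrier, service build or membership determine handler.
 */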
static void sync_deliver_fn (
	unsigned int nodeid,
	const void *msg,
	unsigned int msg_len,
	int endian_conversion_required)
{
	struct qb_ipc_request_header *header = (struct qb_ipc_request_header *)msg;

	switch (header->id) {
	case MESSAGE_REQ_SYNC_BARRIER:
		sync_barrier_handler (nodeid, msg);
		break;
	case MESSAGE_REQ_SYNC_SERVICE_BUILD:
		sync_service_build_handler (nodeid, msg);
		break;
	case MESSAGE_REQ_SYNC_MEMB_DETERMINE:
		sync_memb_determine (nodeid, msg);
		break;
	}
}
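/*
 * The three transmit helpers below fill in the message header and the
 * relevant ring id, then multicast the message to the sync group with
 * agreed ordering.
 */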
static void memb_determine_message_transmit (void)
{
	struct iovec iovec;
	struct req_exec_memb_determine_message req_exec_memb_determine_message;

	req_exec_memb_determine_message.header.size = sizeof (struct req_exec_memb_determine_message);
	req_exec_memb_determine_message.header.id = MESSAGE_REQ_SYNC_MEMB_DETERMINE;

	memcpy (&req_exec_memb_determine_message.ring_id,
		&my_memb_determine_ring_id,
		sizeof (struct memb_ring_id));

	iovec.iov_base = (char *)&req_exec_memb_determine_message;
	iovec.iov_len = sizeof (req_exec_memb_determine_message);

	(void)totempg_groups_mcast_joined (sync_group_handle,
		&iovec, 1, TOTEMPG_AGREED);
}

static void barrier_message_transmit (void)
{
	struct iovec iovec;
	struct req_exec_barrier_message req_exec_barrier_message;

	req_exec_barrier_message.header.size = sizeof (struct req_exec_barrier_message);
	req_exec_barrier_message.header.id = MESSAGE_REQ_SYNC_BARRIER;

	memcpy (&req_exec_barrier_message.ring_id, &my_ring_id,
		sizeof (struct memb_ring_id));

	iovec.iov_base = (char *)&req_exec_barrier_message;
	iovec.iov_len = sizeof (req_exec_barrier_message);

	(void)totempg_groups_mcast_joined (sync_group_handle,
		&iovec, 1, TOTEMPG_AGREED);
}

static void service_build_message_transmit (struct req_exec_service_build_message *service_build_message)
{
	struct iovec iovec;

	service_build_message->header.size = sizeof (struct req_exec_service_build_message);
	service_build_message->header.id = MESSAGE_REQ_SYNC_SERVICE_BUILD;

	memcpy (&service_build_message->ring_id, &my_ring_id,
		sizeof (struct memb_ring_id));

	iovec.iov_base = (void *)service_build_message;
	iovec.iov_len = sizeof (struct req_exec_service_build_message);

	(void)totempg_groups_mcast_joined (sync_group_handle,
		&iovec, 1, TOTEMPG_AGREED);
}
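/*
 * sync_barrier_enter announces that this node has finished processing the
 * current service.  sync_process_enter starts processing: if there are no
 * services to synchronize it reports completion immediately, otherwise it
 * clears the per-processor received flags and schedules schedwrk_processor.
 */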
static void sync_barrier_enter (void)
{
	my_state = SYNC_BARRIER;
	barrier_message_transmit ();
}

static void sync_process_enter (void)
{
	int i;

	my_state = SYNC_PROCESS;

	/*
	 * No sync services
	 */
	if (my_service_list_entries == 0) {
		my_state = SYNC_SERVICELIST_BUILD;
		my_memb_determine_list_entries = 0;
		sync_synchronization_completed ();
		return;
	}
	for (i = 0; i < my_processor_list_entries; i++) {
		my_processor_list[i].received = 0;
	}
	schedwrk_create (&my_schedwrk_handle,
		schedwrk_processor,
		NULL);
}
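/*
 * Record the new membership, rebuild the local service list by querying
 * each possible service id for sync callbacks, and multicast the resulting
 * list so that every node agrees on the set of services to synchronize.
 */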
static void sync_servicelist_build_enter (
	const unsigned int *member_list,
	size_t member_list_entries,
	const struct memb_ring_id *ring_id)
{
	struct req_exec_service_build_message service_build;
	int i;
	int res;
	struct sync_callbacks sync_callbacks;

	my_state = SYNC_SERVICELIST_BUILD;
	for (i = 0; i < member_list_entries; i++) {
		my_processor_list[i].nodeid = member_list[i];
		my_processor_list[i].received = 0;
	}
	my_processor_list_entries = member_list_entries;

	memcpy (my_member_list, member_list,
		member_list_entries * sizeof (unsigned int));
	my_member_list_entries = member_list_entries;

	my_processing_idx = 0;

	memset(my_service_list, 0, sizeof (struct service_entry) * SERVICES_COUNT_MAX);
	my_service_list_entries = 0;

	for (i = 0; i < SERVICES_COUNT_MAX; i++) {
		res = my_sync_callbacks_retrieve (i, &sync_callbacks);
		if (res == -1) {
			continue;
		}
		if (sync_callbacks.sync_init == NULL) {
			continue;
		}
		my_service_list[my_service_list_entries].state = INIT;
		my_service_list[my_service_list_entries].service_id = i;
		strcpy (my_service_list[my_service_list_entries].name,
			sync_callbacks.name);
		my_service_list[my_service_list_entries].sync_init = sync_callbacks.sync_init;
		my_service_list[my_service_list_entries].sync_process = sync_callbacks.sync_process;
		my_service_list[my_service_list_entries].sync_abort = sync_callbacks.sync_abort;
		my_service_list[my_service_list_entries].sync_activate = sync_callbacks.sync_activate;
		my_service_list_entries += 1;
	}

	for (i = 0; i < my_service_list_entries; i++) {
		service_build.service_list[i] =
			my_service_list[i].service_id;
	}
	service_build.service_list_entries = my_service_list_entries;

	service_build_message_transmit (&service_build);
}
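/*
 * Work item run by schedwrk for the service currently being synchronized.
 * On the first pass the transitional node list is reduced to the nodes that
 * are also in the new membership and sync_init is called.  After that,
 * sync_process is called; while it still has work to do (non-zero return)
 * this function returns -1 so it runs again, otherwise the barrier is
 * entered and 0 is returned.
 */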
static int schedwrk_processor (const void *context)
{
	int res = 0;

	if (my_service_list[my_processing_idx].state == INIT) {
		unsigned int old_trans_list[PROCESSOR_COUNT_MAX];
		size_t old_trans_list_entries = 0;
		int o, m;

		my_service_list[my_processing_idx].state = PROCESS;

		memcpy (old_trans_list, my_trans_list, my_trans_list_entries *
			sizeof (unsigned int));
		old_trans_list_entries = my_trans_list_entries;

		my_trans_list_entries = 0;
		for (o = 0; o < old_trans_list_entries; o++) {
			for (m = 0; m < my_member_list_entries; m++) {
				if (old_trans_list[o] == my_member_list[m]) {
					my_trans_list[my_trans_list_entries] = my_member_list[m];
					my_trans_list_entries++;
					break;
				}
			}
		}

		if (my_sync_callbacks_retrieve(my_service_list[my_processing_idx].service_id, NULL) != -1) {
			my_service_list[my_processing_idx].sync_init (my_trans_list,
				my_trans_list_entries, my_member_list,
				my_member_list_entries,
				&my_ring_id);
		}
	}
	if (my_service_list[my_processing_idx].state == PROCESS) {
		my_service_list[my_processing_idx].state = PROCESS;
		if (my_sync_callbacks_retrieve(my_service_list[my_processing_idx].service_id, NULL) != -1) {
			res = my_service_list[my_processing_idx].sync_process ();
		} else {
			res = 0;
		}
		if (res == 0) {
			sync_barrier_enter();
		} else {
			return (-1);
		}
	}
	return (0);
}
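/*
 * Entry point called when a new ring forms: record the ring id and start
 * building the service list, using the membership collected from memb
 * determine messages when one is available, otherwise the membership
 * supplied by the caller.  sync_save_transitional stores the transitional
 * (old ring) member list that sync_init later passes to each service.
 */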
void sync_start (
	const unsigned int *member_list,
	size_t member_list_entries,
	const struct memb_ring_id *ring_id)
{
	ENTER();
	memcpy (&my_ring_id, ring_id, sizeof (struct memb_ring_id));

	if (my_memb_determine) {
		my_memb_determine = 0;
		sync_servicelist_build_enter (my_memb_determine_list,
			my_memb_determine_list_entries, ring_id);
	} else {
		sync_servicelist_build_enter (member_list, member_list_entries,
			ring_id);
	}
}

void sync_save_transitional (
	const unsigned int *member_list,
	size_t member_list_entries,
	const struct memb_ring_id *ring_id)
{
	ENTER();
	memcpy (my_trans_list, member_list, member_list_entries *
		sizeof (unsigned int));
	my_trans_list_entries = member_list_entries;
}
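/*
 * Abort an in-progress synchronization: stop the work item, give the
 * current service a chance to clean up via sync_abort, and clear the ring
 * id so that stale barrier messages are ignored.
 */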
void sync_abort (void)
{
	ENTER();
	if (my_state == SYNC_PROCESS) {
		schedwrk_destroy (my_schedwrk_handle);
		if (my_sync_callbacks_retrieve(my_service_list[my_processing_idx].service_id, NULL) != -1) {
			my_service_list[my_processing_idx].sync_abort ();
		}
	}

	/* this will prevent any "old" barrier messages from causing
	 * problems.
	 */
	memset (&my_ring_id, 0, sizeof (struct memb_ring_id));
}
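/*
 * sync_memb_list_determine starts membership determination for the given
 * ring by multicasting a memb determine message; sync_memb_list_abort
 * discards any membership collected so far.
 */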
void sync_memb_list_determine (const struct memb_ring_id *ring_id)
{
	ENTER();
	memcpy (&my_memb_determine_ring_id, ring_id,
		sizeof (struct memb_ring_id));

	memb_determine_message_transmit ();
}

void sync_memb_list_abort (void)
{
	ENTER();
	my_memb_determine_list_entries = 0;
	memset (&my_memb_determine_ring_id, 0, sizeof (struct memb_ring_id));
}