sync.c

/*
 * Copyright (c) 2009-2012 Red Hat, Inc.
 *
 * All rights reserved.
 *
 * Author: Steven Dake (sdake@redhat.com)
 *
 * This software licensed under BSD license, the text of which follows:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * - Redistributions of source code must retain the above copyright notice,
 *   this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright notice,
 *   this list of conditions and the following disclaimer in the documentation
 *   and/or other materials provided with the distribution.
 * - Neither the name of the MontaVista Software, Inc. nor the names of its
 *   contributors may be used to endorse or promote products derived from this
 *   software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <config.h>

#include <sys/types.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <sys/ioctl.h>
#include <netinet/in.h>
#include <sys/uio.h>
#include <unistd.h>
#include <fcntl.h>
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>
#include <time.h>
#include <unistd.h>
#include <netinet/in.h>
#include <arpa/inet.h>

#include <corosync/corotypes.h>
#include <corosync/swab.h>
#include <corosync/totem/totempg.h>
#include <corosync/totem/totem.h>
#include <corosync/logsys.h>
#include <qb/qbipc_common.h>

#include "schedwrk.h"
#include "quorum.h"
#include "sync.h"
#include "main.h"

LOGSYS_DECLARE_SUBSYS ("SYNC");

#define MESSAGE_REQ_SYNC_BARRIER 0
#define MESSAGE_REQ_SYNC_SERVICE_BUILD 1
#define MESSAGE_REQ_SYNC_MEMB_DETERMINE 2

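/*
 * A synchronization round walks through three top-level states: the
 * membership-wide service list is agreed on (SYNC_SERVICELIST_BUILD),
 * each service is processed in turn (SYNC_PROCESS), and a barrier is
 * run before a service is activated and the next one is started
 * (SYNC_BARRIER).  Each service entry also tracks its own progress
 * through sync_process_state (INIT -> PROCESS -> ACTIVATE).
 */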
enum sync_process_state {
	INIT,
	PROCESS,
	ACTIVATE
};

enum sync_state {
	SYNC_SERVICELIST_BUILD,
	SYNC_PROCESS,
	SYNC_BARRIER
};

struct service_entry {
	int service_id;
	void (*sync_init) (
		const unsigned int *trans_list,
		size_t trans_list_entries,
		const unsigned int *member_list,
		size_t member_list_entries,
		const struct memb_ring_id *ring_id);
	void (*sync_abort) (void);
	int (*sync_process) (void);
	void (*sync_activate) (void);
	enum sync_process_state state;
	char name[128];
};

struct processor_entry {
	int nodeid;
	int received;
};

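/*
 * On-wire messages multicast to the "sync" totempg group.  Every
 * message carries the ring id it was generated for so that the
 * handlers can recognize and discard messages belonging to an old
 * ring.
 */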
struct req_exec_memb_determine_message {
	struct qb_ipc_request_header header __attribute__((aligned(8)));
	struct memb_ring_id ring_id __attribute__((aligned(8)));
};

struct req_exec_service_build_message {
	struct qb_ipc_request_header header __attribute__((aligned(8)));
	struct memb_ring_id ring_id __attribute__((aligned(8)));
	int service_list_entries __attribute__((aligned(8)));
	int service_list[128] __attribute__((aligned(8)));
};

struct req_exec_barrier_message {
	struct qb_ipc_request_header header __attribute__((aligned(8)));
	struct memb_ring_id ring_id __attribute__((aligned(8)));
};

static enum sync_state my_state = SYNC_BARRIER;
static struct memb_ring_id my_ring_id;
static struct memb_ring_id my_memb_determine_ring_id;
static int my_memb_determine = 0;
static unsigned int my_memb_determine_list[PROCESSOR_COUNT_MAX];
static unsigned int my_memb_determine_list_entries = 0;
static int my_processing_idx = 0;
static hdb_handle_t my_schedwrk_handle;
static struct processor_entry my_processor_list[PROCESSOR_COUNT_MAX];
static unsigned int my_member_list[PROCESSOR_COUNT_MAX];
static unsigned int my_trans_list[PROCESSOR_COUNT_MAX];
static size_t my_member_list_entries = 0;
static size_t my_trans_list_entries = 0;
static int my_processor_list_entries = 0;
static struct service_entry my_service_list[SERVICES_COUNT_MAX];
static int my_service_list_entries = 0;

static void (*sync_synchronization_completed) (void);

static void sync_deliver_fn (
	unsigned int nodeid,
	const void *msg,
	unsigned int msg_len,
	int endian_conversion_required);

static int schedwrk_processor (const void *context);

static void sync_process_enter (void);

static struct totempg_group sync_group = {
	.group = "sync",
	.group_len = 4
};

static void *sync_group_handle;

int (*my_sync_callbacks_retrieve) (
	int service_id,
	struct sync_callbacks *callbacks);

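/*
 * sync_init() registers sync_deliver_fn with the "sync" totempg group
 * and records the two callbacks supplied by the caller:
 * sync_callbacks_retrieve, used to look up each service's sync
 * handlers, and synchronization_completed, invoked once a whole
 * round has finished.
 */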
int sync_init (
	int (*sync_callbacks_retrieve) (
		int service_id,
		struct sync_callbacks *callbacks),
	void (*synchronization_completed) (void))
{
	unsigned int res;

	res = totempg_groups_initialize (
		&sync_group_handle,
		sync_deliver_fn,
		NULL);
	if (res == -1) {
		log_printf (LOGSYS_LEVEL_ERROR,
			"Couldn't initialize groups interface.");
		return (-1);
	}

	res = totempg_groups_join (
		sync_group_handle,
		&sync_group,
		1);
	if (res == -1) {
		log_printf (LOGSYS_LEVEL_ERROR, "Couldn't join group.");
		return (-1);
	}

	sync_synchronization_completed = synchronization_completed;
	my_sync_callbacks_retrieve = sync_callbacks_retrieve;

	return (0);
}

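/*
 * Barrier handling: mark the sending node as having reached the
 * barrier for the current ring.  Once every processor has been heard
 * from, activate the service that was just processed and either move
 * on to the next service or, if it was the last one, report that the
 * whole synchronization round has completed.
 */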
static void sync_barrier_handler (unsigned int nodeid, const void *msg)
{
	const struct req_exec_barrier_message *req_exec_barrier_message = msg;
	int i;
	int barrier_reached = 1;

	if (memcmp (&my_ring_id, &req_exec_barrier_message->ring_id,
	    sizeof (struct memb_ring_id)) != 0) {
		log_printf (LOGSYS_LEVEL_DEBUG, "barrier for old ring - discarding");
		return;
	}
	for (i = 0; i < my_processor_list_entries; i++) {
		if (my_processor_list[i].nodeid == nodeid) {
			my_processor_list[i].received = 1;
		}
	}
	for (i = 0; i < my_processor_list_entries; i++) {
		if (my_processor_list[i].received == 0) {
			barrier_reached = 0;
		}
	}
	if (barrier_reached) {
		log_printf (LOGSYS_LEVEL_DEBUG, "Committing synchronization for %s",
			my_service_list[my_processing_idx].name);
		my_service_list[my_processing_idx].state = ACTIVATE;

		if (my_sync_callbacks_retrieve(my_service_list[my_processing_idx].service_id, NULL) != -1) {
			my_service_list[my_processing_idx].sync_activate ();
		}

		my_processing_idx += 1;
		if (my_service_list_entries == my_processing_idx) {
			my_memb_determine_list_entries = 0;
			sync_synchronization_completed ();
		} else {
			sync_process_enter ();
		}
	}
}

static void dummy_sync_init (
	const unsigned int *trans_list,
	size_t trans_list_entries,
	const unsigned int *member_list,
	size_t member_list_entries,
	const struct memb_ring_id *ring_id)
{
}

static void dummy_sync_abort (void)
{
}

static int dummy_sync_process (void)
{
	return (0);
}

static void dummy_sync_activate (void)
{
}

static int service_entry_compare (const void *a, const void *b)
{
	const struct service_entry *service_entry_a = a;
	const struct service_entry *service_entry_b = b;

	return (service_entry_a->service_id > service_entry_b->service_id);
}

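/*
 * Record which nodes have answered a membership-determine request for
 * the current my_memb_determine_ring_id.  The resulting list is used
 * by sync_start() in preference to the totem member list when a
 * determine exchange has taken place.
 */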
static void sync_memb_determine (unsigned int nodeid, const void *msg)
{
	const struct req_exec_memb_determine_message *req_exec_memb_determine_message = msg;
	int found = 0;
	int i;

	if (memcmp (&req_exec_memb_determine_message->ring_id,
	    &my_memb_determine_ring_id, sizeof (struct memb_ring_id)) != 0) {
		log_printf (LOGSYS_LEVEL_DEBUG, "memb determine for old ring - discarding");
		return;
	}

	my_memb_determine = 1;
	for (i = 0; i < my_memb_determine_list_entries; i++) {
		if (my_memb_determine_list[i] == nodeid) {
			found = 1;
		}
	}
	if (found == 0) {
		my_memb_determine_list[my_memb_determine_list_entries] = nodeid;
		my_memb_determine_list_entries += 1;
	}
}

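/*
 * Merge the service list advertised by another node into the local
 * one.  Services this node does not provide are added with dummy
 * callbacks so that every node steps through an identical, sorted
 * list.  When all processors have sent their lists, processing of
 * the first service begins.
 */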
static void sync_service_build_handler (unsigned int nodeid, const void *msg)
{
	const struct req_exec_service_build_message *req_exec_service_build_message = msg;
	int i, j;
	int barrier_reached = 1;
	int found;
	int qsort_trigger = 0;

	if (memcmp (&my_ring_id, &req_exec_service_build_message->ring_id,
	    sizeof (struct memb_ring_id)) != 0) {
		log_printf (LOGSYS_LEVEL_DEBUG, "service build for old ring - discarding");
		return;
	}
	for (i = 0; i < req_exec_service_build_message->service_list_entries; i++) {
		found = 0;
		for (j = 0; j < my_service_list_entries; j++) {
			if (req_exec_service_build_message->service_list[i] ==
			    my_service_list[j].service_id) {
				found = 1;
				break;
			}
		}
		if (found == 0) {
			my_service_list[my_service_list_entries].state = INIT;
			my_service_list[my_service_list_entries].service_id =
				req_exec_service_build_message->service_list[i];
			sprintf (my_service_list[my_service_list_entries].name,
				"Unknown External Service (id = %d)\n",
				req_exec_service_build_message->service_list[i]);
			my_service_list[my_service_list_entries].sync_init =
				dummy_sync_init;
			my_service_list[my_service_list_entries].sync_abort =
				dummy_sync_abort;
			my_service_list[my_service_list_entries].sync_process =
				dummy_sync_process;
			my_service_list[my_service_list_entries].sync_activate =
				dummy_sync_activate;
			my_service_list_entries += 1;

			qsort_trigger = 1;
		}
	}
	if (qsort_trigger) {
		qsort (my_service_list, my_service_list_entries,
			sizeof (struct service_entry), service_entry_compare);
	}
	for (i = 0; i < my_processor_list_entries; i++) {
		if (my_processor_list[i].nodeid == nodeid) {
			my_processor_list[i].received = 1;
		}
	}
	for (i = 0; i < my_processor_list_entries; i++) {
		if (my_processor_list[i].received == 0) {
			barrier_reached = 0;
		}
	}
	if (barrier_reached) {
		sync_process_enter ();
	}
}

static void sync_deliver_fn (
	unsigned int nodeid,
	const void *msg,
	unsigned int msg_len,
	int endian_conversion_required)
{
	struct qb_ipc_request_header *header = (struct qb_ipc_request_header *)msg;

	switch (header->id) {
	case MESSAGE_REQ_SYNC_BARRIER:
		sync_barrier_handler (nodeid, msg);
		break;
	case MESSAGE_REQ_SYNC_SERVICE_BUILD:
		sync_service_build_handler (nodeid, msg);
		break;
	case MESSAGE_REQ_SYNC_MEMB_DETERMINE:
		sync_memb_determine (nodeid, msg);
		break;
	}
}

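/*
 * Transmit helpers: each builds one of the three message types,
 * stamps it with the relevant ring id and multicasts it to the
 * "sync" group using agreed ordering.
 */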
static void memb_determine_message_transmit (void)
{
	struct iovec iovec;
	struct req_exec_memb_determine_message req_exec_memb_determine_message;

	req_exec_memb_determine_message.header.size = sizeof (struct req_exec_memb_determine_message);
	req_exec_memb_determine_message.header.id = MESSAGE_REQ_SYNC_MEMB_DETERMINE;

	memcpy (&req_exec_memb_determine_message.ring_id,
		&my_memb_determine_ring_id,
		sizeof (struct memb_ring_id));

	iovec.iov_base = (char *)&req_exec_memb_determine_message;
	iovec.iov_len = sizeof (req_exec_memb_determine_message);

	(void)totempg_groups_mcast_joined (sync_group_handle,
		&iovec, 1, TOTEMPG_AGREED);
}

static void barrier_message_transmit (void)
{
	struct iovec iovec;
	struct req_exec_barrier_message req_exec_barrier_message;

	req_exec_barrier_message.header.size = sizeof (struct req_exec_barrier_message);
	req_exec_barrier_message.header.id = MESSAGE_REQ_SYNC_BARRIER;

	memcpy (&req_exec_barrier_message.ring_id, &my_ring_id,
		sizeof (struct memb_ring_id));

	iovec.iov_base = (char *)&req_exec_barrier_message;
	iovec.iov_len = sizeof (req_exec_barrier_message);

	(void)totempg_groups_mcast_joined (sync_group_handle,
		&iovec, 1, TOTEMPG_AGREED);
}

static void service_build_message_transmit (struct req_exec_service_build_message *service_build_message)
{
	struct iovec iovec;

	service_build_message->header.size = sizeof (struct req_exec_service_build_message);
	service_build_message->header.id = MESSAGE_REQ_SYNC_SERVICE_BUILD;

	memcpy (&service_build_message->ring_id, &my_ring_id,
		sizeof (struct memb_ring_id));

	iovec.iov_base = (void *)service_build_message;
	iovec.iov_len = sizeof (struct req_exec_service_build_message);

	(void)totempg_groups_mcast_joined (sync_group_handle,
		&iovec, 1, TOTEMPG_AGREED);
}

static void sync_barrier_enter (void)
{
	my_state = SYNC_BARRIER;
	barrier_message_transmit ();
}

static void sync_process_enter (void)
{
	int i;

	my_state = SYNC_PROCESS;

	/*
	 * No sync services
	 */
	if (my_service_list_entries == 0) {
		my_state = SYNC_SERVICELIST_BUILD;
		my_memb_determine_list_entries = 0;
		sync_synchronization_completed ();
		return;
	}
	for (i = 0; i < my_processor_list_entries; i++) {
		my_processor_list[i].received = 0;
	}
	schedwrk_create (&my_schedwrk_handle,
		schedwrk_processor,
		NULL);
}

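/*
 * Start of a synchronization round: remember the processor and member
 * lists, rebuild the local service list from the registered services
 * (skipping any without a sync_init callback) and multicast it so the
 * other nodes can merge it into theirs.
 */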
static void sync_servicelist_build_enter (
	const unsigned int *member_list,
	size_t member_list_entries,
	const struct memb_ring_id *ring_id)
{
	struct req_exec_service_build_message service_build;
	int i;
	int res;
	struct sync_callbacks sync_callbacks;

	my_state = SYNC_SERVICELIST_BUILD;
	for (i = 0; i < member_list_entries; i++) {
		my_processor_list[i].nodeid = member_list[i];
		my_processor_list[i].received = 0;
	}
	my_processor_list_entries = member_list_entries;

	memcpy (my_member_list, member_list,
		member_list_entries * sizeof (unsigned int));
	my_member_list_entries = member_list_entries;

	my_processing_idx = 0;

	memset(my_service_list, 0, sizeof (struct service_entry) * SERVICES_COUNT_MAX);
	my_service_list_entries = 0;

	for (i = 0; i < SERVICES_COUNT_MAX; i++) {
		res = my_sync_callbacks_retrieve (i, &sync_callbacks);
		if (res == -1) {
			continue;
		}
		if (sync_callbacks.sync_init == NULL) {
			continue;
		}
		my_service_list[my_service_list_entries].state = INIT;
		my_service_list[my_service_list_entries].service_id = i;
		strcpy (my_service_list[my_service_list_entries].name,
			sync_callbacks.name);
		my_service_list[my_service_list_entries].sync_init = sync_callbacks.sync_init;
		my_service_list[my_service_list_entries].sync_process = sync_callbacks.sync_process;
		my_service_list[my_service_list_entries].sync_abort = sync_callbacks.sync_abort;
		my_service_list[my_service_list_entries].sync_activate = sync_callbacks.sync_activate;
		my_service_list_entries += 1;
	}

	for (i = 0; i < my_service_list_entries; i++) {
		service_build.service_list[i] =
			my_service_list[i].service_id;
	}
	service_build.service_list_entries = my_service_list_entries;

	service_build_message_transmit (&service_build);
}

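/*
 * Work-queue processor for the service currently being synchronized.
 * On first entry the transitional node list is reduced to nodes that
 * are still members and the service's sync_init callback is run; the
 * service's sync_process callback is then called until it reports
 * completion (returns 0), at which point the barrier is entered.
 * Returning -1 signals schedwrk that there is still work to do.
 */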
static int schedwrk_processor (const void *context)
{
	int res = 0;

	if (my_service_list[my_processing_idx].state == INIT) {
		unsigned int old_trans_list[PROCESSOR_COUNT_MAX];
		size_t old_trans_list_entries = 0;
		int o, m;

		my_service_list[my_processing_idx].state = PROCESS;

		memcpy (old_trans_list, my_trans_list, my_trans_list_entries *
			sizeof (unsigned int));
		old_trans_list_entries = my_trans_list_entries;

		my_trans_list_entries = 0;
		for (o = 0; o < old_trans_list_entries; o++) {
			for (m = 0; m < my_member_list_entries; m++) {
				if (old_trans_list[o] == my_member_list[m]) {
					my_trans_list[my_trans_list_entries] = my_member_list[m];
					my_trans_list_entries++;
					break;
				}
			}
		}

		if (my_sync_callbacks_retrieve(my_service_list[my_processing_idx].service_id, NULL) != -1) {
			my_service_list[my_processing_idx].sync_init (my_trans_list,
				my_trans_list_entries, my_member_list,
				my_member_list_entries,
				&my_ring_id);
		}
	}
	if (my_service_list[my_processing_idx].state == PROCESS) {
		my_service_list[my_processing_idx].state = PROCESS;
		if (my_sync_callbacks_retrieve(my_service_list[my_processing_idx].service_id, NULL) != -1) {
			res = my_service_list[my_processing_idx].sync_process ();
		} else {
			res = 0;
		}
		if (res == 0) {
			sync_barrier_enter();
		} else {
			return (-1);
		}
	}
	return (0);
}

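/*
 * Entry points called on a configuration change.  sync_start() begins
 * a round against either the node list collected by a preceding
 * memb-determine exchange or the member list supplied by the caller,
 * and sync_save_transitional() records the transitional membership
 * that is later handed to the sync_init callbacks.
 */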
void sync_start (
	const unsigned int *member_list,
	size_t member_list_entries,
	const struct memb_ring_id *ring_id)
{
	ENTER();
	memcpy (&my_ring_id, ring_id, sizeof (struct memb_ring_id));

	if (my_memb_determine) {
		my_memb_determine = 0;
		sync_servicelist_build_enter (my_memb_determine_list,
			my_memb_determine_list_entries, ring_id);
	} else {
		sync_servicelist_build_enter (member_list, member_list_entries,
			ring_id);
	}
}

void sync_save_transitional (
	const unsigned int *member_list,
	size_t member_list_entries,
	const struct memb_ring_id *ring_id)
{
	ENTER();
	memcpy (my_trans_list, member_list, member_list_entries *
		sizeof (unsigned int));
	my_trans_list_entries = member_list_entries;
}

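/*
 * Abort an in-progress round: stop the work item, give the service
 * currently being processed a chance to clean up, and clear the ring
 * id so barrier messages from the aborted ring are ignored.
 */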
void sync_abort (void)
{
	ENTER();
	if (my_state == SYNC_PROCESS) {
		schedwrk_destroy (my_schedwrk_handle);
		if (my_sync_callbacks_retrieve(my_service_list[my_processing_idx].service_id, NULL) != -1) {
			my_service_list[my_processing_idx].sync_abort ();
		}
	}

	/* this will prevent any "old" barrier messages from causing
	 * problems.
	 */
	memset (&my_ring_id, 0, sizeof (struct memb_ring_id));
}

void sync_memb_list_determine (const struct memb_ring_id *ring_id)
{
	ENTER();
	memcpy (&my_memb_determine_ring_id, ring_id,
		sizeof (struct memb_ring_id));

	memb_determine_message_transmit ();
}

void sync_memb_list_abort (void)
{
	ENTER();
	my_memb_determine_list_entries = 0;
	memset (&my_memb_determine_ring_id, 0, sizeof (struct memb_ring_id));
}