sync.c

/*
 * Copyright (c) 2009-2012 Red Hat, Inc.
 *
 * All rights reserved.
 *
 * Author: Steven Dake (sdake@redhat.com)
 *
 * This software licensed under BSD license, the text of which follows:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * - Redistributions of source code must retain the above copyright notice,
 *   this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright notice,
 *   this list of conditions and the following disclaimer in the documentation
 *   and/or other materials provided with the distribution.
 * - Neither the name of the MontaVista Software, Inc. nor the names of its
 *   contributors may be used to endorse or promote products derived from this
 *   software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <config.h>

#include <sys/types.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <sys/ioctl.h>
#include <netinet/in.h>
#include <sys/uio.h>
#include <unistd.h>
#include <fcntl.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <errno.h>
#include <time.h>
#include <arpa/inet.h>

#include <corosync/corotypes.h>
#include <corosync/swab.h>
#include <corosync/totem/totempg.h>
#include <corosync/totem/totem.h>
#include <corosync/logsys.h>
#include <qb/qbipc_common.h>

#include "schedwrk.h"
#include "quorum.h"
#include "sync.h"
#include "main.h"

LOGSYS_DECLARE_SUBSYS ("SYNC");

#define MESSAGE_REQ_SYNC_BARRIER 0
#define MESSAGE_REQ_SYNC_SERVICE_BUILD 1
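
/*
 * Synchronization for a ring runs as a small state machine: first the list
 * of services to synchronize is agreed between nodes (SYNC_SERVICELIST_BUILD),
 * then each service is processed in turn (SYNC_PROCESS), with a barrier
 * between services (SYNC_BARRIER).
 */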
enum sync_process_state {
	PROCESS,
	ACTIVATE
};

enum sync_state {
	SYNC_SERVICELIST_BUILD,
	SYNC_PROCESS,
	SYNC_BARRIER
};

struct service_entry {
	int service_id;
	void (*sync_init) (
		const unsigned int *trans_list,
		size_t trans_list_entries,
		const unsigned int *member_list,
		size_t member_list_entries,
		const struct memb_ring_id *ring_id);
	void (*sync_abort) (void);
	int (*sync_process) (void);
	void (*sync_activate) (void);
	enum sync_process_state state;
	char name[128];
};

struct processor_entry {
	int nodeid;
	int received;
};

struct req_exec_service_build_message {
	struct qb_ipc_request_header header __attribute__((aligned(8)));
	struct memb_ring_id ring_id __attribute__((aligned(8)));
	int service_list_entries __attribute__((aligned(8)));
	int service_list[128] __attribute__((aligned(8)));
};

struct req_exec_barrier_message {
	struct qb_ipc_request_header header __attribute__((aligned(8)));
	struct memb_ring_id ring_id __attribute__((aligned(8)));
};

static enum sync_state my_state = SYNC_BARRIER;
static struct memb_ring_id my_ring_id;
static int my_processing_idx = 0;
static hdb_handle_t my_schedwrk_handle;
static struct processor_entry my_processor_list[PROCESSOR_COUNT_MAX];
static unsigned int my_member_list[PROCESSOR_COUNT_MAX];
static unsigned int my_trans_list[PROCESSOR_COUNT_MAX];
static size_t my_member_list_entries = 0;
static size_t my_trans_list_entries = 0;
static int my_processor_list_entries = 0;
static struct service_entry my_service_list[SERVICES_COUNT_MAX];
static int my_service_list_entries = 0;

static void (*sync_synchronization_completed) (void);

static void sync_deliver_fn (
	unsigned int nodeid,
	const void *msg,
	unsigned int msg_len,
	int endian_conversion_required);

static int schedwrk_processor (const void *context);

static void sync_process_enter (void);

static void sync_process_call_init (void);

static struct totempg_group sync_group = {
	.group		= "sync",
	.group_len	= 4
};

static void *sync_group_handle;

int (*my_sync_callbacks_retrieve) (
	int service_id,
	struct sync_callbacks *callbacks);
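
/*
 * Register the sync engine with totempg: join the "sync" group used to
 * exchange barrier and service-build messages, and record the caller's
 * callback-retrieval and completion functions.
 */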
int sync_init (
	int (*sync_callbacks_retrieve) (
		int service_id,
		struct sync_callbacks *callbacks),
	void (*synchronization_completed) (void))
{
	unsigned int res;

	res = totempg_groups_initialize (
		&sync_group_handle,
		sync_deliver_fn,
		NULL);
	if (res == -1) {
		log_printf (LOGSYS_LEVEL_ERROR,
			"Couldn't initialize groups interface.");
		return (-1);
	}

	res = totempg_groups_join (
		sync_group_handle,
		&sync_group,
		1);
	if (res == -1) {
		log_printf (LOGSYS_LEVEL_ERROR, "Couldn't join group.");
		return (-1);
	}

	sync_synchronization_completed = synchronization_completed;
	my_sync_callbacks_retrieve = sync_callbacks_retrieve;

	return (0);
}
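
/*
 * Handle a barrier message for the current ring.  Once every processor in
 * the ring has sent its barrier, activate the service currently being
 * synchronized and either move on to the next service or report completion.
 */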
static void sync_barrier_handler (unsigned int nodeid, const void *msg)
{
	const struct req_exec_barrier_message *req_exec_barrier_message = msg;
	int i;
	int barrier_reached = 1;

	if (memcmp (&my_ring_id, &req_exec_barrier_message->ring_id,
		sizeof (struct memb_ring_id)) != 0) {
		log_printf (LOGSYS_LEVEL_DEBUG, "barrier for old ring - discarding");
		return;
	}
	for (i = 0; i < my_processor_list_entries; i++) {
		if (my_processor_list[i].nodeid == nodeid) {
			my_processor_list[i].received = 1;
		}
	}
	for (i = 0; i < my_processor_list_entries; i++) {
		if (my_processor_list[i].received == 0) {
			barrier_reached = 0;
		}
	}
	if (barrier_reached) {
		log_printf (LOGSYS_LEVEL_DEBUG, "Committing synchronization for %s",
			my_service_list[my_processing_idx].name);
		my_service_list[my_processing_idx].state = ACTIVATE;

		if (my_sync_callbacks_retrieve (my_service_list[my_processing_idx].service_id, NULL) != -1) {
			my_service_list[my_processing_idx].sync_activate ();
		}

		my_processing_idx += 1;
		if (my_service_list_entries == my_processing_idx) {
			sync_synchronization_completed ();
		} else {
			sync_process_enter ();
		}
	}
}
static void dummy_sync_abort (void)
{
}

static int dummy_sync_process (void)
{
	return (0);
}

static void dummy_sync_activate (void)
{
}

static int service_entry_compare (const void *a, const void *b)
{
	const struct service_entry *service_entry_a = a;
	const struct service_entry *service_entry_b = b;

	return (service_entry_a->service_id > service_entry_b->service_id);
}
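
/*
 * Merge the service list advertised by another node into the local list,
 * creating placeholder entries (with dummy callbacks) for services unknown
 * locally, then wait until every processor has reported before entering the
 * process phase.
 */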
static void sync_service_build_handler (unsigned int nodeid, const void *msg)
{
	const struct req_exec_service_build_message *req_exec_service_build_message = msg;
	int i, j;
	int barrier_reached = 1;
	int found;
	int qsort_trigger = 0;

	if (memcmp (&my_ring_id, &req_exec_service_build_message->ring_id,
		sizeof (struct memb_ring_id)) != 0) {
		log_printf (LOGSYS_LEVEL_DEBUG, "service build for old ring - discarding");
		return;
	}
	for (i = 0; i < req_exec_service_build_message->service_list_entries; i++) {
		found = 0;
		for (j = 0; j < my_service_list_entries; j++) {
			if (req_exec_service_build_message->service_list[i] ==
				my_service_list[j].service_id) {
				found = 1;
				break;
			}
		}
		if (found == 0) {
			my_service_list[my_service_list_entries].state = PROCESS;
			my_service_list[my_service_list_entries].service_id =
				req_exec_service_build_message->service_list[i];
			sprintf (my_service_list[my_service_list_entries].name,
				"Unknown External Service (id = %d)",
				req_exec_service_build_message->service_list[i]);
			my_service_list[my_service_list_entries].sync_init = NULL;
			my_service_list[my_service_list_entries].sync_abort =
				dummy_sync_abort;
			my_service_list[my_service_list_entries].sync_process =
				dummy_sync_process;
			my_service_list[my_service_list_entries].sync_activate =
				dummy_sync_activate;
			my_service_list_entries += 1;
			qsort_trigger = 1;
		}
	}
	if (qsort_trigger) {
		qsort (my_service_list, my_service_list_entries,
			sizeof (struct service_entry), service_entry_compare);
	}
	for (i = 0; i < my_processor_list_entries; i++) {
		if (my_processor_list[i].nodeid == nodeid) {
			my_processor_list[i].received = 1;
		}
	}
	for (i = 0; i < my_processor_list_entries; i++) {
		if (my_processor_list[i].received == 0) {
			barrier_reached = 0;
		}
	}
	if (barrier_reached) {
		log_printf (LOGSYS_LEVEL_DEBUG, "enter sync process");
		sync_process_enter ();
	}
}
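
/*
 * Dispatch messages multicast within the "sync" totempg group to the
 * barrier or service-build handler.
 */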
static void sync_deliver_fn (
	unsigned int nodeid,
	const void *msg,
	unsigned int msg_len,
	int endian_conversion_required)
{
	struct qb_ipc_request_header *header = (struct qb_ipc_request_header *)msg;

	switch (header->id) {
	case MESSAGE_REQ_SYNC_BARRIER:
		sync_barrier_handler (nodeid, msg);
		break;
	case MESSAGE_REQ_SYNC_SERVICE_BUILD:
		sync_service_build_handler (nodeid, msg);
		break;
	}
}
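
/*
 * Multicast helpers: fill in the message header, copy in the current ring id
 * and send the message to the sync group with agreed ordering
 * (TOTEMPG_AGREED).
 */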
static void barrier_message_transmit (void)
{
	struct iovec iovec;
	struct req_exec_barrier_message req_exec_barrier_message;

	req_exec_barrier_message.header.size = sizeof (struct req_exec_barrier_message);
	req_exec_barrier_message.header.id = MESSAGE_REQ_SYNC_BARRIER;

	memcpy (&req_exec_barrier_message.ring_id, &my_ring_id,
		sizeof (struct memb_ring_id));

	iovec.iov_base = (char *)&req_exec_barrier_message;
	iovec.iov_len = sizeof (req_exec_barrier_message);

	(void)totempg_groups_mcast_joined (sync_group_handle,
		&iovec, 1, TOTEMPG_AGREED);
}

static void service_build_message_transmit (struct req_exec_service_build_message *service_build_message)
{
	struct iovec iovec;

	service_build_message->header.size = sizeof (struct req_exec_service_build_message);
	service_build_message->header.id = MESSAGE_REQ_SYNC_SERVICE_BUILD;

	memcpy (&service_build_message->ring_id, &my_ring_id,
		sizeof (struct memb_ring_id));

	iovec.iov_base = (void *)service_build_message;
	iovec.iov_len = sizeof (struct req_exec_service_build_message);

	(void)totempg_groups_mcast_joined (sync_group_handle,
		&iovec, 1, TOTEMPG_AGREED);
}
static void sync_barrier_enter (void)
{
	my_state = SYNC_BARRIER;
	barrier_message_transmit ();
}
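
/*
 * Reduce the transitional node list to the nodes that are still members of
 * the new ring, then call sync_init for every service that provides it.
 */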
static void sync_process_call_init (void)
{
	unsigned int old_trans_list[PROCESSOR_COUNT_MAX];
	size_t old_trans_list_entries = 0;
	int o, m;
	int i;

	memcpy (old_trans_list, my_trans_list, my_trans_list_entries *
		sizeof (unsigned int));
	old_trans_list_entries = my_trans_list_entries;

	my_trans_list_entries = 0;
	for (o = 0; o < old_trans_list_entries; o++) {
		for (m = 0; m < my_member_list_entries; m++) {
			if (old_trans_list[o] == my_member_list[m]) {
				my_trans_list[my_trans_list_entries] = my_member_list[m];
				my_trans_list_entries++;
				break;
			}
		}
	}

	for (i = 0; i < my_service_list_entries; i++) {
		if (my_sync_callbacks_retrieve (my_service_list[i].service_id, NULL) != -1) {
			my_service_list[i].sync_init (my_trans_list,
				my_trans_list_entries, my_member_list,
				my_member_list_entries,
				&my_ring_id);
		}
	}
}
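
/*
 * Enter the process phase: reset the per-node barrier flags and schedule
 * schedwrk_processor to drive the current service's sync_process.  With no
 * sync services registered, synchronization completes immediately.
 */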
static void sync_process_enter (void)
{
	int i;

	my_state = SYNC_PROCESS;

	/*
	 * No sync services
	 */
	if (my_service_list_entries == 0) {
		my_state = SYNC_SERVICELIST_BUILD;
		sync_synchronization_completed ();
		return;
	}
	for (i = 0; i < my_processor_list_entries; i++) {
		my_processor_list[i].received = 0;
	}
	schedwrk_create (&my_schedwrk_handle,
		schedwrk_processor,
		NULL);
}
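
/*
 * Begin the service-list build phase: record the new membership, collect the
 * locally known sync-capable services, advertise them to the other nodes and
 * call their sync_init callbacks.
 */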
static void sync_servicelist_build_enter (
	const unsigned int *member_list,
	size_t member_list_entries,
	const struct memb_ring_id *ring_id)
{
	struct req_exec_service_build_message service_build;
	int i;
	int res;
	struct sync_callbacks sync_callbacks;

	my_state = SYNC_SERVICELIST_BUILD;
	for (i = 0; i < member_list_entries; i++) {
		my_processor_list[i].nodeid = member_list[i];
		my_processor_list[i].received = 0;
	}
	my_processor_list_entries = member_list_entries;

	memcpy (my_member_list, member_list,
		member_list_entries * sizeof (unsigned int));
	my_member_list_entries = member_list_entries;

	my_processing_idx = 0;

	memset (my_service_list, 0, sizeof (struct service_entry) * SERVICES_COUNT_MAX);
	my_service_list_entries = 0;

	for (i = 0; i < SERVICES_COUNT_MAX; i++) {
		res = my_sync_callbacks_retrieve (i, &sync_callbacks);
		if (res == -1) {
			continue;
		}
		if (sync_callbacks.sync_init == NULL) {
			continue;
		}
		my_service_list[my_service_list_entries].state = PROCESS;
		my_service_list[my_service_list_entries].service_id = i;
		assert (strlen (sync_callbacks.name) < sizeof (my_service_list[my_service_list_entries].name));
		strcpy (my_service_list[my_service_list_entries].name,
			sync_callbacks.name);
		my_service_list[my_service_list_entries].sync_init = sync_callbacks.sync_init;
		my_service_list[my_service_list_entries].sync_process = sync_callbacks.sync_process;
		my_service_list[my_service_list_entries].sync_abort = sync_callbacks.sync_abort;
		my_service_list[my_service_list_entries].sync_activate = sync_callbacks.sync_activate;
		my_service_list_entries += 1;
	}

	for (i = 0; i < my_service_list_entries; i++) {
		service_build.service_list[i] =
			my_service_list[i].service_id;
	}
	service_build.service_list_entries = my_service_list_entries;

	service_build_message_transmit (&service_build);

	log_printf (LOGSYS_LEVEL_DEBUG, "call init for locally known services");
	sync_process_call_init ();
}
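
/*
 * schedwrk callback driving the current service's sync_process.  A return of
 * zero from sync_process means the service is finished and the barrier is
 * entered; a non-zero return means more processing remains and this callback
 * reports that it still has work to do.
 */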
static int schedwrk_processor (const void *context)
{
	int res = 0;

	if (my_service_list[my_processing_idx].state == PROCESS) {
		if (my_sync_callbacks_retrieve (my_service_list[my_processing_idx].service_id, NULL) != -1) {
			res = my_service_list[my_processing_idx].sync_process ();
		} else {
			res = 0;
		}
		if (res == 0) {
			sync_barrier_enter ();
		} else {
			return (-1);
		}
	}
	return (0);
}
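
/*
 * Begin synchronization for a newly formed ring: record the ring id and
 * start building the agreed service list.
 */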
void sync_start (
	const unsigned int *member_list,
	size_t member_list_entries,
	const struct memb_ring_id *ring_id)
{
	ENTER();
	memcpy (&my_ring_id, ring_id, sizeof (struct memb_ring_id));

	sync_servicelist_build_enter (member_list, member_list_entries,
		ring_id);
}
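
/*
 * Record the transitional membership (the nodes carried over from the old
 * ring) so sync_process_call_init can pass it to the services' sync_init.
 */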
void sync_save_transitional (
	const unsigned int *member_list,
	size_t member_list_entries,
	const struct memb_ring_id *ring_id)
{
	ENTER();
	memcpy (my_trans_list, member_list, member_list_entries *
		sizeof (unsigned int));
	my_trans_list_entries = member_list_entries;
}
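
/*
 * Abort an in-progress synchronization (e.g. because the membership changed
 * again): stop the scheduled work and tell the current service to abort.
 */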
void sync_abort (void)
{
	ENTER();
	if (my_state == SYNC_PROCESS) {
		schedwrk_destroy (my_schedwrk_handle);
		if (my_sync_callbacks_retrieve (my_service_list[my_processing_idx].service_id, NULL) != -1) {
			my_service_list[my_processing_idx].sync_abort ();
		}
	}

	/*
	 * Clear the ring id so that any "old" barrier messages are discarded
	 * instead of causing problems.
	 */
	memset (&my_ring_id, 0, sizeof (struct memb_ring_id));
}