
sync.c 15 KB

/*
 * Copyright (c) 2009-2012 Red Hat, Inc.
 *
 * All rights reserved.
 *
 * Author: Steven Dake (sdake@redhat.com)
 *
 * This software licensed under BSD license, the text of which follows:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * - Redistributions of source code must retain the above copyright notice,
 *   this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright notice,
 *   this list of conditions and the following disclaimer in the documentation
 *   and/or other materials provided with the distribution.
 * - Neither the name of the MontaVista Software, Inc. nor the names of its
 *   contributors may be used to endorse or promote products derived from this
 *   software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <config.h>

#include <sys/types.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <sys/ioctl.h>
#include <netinet/in.h>
#include <sys/uio.h>
#include <unistd.h>
#include <fcntl.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <errno.h>
#include <time.h>
#include <arpa/inet.h>

#include <corosync/corotypes.h>
#include <corosync/swab.h>
#include <corosync/totem/totempg.h>
#include <corosync/totem/totem.h>
#include <corosync/logsys.h>
#include <qb/qbipc_common.h>

#include "schedwrk.h"
#include "quorum.h"
#include "sync.h"
#include "main.h"
LOGSYS_DECLARE_SUBSYS ("SYNC");

#define MESSAGE_REQ_SYNC_BARRIER 0
#define MESSAGE_REQ_SYNC_SERVICE_BUILD 1

enum sync_process_state {
	PROCESS,
	ACTIVATE
};

enum sync_state {
	SYNC_SERVICELIST_BUILD,
	SYNC_PROCESS,
	SYNC_BARRIER
};
struct service_entry {
	int service_id;
	void (*sync_init) (
		const unsigned int *trans_list,
		size_t trans_list_entries,
		const unsigned int *member_list,
		size_t member_list_entries,
		const struct memb_ring_id *ring_id);
	void (*sync_abort) (void);
	int (*sync_process) (void);
	void (*sync_activate) (void);
	enum sync_process_state state;
	char name[128];
};

struct processor_entry {
	int nodeid;
	int received;
};

struct req_exec_service_build_message {
	struct qb_ipc_request_header header __attribute__((aligned(8)));
	struct memb_ring_id ring_id __attribute__((aligned(8)));
	int service_list_entries __attribute__((aligned(8)));
	int service_list[128] __attribute__((aligned(8)));
};

struct req_exec_barrier_message {
	struct qb_ipc_request_header header __attribute__((aligned(8)));
	struct memb_ring_id ring_id __attribute__((aligned(8)));
};
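
/*
 * Per-ring synchronization state.  my_state tracks where this node is in
 * the SERVICELIST_BUILD -> PROCESS -> BARRIER cycle, my_processing_idx is
 * the index of the service currently being synchronized, and
 * my_processor_list records which ring members have been heard from for
 * the current barrier or service-build exchange.
 */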
static enum sync_state my_state = SYNC_BARRIER;
static struct memb_ring_id my_ring_id;
static int my_processing_idx = 0;
static hdb_handle_t my_schedwrk_handle;
static struct processor_entry my_processor_list[PROCESSOR_COUNT_MAX];
static unsigned int my_member_list[PROCESSOR_COUNT_MAX];
static unsigned int my_trans_list[PROCESSOR_COUNT_MAX];
static size_t my_member_list_entries = 0;
static size_t my_trans_list_entries = 0;
static int my_processor_list_entries = 0;
static struct service_entry my_service_list[SERVICES_COUNT_MAX];
static int my_service_list_entries = 0;

static void (*sync_synchronization_completed) (void);

static void sync_deliver_fn (
	unsigned int nodeid,
	const void *msg,
	unsigned int msg_len,
	int endian_conversion_required);

static int schedwrk_processor (const void *context);

static void sync_process_enter (void);

static void sync_process_call_init (void);

static struct totempg_group sync_group = {
	.group = "sync",
	.group_len = 4
};

static void *sync_group_handle;

int (*my_sync_callbacks_retrieve) (
	int service_id,
	struct sync_callbacks *callbacks);
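
/*
 * Join the "sync" totempg group so barrier and service-build messages can
 * be exchanged with the other nodes, and remember the callbacks used to
 * look up per-service sync handlers and to report completion.
 */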
int sync_init (
	int (*sync_callbacks_retrieve) (
		int service_id,
		struct sync_callbacks *callbacks),
	void (*synchronization_completed) (void))
{
	unsigned int res;

	res = totempg_groups_initialize (
		&sync_group_handle,
		sync_deliver_fn,
		NULL);
	if (res == -1) {
		log_printf (LOGSYS_LEVEL_ERROR,
			"Couldn't initialize groups interface.");
		return (-1);
	}

	res = totempg_groups_join (
		sync_group_handle,
		&sync_group,
		1);
	if (res == -1) {
		log_printf (LOGSYS_LEVEL_ERROR, "Couldn't join group.");
		return (-1);
	}

	sync_synchronization_completed = synchronization_completed;
	my_sync_callbacks_retrieve = sync_callbacks_retrieve;

	return (0);
}
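
/*
 * Handle a barrier message from another node.  Once every processor in the
 * current ring has been heard from, activate the service that just finished
 * its process phase and either move on to the next service or report that
 * synchronization is complete.
 */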
static void sync_barrier_handler (unsigned int nodeid, const void *msg)
{
	const struct req_exec_barrier_message *req_exec_barrier_message = msg;
	int i;
	int barrier_reached = 1;

	if (memcmp (&my_ring_id, &req_exec_barrier_message->ring_id,
	    sizeof (struct memb_ring_id)) != 0) {
		log_printf (LOGSYS_LEVEL_DEBUG, "barrier for old ring - discarding");
		return;
	}
	for (i = 0; i < my_processor_list_entries; i++) {
		if (my_processor_list[i].nodeid == nodeid) {
			my_processor_list[i].received = 1;
		}
	}
	for (i = 0; i < my_processor_list_entries; i++) {
		if (my_processor_list[i].received == 0) {
			barrier_reached = 0;
		}
	}
	if (barrier_reached) {
		log_printf (LOGSYS_LEVEL_DEBUG, "Committing synchronization for %s",
			my_service_list[my_processing_idx].name);
		my_service_list[my_processing_idx].state = ACTIVATE;

		if (my_sync_callbacks_retrieve (my_service_list[my_processing_idx].service_id, NULL) != -1) {
			my_service_list[my_processing_idx].sync_activate ();
		}

		my_processing_idx += 1;
		if (my_service_list_entries == my_processing_idx) {
			sync_synchronization_completed ();
		} else {
			sync_process_enter ();
		}
	}
}
static void dummy_sync_abort (void)
{
}

static int dummy_sync_process (void)
{
	return (0);
}

static void dummy_sync_activate (void)
{
}

static int service_entry_compare (const void *a, const void *b)
{
	const struct service_entry *service_entry_a = a;
	const struct service_entry *service_entry_b = b;

	return (service_entry_a->service_id > service_entry_b->service_id);
}
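
/*
 * Handle a service-build message from another node.  Any service IDs not
 * provided locally are merged into my_service_list as placeholder entries
 * with dummy handlers, the list is kept sorted by service ID so every node
 * processes services in the same order, and the process phase is entered
 * once all processors have reported their service lists.
 */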
static void sync_service_build_handler (unsigned int nodeid, const void *msg)
{
	const struct req_exec_service_build_message *req_exec_service_build_message = msg;
	int i, j;
	int barrier_reached = 1;
	int found;
	int qsort_trigger = 0;

	if (memcmp (&my_ring_id, &req_exec_service_build_message->ring_id,
	    sizeof (struct memb_ring_id)) != 0) {
		log_printf (LOGSYS_LEVEL_DEBUG, "service build for old ring - discarding");
		return;
	}
	for (i = 0; i < req_exec_service_build_message->service_list_entries; i++) {
		found = 0;
		for (j = 0; j < my_service_list_entries; j++) {
			if (req_exec_service_build_message->service_list[i] ==
			    my_service_list[j].service_id) {
				found = 1;
				break;
			}
		}
		if (found == 0) {
			my_service_list[my_service_list_entries].state = PROCESS;
			my_service_list[my_service_list_entries].service_id =
				req_exec_service_build_message->service_list[i];
			sprintf (my_service_list[my_service_list_entries].name,
				"Unknown External Service (id = %d)\n",
				req_exec_service_build_message->service_list[i]);
			my_service_list[my_service_list_entries].sync_init =
				NULL;
			my_service_list[my_service_list_entries].sync_abort =
				dummy_sync_abort;
			my_service_list[my_service_list_entries].sync_process =
				dummy_sync_process;
			my_service_list[my_service_list_entries].sync_activate =
				dummy_sync_activate;
			my_service_list_entries += 1;

			qsort_trigger = 1;
		}
	}
	if (qsort_trigger) {
		qsort (my_service_list, my_service_list_entries,
			sizeof (struct service_entry), service_entry_compare);
	}
	for (i = 0; i < my_processor_list_entries; i++) {
		if (my_processor_list[i].nodeid == nodeid) {
			my_processor_list[i].received = 1;
		}
	}
	for (i = 0; i < my_processor_list_entries; i++) {
		if (my_processor_list[i].received == 0) {
			barrier_reached = 0;
		}
	}
	if (barrier_reached) {
		log_printf (LOGSYS_LEVEL_DEBUG, "enter sync process");
		sync_process_enter ();
	}
}
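
/*
 * Totempg delivery callback for the "sync" group: dispatch incoming
 * messages to the barrier or service-build handler based on the header id.
 */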
static void sync_deliver_fn (
	unsigned int nodeid,
	const void *msg,
	unsigned int msg_len,
	int endian_conversion_required)
{
	struct qb_ipc_request_header *header = (struct qb_ipc_request_header *)msg;

	switch (header->id) {
	case MESSAGE_REQ_SYNC_BARRIER:
		sync_barrier_handler (nodeid, msg);
		break;
	case MESSAGE_REQ_SYNC_SERVICE_BUILD:
		sync_service_build_handler (nodeid, msg);
		break;
	}
}
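
/*
 * Multicast a barrier message carrying the current ring id to every node
 * in the sync group.
 */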
static void barrier_message_transmit (void)
{
	struct iovec iovec;
	struct req_exec_barrier_message req_exec_barrier_message;

	memset (&req_exec_barrier_message, 0, sizeof (req_exec_barrier_message));
	req_exec_barrier_message.header.size = sizeof (struct req_exec_barrier_message);
	req_exec_barrier_message.header.id = MESSAGE_REQ_SYNC_BARRIER;

	memcpy (&req_exec_barrier_message.ring_id, &my_ring_id,
		sizeof (struct memb_ring_id));

	iovec.iov_base = (char *)&req_exec_barrier_message;
	iovec.iov_len = sizeof (req_exec_barrier_message);

	(void)totempg_groups_mcast_joined (sync_group_handle,
		&iovec, 1, TOTEMPG_AGREED);
}
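
/*
 * Multicast the local list of sync-capable service IDs, stamped with the
 * current ring id, to every node in the sync group.
 */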
static void service_build_message_transmit (struct req_exec_service_build_message *service_build_message)
{
	struct iovec iovec;

	service_build_message->header.size = sizeof (struct req_exec_service_build_message);
	service_build_message->header.id = MESSAGE_REQ_SYNC_SERVICE_BUILD;

	memcpy (&service_build_message->ring_id, &my_ring_id,
		sizeof (struct memb_ring_id));

	iovec.iov_base = (void *)service_build_message;
	iovec.iov_len = sizeof (struct req_exec_service_build_message);

	(void)totempg_groups_mcast_joined (sync_group_handle,
		&iovec, 1, TOTEMPG_AGREED);
}
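
/*
 * Enter the barrier state and announce to the other nodes that this node
 * has finished processing the current service.
 */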
static void sync_barrier_enter (void)
{
	my_state = SYNC_BARRIER;
	barrier_message_transmit ();
}
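
/*
 * Compute the transitional membership (nodes present in both the previous
 * and the new ring) and call each locally available service's sync_init
 * with the transitional and full member lists.
 */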
static void sync_process_call_init (void)
{
	unsigned int old_trans_list[PROCESSOR_COUNT_MAX];
	size_t old_trans_list_entries = 0;
	int o, m;
	int i;

	memcpy (old_trans_list, my_trans_list, my_trans_list_entries *
		sizeof (unsigned int));
	old_trans_list_entries = my_trans_list_entries;

	my_trans_list_entries = 0;
	for (o = 0; o < old_trans_list_entries; o++) {
		for (m = 0; m < my_member_list_entries; m++) {
			if (old_trans_list[o] == my_member_list[m]) {
				my_trans_list[my_trans_list_entries] = my_member_list[m];
				my_trans_list_entries++;
				break;
			}
		}
	}

	for (i = 0; i < my_service_list_entries; i++) {
		if (my_sync_callbacks_retrieve (my_service_list[i].service_id, NULL) != -1) {
			my_service_list[i].sync_init (my_trans_list,
				my_trans_list_entries, my_member_list,
				my_member_list_entries,
				&my_ring_id);
		}
	}
}
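
/*
 * Enter the process state: reset the per-node barrier flags and schedule
 * schedwrk_processor to drive the current service's sync_process callback.
 * If there are no services to synchronize, report completion immediately.
 */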
static void sync_process_enter (void)
{
	int i;

	my_state = SYNC_PROCESS;

	/*
	 * No sync services
	 */
	if (my_service_list_entries == 0) {
		my_state = SYNC_SERVICELIST_BUILD;
		sync_synchronization_completed ();
		return;
	}
	for (i = 0; i < my_processor_list_entries; i++) {
		my_processor_list[i].received = 0;
	}
	schedwrk_create (&my_schedwrk_handle,
		schedwrk_processor,
		NULL);
}
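
/*
 * Enter the service-list build state: record the new membership, collect
 * the sync callbacks of all locally known services, multicast that list to
 * the other nodes, and call sync_init for the local services.
 */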
static void sync_servicelist_build_enter (
	const unsigned int *member_list,
	size_t member_list_entries,
	const struct memb_ring_id *ring_id)
{
	struct req_exec_service_build_message service_build;
	int i;
	int res;
	struct sync_callbacks sync_callbacks;

	memset (&service_build, 0, sizeof (service_build));

	my_state = SYNC_SERVICELIST_BUILD;
	for (i = 0; i < member_list_entries; i++) {
		my_processor_list[i].nodeid = member_list[i];
		my_processor_list[i].received = 0;
	}
	my_processor_list_entries = member_list_entries;

	memcpy (my_member_list, member_list,
		member_list_entries * sizeof (unsigned int));
	my_member_list_entries = member_list_entries;

	my_processing_idx = 0;

	memset (my_service_list, 0, sizeof (struct service_entry) * SERVICES_COUNT_MAX);
	my_service_list_entries = 0;

	for (i = 0; i < SERVICES_COUNT_MAX; i++) {
		res = my_sync_callbacks_retrieve (i, &sync_callbacks);
		if (res == -1) {
			continue;
		}
		if (sync_callbacks.sync_init == NULL) {
			continue;
		}
		my_service_list[my_service_list_entries].state = PROCESS;
		my_service_list[my_service_list_entries].service_id = i;
		assert (strlen (sync_callbacks.name) < sizeof (my_service_list[my_service_list_entries].name));
		strcpy (my_service_list[my_service_list_entries].name,
			sync_callbacks.name);
		my_service_list[my_service_list_entries].sync_init = sync_callbacks.sync_init;
		my_service_list[my_service_list_entries].sync_process = sync_callbacks.sync_process;
		my_service_list[my_service_list_entries].sync_abort = sync_callbacks.sync_abort;
		my_service_list[my_service_list_entries].sync_activate = sync_callbacks.sync_activate;
		my_service_list_entries += 1;
	}

	for (i = 0; i < my_service_list_entries; i++) {
		service_build.service_list[i] =
			my_service_list[i].service_id;
	}
	service_build.service_list_entries = my_service_list_entries;

	service_build_message_transmit (&service_build);

	log_printf (LOGSYS_LEVEL_DEBUG, "call init for locally known services");
	sync_process_call_init ();
}
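
/*
 * Scheduled-work callback that drives the current service's sync_process.
 * A zero return from sync_process means the service has finished and the
 * barrier is entered; otherwise -1 is returned so the work item runs again.
 */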
static int schedwrk_processor (const void *context)
{
	int res = 0;

	if (my_service_list[my_processing_idx].state == PROCESS) {
		if (my_sync_callbacks_retrieve (my_service_list[my_processing_idx].service_id, NULL) != -1) {
			res = my_service_list[my_processing_idx].sync_process ();
		} else {
			res = 0;
		}
		if (res == 0) {
			sync_barrier_enter ();
		} else {
			return (-1);
		}
	}
	return (0);
}
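
/*
 * Called when a new configuration (ring) is formed: record the new ring id
 * and start the synchronization cycle with the service-list build phase.
 */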
void sync_start (
	const unsigned int *member_list,
	size_t member_list_entries,
	const struct memb_ring_id *ring_id)
{
	ENTER();
	memcpy (&my_ring_id, ring_id, sizeof (struct memb_ring_id));

	sync_servicelist_build_enter (member_list, member_list_entries,
		ring_id);
}
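
/*
 * Save the transitional member list (the members carried over from the
 * previous ring) so sync_process_call_init can pass it to the services'
 * sync_init callbacks.
 */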
void sync_save_transitional (
	const unsigned int *member_list,
	size_t member_list_entries,
	const struct memb_ring_id *ring_id)
{
	ENTER();
	memcpy (my_trans_list, member_list, member_list_entries *
		sizeof (unsigned int));
	my_trans_list_entries = member_list_entries;
}
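
/*
 * Abort an in-progress synchronization, typically because a new
 * configuration arrived before the current one finished synchronizing.
 */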
void sync_abort (void)
{
	ENTER();
	if (my_state == SYNC_PROCESS) {
		schedwrk_destroy (my_schedwrk_handle);
		if (my_sync_callbacks_retrieve (my_service_list[my_processing_idx].service_id, NULL) != -1) {
			my_service_list[my_processing_idx].sync_abort ();
		}
	}

	/* this will prevent any "old" barrier messages from causing
	 * problems.
	 */
	memset (&my_ring_id, 0, sizeof (struct memb_ring_id));
}