syncv2.c

/*
 * Copyright (c) 2009 Red Hat, Inc.
 *
 * All rights reserved.
 *
 * Author: Steven Dake (sdake@redhat.com)
 *
 * This software licensed under BSD license, the text of which follows:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * - Redistributions of source code must retain the above copyright notice,
 *   this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright notice,
 *   this list of conditions and the following disclaimer in the documentation
 *   and/or other materials provided with the distribution.
 * - Neither the name of the MontaVista Software, Inc. nor the names of its
 *   contributors may be used to endorse or promote products derived from this
 *   software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <config.h>

#include <sys/types.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <sys/ioctl.h>
#include <netinet/in.h>
#include <sys/uio.h>
#include <unistd.h>
#include <fcntl.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <time.h>
#include <arpa/inet.h>

#include <corosync/corotypes.h>
#include <corosync/swab.h>
#include <corosync/totem/totempg.h>
#include <corosync/totem/totem.h>
#include <corosync/lcr/lcr_ifact.h>
#include <corosync/engine/logsys.h>
#include <qb/qbipc_common.h>

#include "schedwrk.h"
#include "quorum.h"
#include "sync.h"
#include "syncv2.h"

LOGSYS_DECLARE_SUBSYS ("SYNCV2");
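
/*
 * Message types multicast within the "syncv2" totempg group.  BARRIER reports
 * that the sender finished sync_process for the current service,
 * SERVICE_BUILD advertises the sender's list of service ids that need
 * synchronization, and MEMB_DETERMINE collects the node list to synchronize
 * against.
 */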
#define MESSAGE_REQ_SYNC_BARRIER 0
#define MESSAGE_REQ_SYNC_SERVICE_BUILD 1
#define MESSAGE_REQ_SYNC_MEMB_DETERMINE 2

enum sync_process_state {
    INIT,
    PROCESS,
    ACTIVATE
};

enum sync_state {
    SYNC_SERVICELIST_BUILD,
    SYNC_PROCESS,
    SYNC_BARRIER
};

struct service_entry {
    int service_id;
    int api_version;
    union sync_init_api sync_init_api;
    void (*sync_abort) (void);
    int (*sync_process) (void);
    void (*sync_activate) (void);
    enum sync_process_state state;
    char name[128];
};

struct processor_entry {
    int nodeid;
    int received;
};

struct req_exec_memb_determine_message {
    struct qb_ipc_request_header header __attribute__((aligned(8)));
    struct memb_ring_id ring_id __attribute__((aligned(8)));
};

struct req_exec_service_build_message {
    struct qb_ipc_request_header header __attribute__((aligned(8)));
    struct memb_ring_id ring_id __attribute__((aligned(8)));
    int service_list_entries __attribute__((aligned(8)));
    int service_list[128] __attribute__((aligned(8)));
};

struct req_exec_barrier_message {
    struct qb_ipc_request_header header __attribute__((aligned(8)));
    struct memb_ring_id ring_id __attribute__((aligned(8)));
};

static enum sync_state my_state = SYNC_BARRIER;
static struct memb_ring_id my_ring_id;
static struct memb_ring_id my_memb_determine_ring_id;
static int my_memb_determine = 0;
static unsigned int my_memb_determine_list[PROCESSOR_COUNT_MAX];
static unsigned int my_memb_determine_list_entries = 0;
static int my_processing_idx = 0;
static hdb_handle_t my_schedwrk_handle;
static struct processor_entry my_processor_list[PROCESSOR_COUNT_MAX];
static unsigned int my_member_list[PROCESSOR_COUNT_MAX];
static unsigned int my_trans_list[PROCESSOR_COUNT_MAX];
static size_t my_member_list_entries = 0;
static size_t my_trans_list_entries = 0;
static int my_processor_list_entries = 0;
static struct service_entry my_service_list[128];
static int my_service_list_entries = 0;
static const struct memb_ring_id sync_ring_id;
static struct service_entry my_initial_service_list[PROCESSOR_COUNT_MAX];
static int my_initial_service_list_entries;
static void (*sync_synchronization_completed) (void);

static void sync_deliver_fn (
    unsigned int nodeid,
    const void *msg,
    unsigned int msg_len,
    int endian_conversion_required);

static int schedwrk_processor (const void *context);

static void sync_process_enter (void);

static struct totempg_group sync_group = {
    .group = "syncv2",
    .group_len = 6
};

static hdb_handle_t sync_group_handle;
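
/*
 * Initialize the groups interface, join the "syncv2" group, and capture the
 * sync callbacks of every service that registers a sync_init handler.  The
 * captured entries seed my_service_list at the start of each synchronization.
 */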
int sync_v2_init (
    int (*sync_callbacks_retrieve) (
        int service_id,
        struct sync_callbacks *callbacks),
    void (*synchronization_completed) (void))
{
    unsigned int res;
    int i;
    struct sync_callbacks sync_callbacks;

    res = totempg_groups_initialize (
        &sync_group_handle,
        sync_deliver_fn,
        NULL);
    if (res == -1) {
        log_printf (LOGSYS_LEVEL_ERROR,
            "Couldn't initialize groups interface.");
        return (-1);
    }
    res = totempg_groups_join (
        sync_group_handle,
        &sync_group,
        1);
    if (res == -1) {
        log_printf (LOGSYS_LEVEL_ERROR, "Couldn't join group.\n");
        return (-1);
    }
    sync_synchronization_completed = synchronization_completed;

    for (i = 0; i < 64; i++) {
        res = sync_callbacks_retrieve (i, &sync_callbacks);
        if (res == -1) {
            continue;
        }
        if (sync_callbacks.sync_init_api.sync_init_v1 == NULL) {
            continue;
        }
        my_initial_service_list[my_initial_service_list_entries].state = INIT;
        my_initial_service_list[my_initial_service_list_entries].service_id = i;
        strcpy (my_initial_service_list[my_initial_service_list_entries].name,
            sync_callbacks.name);
        my_initial_service_list[my_initial_service_list_entries].api_version = sync_callbacks.api_version;
        my_initial_service_list[my_initial_service_list_entries].sync_init_api = sync_callbacks.sync_init_api;
        my_initial_service_list[my_initial_service_list_entries].sync_process = sync_callbacks.sync_process;
        my_initial_service_list[my_initial_service_list_entries].sync_abort = sync_callbacks.sync_abort;
        my_initial_service_list[my_initial_service_list_entries].sync_activate = sync_callbacks.sync_activate;
        my_initial_service_list_entries += 1;
    }
    return (0);
}
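
/*
 * A barrier message reports that the sending node has finished sync_process
 * for the service currently being synchronized.  Once every processor in the
 * membership has reported, the service is activated and synchronization
 * advances to the next service or completes.
 */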
static void sync_barrier_handler (unsigned int nodeid, const void *msg)
{
    const struct req_exec_barrier_message *req_exec_barrier_message = msg;
    int i;
    int barrier_reached = 1;

    if (memcmp (&my_ring_id, &req_exec_barrier_message->ring_id,
        sizeof (struct memb_ring_id)) != 0) {

        log_printf (LOGSYS_LEVEL_DEBUG, "barrier for old ring - discarding\n");
        return;
    }
    for (i = 0; i < my_processor_list_entries; i++) {
        if (my_processor_list[i].nodeid == nodeid) {
            my_processor_list[i].received = 1;
        }
    }
    for (i = 0; i < my_processor_list_entries; i++) {
        if (my_processor_list[i].received == 0) {
            barrier_reached = 0;
        }
    }
    if (barrier_reached) {
        log_printf (LOGSYS_LEVEL_DEBUG, "Committing synchronization for %s\n",
            my_service_list[my_processing_idx].name);
        my_service_list[my_processing_idx].state = ACTIVATE;
        my_service_list[my_processing_idx].sync_activate ();
        my_processing_idx += 1;

        if (my_service_list_entries == my_processing_idx) {
            my_memb_determine_list_entries = 0;
            sync_synchronization_completed ();
        } else {
            sync_process_enter ();
        }
    }
}
static void dummy_sync_init (
    const unsigned int *member_list,
    size_t member_list_entries,
    const struct memb_ring_id *ring_id)
{
}

static void dummy_sync_abort (void)
{
}

static int dummy_sync_process (void)
{
    return (0);
}

static void dummy_sync_activate (void)
{
}

static int service_entry_compare (const void *a, const void *b)
{
    const struct service_entry *service_entry_a = a;
    const struct service_entry *service_entry_b = b;

    return (service_entry_a->service_id > service_entry_b->service_id);
}
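
/*
 * Record that nodeid answered a membership-determine request for the current
 * determine ring, adding it to my_memb_determine_list exactly once.
 */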
static void sync_memb_determine (unsigned int nodeid, const void *msg)
{
    const struct req_exec_memb_determine_message *req_exec_memb_determine_message = msg;
    int found = 0;
    int i;

    if (memcmp (&req_exec_memb_determine_message->ring_id,
        &my_memb_determine_ring_id, sizeof (struct memb_ring_id)) != 0) {

        log_printf (LOGSYS_LEVEL_DEBUG, "memb determine for old ring - discarding\n");
        return;
    }
    my_memb_determine = 1;

    for (i = 0; i < my_memb_determine_list_entries; i++) {
        if (my_memb_determine_list[i] == nodeid) {
            found = 1;
        }
    }
    if (found == 0) {
        my_memb_determine_list[my_memb_determine_list_entries] = nodeid;
        my_memb_determine_list_entries += 1;
    }
}
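
/*
 * Merge the sender's advertised service list into my_service_list.  Services
 * not registered locally get dummy callbacks so that every node steps through
 * the same, sorted sequence of services.  Once all processors have sent their
 * list, enter the process state.
 */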
static void sync_service_build_handler (unsigned int nodeid, const void *msg)
{
    const struct req_exec_service_build_message *req_exec_service_build_message = msg;
    int i, j;
    int barrier_reached = 1;
    int found;
    int qsort_trigger = 0;

    if (memcmp (&my_ring_id, &req_exec_service_build_message->ring_id,
        sizeof (struct memb_ring_id)) != 0) {

        log_printf (LOGSYS_LEVEL_DEBUG, "service build for old ring - discarding\n");
        return;
    }
    for (i = 0; i < req_exec_service_build_message->service_list_entries; i++) {
        found = 0;
        for (j = 0; j < my_service_list_entries; j++) {
            if (req_exec_service_build_message->service_list[i] ==
                my_service_list[j].service_id) {
                found = 1;
                break;
            }
        }
        if (found == 0) {
            my_service_list[my_service_list_entries].state = INIT;
            my_service_list[my_service_list_entries].service_id =
                req_exec_service_build_message->service_list[i];
            sprintf (my_service_list[my_service_list_entries].name,
                "External Service (id = %d)\n",
                req_exec_service_build_message->service_list[i]);
            my_service_list[my_service_list_entries].api_version = 1;
            my_service_list[my_service_list_entries].sync_init_api.sync_init_v1 =
                dummy_sync_init;
            my_service_list[my_service_list_entries].sync_abort =
                dummy_sync_abort;
            my_service_list[my_service_list_entries].sync_process =
                dummy_sync_process;
            my_service_list[my_service_list_entries].sync_activate =
                dummy_sync_activate;
            my_service_list_entries += 1;
            qsort_trigger = 1;
        }
    }
    if (qsort_trigger) {
        qsort (my_service_list, my_service_list_entries,
            sizeof (struct service_entry), service_entry_compare);
    }
    for (i = 0; i < my_processor_list_entries; i++) {
        if (my_processor_list[i].nodeid == nodeid) {
            my_processor_list[i].received = 1;
        }
    }
    for (i = 0; i < my_processor_list_entries; i++) {
        if (my_processor_list[i].received == 0) {
            barrier_reached = 0;
        }
    }
    if (barrier_reached) {
        sync_process_enter ();
    }
}
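
/*
 * Deliver callback for the "syncv2" totempg group: dispatch on the message id
 * carried in the qb_ipc_request_header.
 */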
static void sync_deliver_fn (
    unsigned int nodeid,
    const void *msg,
    unsigned int msg_len,
    int endian_conversion_required)
{
    struct qb_ipc_request_header *header = (struct qb_ipc_request_header *)msg;

    switch (header->id) {
    case MESSAGE_REQ_SYNC_BARRIER:
        sync_barrier_handler (nodeid, msg);
        break;
    case MESSAGE_REQ_SYNC_SERVICE_BUILD:
        sync_service_build_handler (nodeid, msg);
        break;
    case MESSAGE_REQ_SYNC_MEMB_DETERMINE:
        sync_memb_determine (nodeid, msg);
        break;
    }
}
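
/*
 * The transmit helpers below multicast their message to the "syncv2" group
 * using agreed ordering, stamping each message with the ring id it applies to
 * so the handlers above can discard stale copies.
 */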
static void memb_determine_message_transmit (void)
{
    struct iovec iovec;
    struct req_exec_memb_determine_message req_exec_memb_determine_message;

    req_exec_memb_determine_message.header.size = sizeof (struct req_exec_memb_determine_message);
    req_exec_memb_determine_message.header.id = MESSAGE_REQ_SYNC_MEMB_DETERMINE;

    memcpy (&req_exec_memb_determine_message.ring_id,
        &my_memb_determine_ring_id,
        sizeof (struct memb_ring_id));

    iovec.iov_base = (char *)&req_exec_memb_determine_message;
    iovec.iov_len = sizeof (req_exec_memb_determine_message);

    (void)totempg_groups_mcast_joined (sync_group_handle,
        &iovec, 1, TOTEMPG_AGREED);
}

static void barrier_message_transmit (void)
{
    struct iovec iovec;
    struct req_exec_barrier_message req_exec_barrier_message;

    req_exec_barrier_message.header.size = sizeof (struct req_exec_barrier_message);
    req_exec_barrier_message.header.id = MESSAGE_REQ_SYNC_BARRIER;

    memcpy (&req_exec_barrier_message.ring_id, &my_ring_id,
        sizeof (struct memb_ring_id));

    iovec.iov_base = (char *)&req_exec_barrier_message;
    iovec.iov_len = sizeof (req_exec_barrier_message);

    (void)totempg_groups_mcast_joined (sync_group_handle,
        &iovec, 1, TOTEMPG_AGREED);
}

static void service_build_message_transmit (struct req_exec_service_build_message *service_build_message)
{
    struct iovec iovec;

    service_build_message->header.size = sizeof (struct req_exec_service_build_message);
    service_build_message->header.id = MESSAGE_REQ_SYNC_SERVICE_BUILD;

    memcpy (&service_build_message->ring_id, &my_ring_id,
        sizeof (struct memb_ring_id));

    iovec.iov_base = (void *)service_build_message;
    iovec.iov_len = sizeof (struct req_exec_service_build_message);

    (void)totempg_groups_mcast_joined (sync_group_handle,
        &iovec, 1, TOTEMPG_AGREED);
}
static void sync_barrier_enter (void)
{
    my_state = SYNC_BARRIER;
    barrier_message_transmit ();
}

static void sync_process_enter (void)
{
    int i;

    my_state = SYNC_PROCESS;

    /*
     * No syncv2 services
     */
    if (my_service_list_entries == 0) {
        my_state = SYNC_SERVICELIST_BUILD;
        my_memb_determine_list_entries = 0;
        sync_synchronization_completed ();
        return;
    }
    for (i = 0; i < my_processor_list_entries; i++) {
        my_processor_list[i].received = 0;
    }
    schedwrk_create (&my_schedwrk_handle,
        schedwrk_processor,
        NULL);
}
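
/*
 * Start of a new synchronization round: remember the membership, reset the
 * per-processor barrier flags, seed my_service_list from the locally
 * registered services, and multicast that list to the other nodes.
 */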
static void sync_servicelist_build_enter (
    const unsigned int *member_list,
    size_t member_list_entries,
    const struct memb_ring_id *ring_id)
{
    struct req_exec_service_build_message service_build;
    int i;

    my_state = SYNC_SERVICELIST_BUILD;
    for (i = 0; i < member_list_entries; i++) {
        my_processor_list[i].nodeid = member_list[i];
        my_processor_list[i].received = 0;
    }
    my_processor_list_entries = member_list_entries;

    memcpy (my_member_list, member_list,
        member_list_entries * sizeof (unsigned int));
    my_member_list_entries = member_list_entries;

    my_processing_idx = 0;

    memcpy (my_service_list, my_initial_service_list,
        sizeof (struct service_entry) *
        my_initial_service_list_entries);
    my_service_list_entries = my_initial_service_list_entries;

    for (i = 0; i < my_initial_service_list_entries; i++) {
        service_build.service_list[i] =
            my_initial_service_list[i].service_id;
    }
    service_build.service_list_entries = i;

    service_build_message_transmit (&service_build);
}
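
/*
 * schedwrk callback that drives the service currently being synchronized:
 * call its sync_init (v1, or v2 with the transitional membership) once, then
 * call sync_process.  When sync_process reports completion (returns 0) this
 * node enters the barrier; otherwise the callback returns -1 so the remaining
 * work can be attempted again on a later pass.
 */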
static int schedwrk_processor (const void *context)
{
    int res = 0;

    if (my_service_list[my_processing_idx].state == INIT) {
        my_service_list[my_processing_idx].state = PROCESS;
        if (my_service_list[my_processing_idx].api_version == 1) {
            my_service_list[my_processing_idx].sync_init_api.sync_init_v1 (my_member_list,
                my_member_list_entries,
                &my_ring_id);
        } else {
            unsigned int old_trans_list[PROCESSOR_COUNT_MAX];
            size_t old_trans_list_entries = 0;
            int o, m;

            memcpy (old_trans_list, my_trans_list, my_trans_list_entries *
                sizeof (unsigned int));
            old_trans_list_entries = my_trans_list_entries;

            /*
             * Narrow the transitional list to nodes that are still present
             * in the new membership.
             */
            my_trans_list_entries = 0;
            for (o = 0; o < old_trans_list_entries; o++) {
                for (m = 0; m < my_member_list_entries; m++) {
                    if (old_trans_list[o] == my_member_list[m]) {
                        my_trans_list[my_trans_list_entries] = my_member_list[m];
                        my_trans_list_entries++;
                        break;
                    }
                }
            }
            my_service_list[my_processing_idx].sync_init_api.sync_init_v2 (my_trans_list,
                my_trans_list_entries, my_member_list,
                my_member_list_entries,
                &my_ring_id);
        }
    }
    if (my_service_list[my_processing_idx].state == PROCESS) {
        res = my_service_list[my_processing_idx].sync_process ();
        if (res == 0) {
            sync_barrier_enter ();
        } else {
            return (-1);
        }
    }
    return (0);
}
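
/*
 * Begin synchronization for the membership identified by ring_id.  If a
 * membership determine round ran, synchronize against the node list it
 * collected; otherwise use the member list supplied by the caller.
 */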
void sync_v2_start (
    const unsigned int *member_list,
    size_t member_list_entries,
    const struct memb_ring_id *ring_id)
{
    ENTER();
    memcpy (&my_ring_id, ring_id, sizeof (struct memb_ring_id));

    if (my_memb_determine) {
        my_memb_determine = 0;
        sync_servicelist_build_enter (my_memb_determine_list,
            my_memb_determine_list_entries, ring_id);
    } else {
        sync_servicelist_build_enter (member_list, member_list_entries,
            ring_id);
    }
}

void sync_v2_save_transitional (
    const unsigned int *member_list,
    size_t member_list_entries,
    const struct memb_ring_id *ring_id)
{
    ENTER();
    memcpy (my_trans_list, member_list, member_list_entries *
        sizeof (unsigned int));
    my_trans_list_entries = member_list_entries;
}
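
/*
 * Abort an in-progress synchronization: stop the scheduled work, tell the
 * current service to abort, and clear the ring id so stale barrier messages
 * are ignored.
 */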
void sync_v2_abort (void)
{
    ENTER();
    if (my_state == SYNC_PROCESS) {
        schedwrk_destroy (my_schedwrk_handle);
        my_service_list[my_processing_idx].sync_abort ();
    }

    /*
     * Clearing the ring id prevents any "old" barrier messages from causing
     * problems.
     */
    memset (&my_ring_id, 0, sizeof (struct memb_ring_id));
}
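
/*
 * Membership determine support: record the ring the determine round applies
 * to and multicast the request; sync_v2_memb_list_abort resets the collected
 * node list and ring id.
 */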
void sync_v2_memb_list_determine (const struct memb_ring_id *ring_id)
{
    ENTER();
    memcpy (&my_memb_determine_ring_id, ring_id,
        sizeof (struct memb_ring_id));

    memb_determine_message_transmit ();
}

void sync_v2_memb_list_abort (void)
{
    ENTER();
    my_memb_determine_list_entries = 0;
    memset (&my_memb_determine_ring_id, 0, sizeof (struct memb_ring_id));
}