syncv2.c

/*
 * Copyright (c) 2009 Red Hat, Inc.
 *
 * All rights reserved.
 *
 * Author: Steven Dake (sdake@redhat.com)
 *
 * This software licensed under BSD license, the text of which follows:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * - Redistributions of source code must retain the above copyright notice,
 *   this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright notice,
 *   this list of conditions and the following disclaimer in the documentation
 *   and/or other materials provided with the distribution.
 * - Neither the name of the MontaVista Software, Inc. nor the names of its
 *   contributors may be used to endorse or promote products derived from this
 *   software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <config.h>

#include <sys/types.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <sys/ioctl.h>
#include <netinet/in.h>
#include <sys/uio.h>
#include <unistd.h>
#include <fcntl.h>
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>
#include <time.h>
#include <arpa/inet.h>

#include <corosync/corotypes.h>
#include <corosync/swab.h>
#include <corosync/totem/totempg.h>
#include <corosync/totem/totem.h>
#include <corosync/logsys.h>
#include <qb/qbipc_common.h>

#include "schedwrk.h"
#include "quorum.h"
#include "sync.h"
#include "syncv2.h"

LOGSYS_DECLARE_SUBSYS ("SYNCV2");
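
/*
 * Overview of the syncv2 synchronization sequence implemented below:
 *
 * 1. SYNC_SERVICELIST_BUILD: every node multicasts the list of service
 *    ids it wants synchronized; the lists from all nodes are merged so
 *    that each node ends up with the same, sorted service list.
 * 2. SYNC_PROCESS: the services are processed one at a time.  For the
 *    current service, sync_init is called once and sync_process is run
 *    from a schedwrk work handler until it reports completion.
 * 3. SYNC_BARRIER: when a node finishes processing a service it
 *    multicasts a barrier message.  Once barrier messages from every
 *    processor in the membership have arrived, sync_activate is called
 *    and processing moves on to the next service in the list.
 *
 * MESSAGE_REQ_SYNC_MEMB_DETERMINE is used to collect the membership
 * list that the synchronization round will run against.
 */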

#define MESSAGE_REQ_SYNC_BARRIER 0
#define MESSAGE_REQ_SYNC_SERVICE_BUILD 1
#define MESSAGE_REQ_SYNC_MEMB_DETERMINE 2

enum sync_process_state {
        INIT,
        PROCESS,
        ACTIVATE
};

enum sync_state {
        SYNC_SERVICELIST_BUILD,
        SYNC_PROCESS,
        SYNC_BARRIER
};

struct service_entry {
        int service_id;
        int api_version;
        union sync_init_api sync_init_api;
        void (*sync_abort) (void);
        int (*sync_process) (void);
        void (*sync_activate) (void);
        enum sync_process_state state;
        char name[128];
};

struct processor_entry {
        int nodeid;
        int received;
};

struct req_exec_memb_determine_message {
        struct qb_ipc_request_header header __attribute__((aligned(8)));
        struct memb_ring_id ring_id __attribute__((aligned(8)));
};

struct req_exec_service_build_message {
        struct qb_ipc_request_header header __attribute__((aligned(8)));
        struct memb_ring_id ring_id __attribute__((aligned(8)));
        int service_list_entries __attribute__((aligned(8)));
        int service_list[128] __attribute__((aligned(8)));
};

struct req_exec_barrier_message {
        struct qb_ipc_request_header header __attribute__((aligned(8)));
        struct memb_ring_id ring_id __attribute__((aligned(8)));
};
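
/*
 * Per-node state for the current synchronization round: the ring and
 * membership being synchronized, the merged service list, the index of
 * the service currently being processed, and which processors have been
 * heard from in the current exchange.
 */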
static enum sync_state my_state = SYNC_BARRIER;
static struct memb_ring_id my_ring_id;
static struct memb_ring_id my_memb_determine_ring_id;
static int my_memb_determine = 0;
static unsigned int my_memb_determine_list[PROCESSOR_COUNT_MAX];
static unsigned int my_memb_determine_list_entries = 0;
static int my_processing_idx = 0;
static hdb_handle_t my_schedwrk_handle;
static struct processor_entry my_processor_list[PROCESSOR_COUNT_MAX];
static unsigned int my_member_list[PROCESSOR_COUNT_MAX];
static unsigned int my_trans_list[PROCESSOR_COUNT_MAX];
static size_t my_member_list_entries = 0;
static size_t my_trans_list_entries = 0;
static int my_processor_list_entries = 0;
static struct service_entry my_service_list[128];
static int my_service_list_entries = 0;
static const struct memb_ring_id sync_ring_id;
static struct service_entry my_initial_service_list[PROCESSOR_COUNT_MAX];
static int my_initial_service_list_entries;
static void (*sync_synchronization_completed) (void);

static void sync_deliver_fn (
        unsigned int nodeid,
        const void *msg,
        unsigned int msg_len,
        int endian_conversion_required);

static int schedwrk_processor (const void *context);

static void sync_process_enter (void);

static struct totempg_group sync_group = {
        .group          = "syncv2",
        .group_len      = 6
};

static void *sync_group_handle;
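
/*
 * sync_v2_init() joins the "syncv2" totempg group and records the sync
 * callbacks of every locally configured service (service ids 0..63).
 * sync_callbacks_retrieve() is supplied by the caller and is expected to
 * fill in a struct sync_callbacks roughly as sketched below; the field
 * names are taken from how they are consumed in this file, and the
 * example service (and the assumption that .name is a pointer) is
 * illustrative only:
 *
 *      struct sync_callbacks cb;
 *      cb.name = "example service";
 *      cb.api_version = 1;
 *      cb.sync_init_api.sync_init_v1 = example_sync_init;
 *      cb.sync_process = example_sync_process;
 *      cb.sync_abort = example_sync_abort;
 *      cb.sync_activate = example_sync_activate;
 */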
int sync_v2_init (
        int (*sync_callbacks_retrieve) (
                int service_id,
                struct sync_callbacks *callbacks),
        void (*synchronization_completed) (void))
{
        unsigned int res;
        int i;
        struct sync_callbacks sync_callbacks;

        res = totempg_groups_initialize (
                &sync_group_handle,
                sync_deliver_fn,
                NULL);
        if (res == -1) {
                log_printf (LOGSYS_LEVEL_ERROR,
                        "Couldn't initialize groups interface.");
                return (-1);
        }

        res = totempg_groups_join (
                sync_group_handle,
                &sync_group,
                1);
        if (res == -1) {
                log_printf (LOGSYS_LEVEL_ERROR, "Couldn't join group.\n");
                return (-1);
        }

        sync_synchronization_completed = synchronization_completed;

        for (i = 0; i < 64; i++) {
                res = sync_callbacks_retrieve (i, &sync_callbacks);
                if (res == -1) {
                        continue;
                }
                if (sync_callbacks.sync_init_api.sync_init_v1 == NULL) {
                        continue;
                }
                my_initial_service_list[my_initial_service_list_entries].state = INIT;
                my_initial_service_list[my_initial_service_list_entries].service_id = i;
                strcpy (my_initial_service_list[my_initial_service_list_entries].name,
                        sync_callbacks.name);
                my_initial_service_list[my_initial_service_list_entries].api_version = sync_callbacks.api_version;
                my_initial_service_list[my_initial_service_list_entries].sync_init_api = sync_callbacks.sync_init_api;
                my_initial_service_list[my_initial_service_list_entries].sync_process = sync_callbacks.sync_process;
                my_initial_service_list[my_initial_service_list_entries].sync_abort = sync_callbacks.sync_abort;
                my_initial_service_list[my_initial_service_list_entries].sync_activate = sync_callbacks.sync_activate;
                my_initial_service_list_entries += 1;
        }
        return (0);
}
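
/*
 * Handles MESSAGE_REQ_SYNC_BARRIER.  Barrier messages for a ring other
 * than the current one are discarded.  Once a barrier message has been
 * received from every processor, the service currently being
 * synchronized is activated and processing advances to the next service,
 * or the whole synchronization round completes.
 */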
static void sync_barrier_handler (unsigned int nodeid, const void *msg)
{
        const struct req_exec_barrier_message *req_exec_barrier_message = msg;
        int i;
        int barrier_reached = 1;

        if (memcmp (&my_ring_id, &req_exec_barrier_message->ring_id,
                sizeof (struct memb_ring_id)) != 0) {

                log_printf (LOGSYS_LEVEL_DEBUG, "barrier for old ring - discarding\n");
                return;
        }
        for (i = 0; i < my_processor_list_entries; i++) {
                if (my_processor_list[i].nodeid == nodeid) {
                        my_processor_list[i].received = 1;
                }
        }
        for (i = 0; i < my_processor_list_entries; i++) {
                if (my_processor_list[i].received == 0) {
                        barrier_reached = 0;
                }
        }
        if (barrier_reached) {
                log_printf (LOGSYS_LEVEL_DEBUG, "Committing synchronization for %s\n",
                        my_service_list[my_processing_idx].name);
                my_service_list[my_processing_idx].state = ACTIVATE;
                my_service_list[my_processing_idx].sync_activate ();

                my_processing_idx += 1;
                if (my_service_list_entries == my_processing_idx) {
                        my_memb_determine_list_entries = 0;
                        sync_synchronization_completed ();
                } else {
                        sync_process_enter ();
                }
        }
}
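
/*
 * Placeholder callbacks used for services that appear in another node's
 * service list but are not configured locally: this node still takes
 * part in the barrier for such services, but has no work to do for them.
 */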
static void dummy_sync_init (
        const unsigned int *member_list,
        size_t member_list_entries,
        const struct memb_ring_id *ring_id)
{
}

static void dummy_sync_abort (void)
{
}

static int dummy_sync_process (void)
{
        return (0);
}

static void dummy_sync_activate (void)
{
}

static int service_entry_compare (const void *a, const void *b)
{
        const struct service_entry *service_entry_a = a;
        const struct service_entry *service_entry_b = b;

        /*
         * qsort() requires a three-way result (negative, zero, positive),
         * not just 0 or 1, to order the entries correctly.
         */
        return (service_entry_a->service_id - service_entry_b->service_id);
}
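
/*
 * Handles MESSAGE_REQ_SYNC_MEMB_DETERMINE.  Each sender of a memb
 * determine message for the current determine ring is collected into
 * my_memb_determine_list, which sync_v2_start() will then use as the
 * membership to synchronize against.
 */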
static void sync_memb_determine (unsigned int nodeid, const void *msg)
{
        const struct req_exec_memb_determine_message *req_exec_memb_determine_message = msg;
        int found = 0;
        int i;

        if (memcmp (&req_exec_memb_determine_message->ring_id,
                &my_memb_determine_ring_id, sizeof (struct memb_ring_id)) != 0) {

                log_printf (LOGSYS_LEVEL_DEBUG, "memb determine for old ring - discarding\n");
                return;
        }

        my_memb_determine = 1;
        for (i = 0; i < my_memb_determine_list_entries; i++) {
                if (my_memb_determine_list[i] == nodeid) {
                        found = 1;
                }
        }
        if (found == 0) {
                my_memb_determine_list[my_memb_determine_list_entries] = nodeid;
                my_memb_determine_list_entries += 1;
        }
}
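
/*
 * Handles MESSAGE_REQ_SYNC_SERVICE_BUILD.  Service ids advertised by
 * other nodes but unknown locally are added to my_service_list with the
 * dummy callbacks above, and the list is re-sorted by service id so all
 * nodes process services in the same order.  When service lists have
 * been received from every processor, the process phase is entered.
 */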
static void sync_service_build_handler (unsigned int nodeid, const void *msg)
{
        const struct req_exec_service_build_message *req_exec_service_build_message = msg;
        int i, j;
        int barrier_reached = 1;
        int found;
        int qsort_trigger = 0;

        if (memcmp (&my_ring_id, &req_exec_service_build_message->ring_id,
                sizeof (struct memb_ring_id)) != 0) {

                log_printf (LOGSYS_LEVEL_DEBUG, "service build for old ring - discarding\n");
                return;
        }
        for (i = 0; i < req_exec_service_build_message->service_list_entries; i++) {
                found = 0;
                for (j = 0; j < my_service_list_entries; j++) {
                        if (req_exec_service_build_message->service_list[i] ==
                                my_service_list[j].service_id) {
                                found = 1;
                                break;
                        }
                }
                if (found == 0) {
                        my_service_list[my_service_list_entries].state = INIT;
                        my_service_list[my_service_list_entries].service_id =
                                req_exec_service_build_message->service_list[i];
                        sprintf (my_service_list[my_service_list_entries].name,
                                "External Service (id = %d)\n",
                                req_exec_service_build_message->service_list[i]);
                        my_service_list[my_service_list_entries].api_version = 1;
                        my_service_list[my_service_list_entries].sync_init_api.sync_init_v1 =
                                dummy_sync_init;
                        my_service_list[my_service_list_entries].sync_abort =
                                dummy_sync_abort;
                        my_service_list[my_service_list_entries].sync_process =
                                dummy_sync_process;
                        my_service_list[my_service_list_entries].sync_activate =
                                dummy_sync_activate;
                        my_service_list_entries += 1;

                        qsort_trigger = 1;
                }
        }
        if (qsort_trigger) {
                qsort (my_service_list, my_service_list_entries,
                        sizeof (struct service_entry), service_entry_compare);
        }
        for (i = 0; i < my_processor_list_entries; i++) {
                if (my_processor_list[i].nodeid == nodeid) {
                        my_processor_list[i].received = 1;
                }
        }
        for (i = 0; i < my_processor_list_entries; i++) {
                if (my_processor_list[i].received == 0) {
                        barrier_reached = 0;
                }
        }
        if (barrier_reached) {
                sync_process_enter ();
        }
}
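
/*
 * Totem delivery callback for the "syncv2" group: dispatches an incoming
 * message to one of the three handlers above based on the header id.
 */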
static void sync_deliver_fn (
        unsigned int nodeid,
        const void *msg,
        unsigned int msg_len,
        int endian_conversion_required)
{
        const struct qb_ipc_request_header *header = msg;

        switch (header->id) {
        case MESSAGE_REQ_SYNC_BARRIER:
                sync_barrier_handler (nodeid, msg);
                break;
        case MESSAGE_REQ_SYNC_SERVICE_BUILD:
                sync_service_build_handler (nodeid, msg);
                break;
        case MESSAGE_REQ_SYNC_MEMB_DETERMINE:
                sync_memb_determine (nodeid, msg);
                break;
        }
}
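
/*
 * Message transmit helpers.  Each one fills in the request header and
 * the ring id for the current ring, then multicasts the message to the
 * "syncv2" group with agreed ordering.
 */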
static void memb_determine_message_transmit (void)
{
        struct iovec iovec;
        struct req_exec_memb_determine_message req_exec_memb_determine_message;

        req_exec_memb_determine_message.header.size = sizeof (struct req_exec_memb_determine_message);
        req_exec_memb_determine_message.header.id = MESSAGE_REQ_SYNC_MEMB_DETERMINE;

        memcpy (&req_exec_memb_determine_message.ring_id,
                &my_memb_determine_ring_id,
                sizeof (struct memb_ring_id));

        iovec.iov_base = (char *)&req_exec_memb_determine_message;
        iovec.iov_len = sizeof (req_exec_memb_determine_message);

        (void)totempg_groups_mcast_joined (sync_group_handle,
                &iovec, 1, TOTEMPG_AGREED);
}

static void barrier_message_transmit (void)
{
        struct iovec iovec;
        struct req_exec_barrier_message req_exec_barrier_message;

        req_exec_barrier_message.header.size = sizeof (struct req_exec_barrier_message);
        req_exec_barrier_message.header.id = MESSAGE_REQ_SYNC_BARRIER;

        memcpy (&req_exec_barrier_message.ring_id, &my_ring_id,
                sizeof (struct memb_ring_id));

        iovec.iov_base = (char *)&req_exec_barrier_message;
        iovec.iov_len = sizeof (req_exec_barrier_message);

        (void)totempg_groups_mcast_joined (sync_group_handle,
                &iovec, 1, TOTEMPG_AGREED);
}

static void service_build_message_transmit (struct req_exec_service_build_message *service_build_message)
{
        struct iovec iovec;

        service_build_message->header.size = sizeof (struct req_exec_service_build_message);
        service_build_message->header.id = MESSAGE_REQ_SYNC_SERVICE_BUILD;

        memcpy (&service_build_message->ring_id, &my_ring_id,
                sizeof (struct memb_ring_id));

        iovec.iov_base = (void *)service_build_message;
        iovec.iov_len = sizeof (struct req_exec_service_build_message);

        (void)totempg_groups_mcast_joined (sync_group_handle,
                &iovec, 1, TOTEMPG_AGREED);
}

static void sync_barrier_enter (void)
{
        my_state = SYNC_BARRIER;
        barrier_message_transmit ();
}
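
/*
 * Enters the process phase for the service at my_processing_idx: the
 * per-processor received flags are cleared and a schedwrk handler is
 * created to drive that service's sync_init/sync_process callbacks.  If
 * there are no services to synchronize, the round completes immediately.
 */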
static void sync_process_enter (void)
{
        int i;

        my_state = SYNC_PROCESS;

        /*
         * No syncv2 services
         */
        if (my_service_list_entries == 0) {
                my_state = SYNC_SERVICELIST_BUILD;
                my_memb_determine_list_entries = 0;
                sync_synchronization_completed ();
                return;
        }
        for (i = 0; i < my_processor_list_entries; i++) {
                my_processor_list[i].received = 0;
        }
        schedwrk_create (&my_schedwrk_handle,
                schedwrk_processor,
                NULL);
}

static void sync_servicelist_build_enter (
        const unsigned int *member_list,
        size_t member_list_entries,
        const struct memb_ring_id *ring_id)
{
        struct req_exec_service_build_message service_build;
        int i;

        my_state = SYNC_SERVICELIST_BUILD;
        for (i = 0; i < member_list_entries; i++) {
                my_processor_list[i].nodeid = member_list[i];
                my_processor_list[i].received = 0;
        }
        my_processor_list_entries = member_list_entries;

        memcpy (my_member_list, member_list,
                member_list_entries * sizeof (unsigned int));
        my_member_list_entries = member_list_entries;

        my_processing_idx = 0;

        memcpy (my_service_list, my_initial_service_list,
                sizeof (struct service_entry) *
                        my_initial_service_list_entries);
        my_service_list_entries = my_initial_service_list_entries;

        /*
         * Advertise every locally known service id to the other nodes.
         */
        for (i = 0; i < my_initial_service_list_entries; i++) {
                service_build.service_list[i] =
                        my_initial_service_list[i].service_id;
        }
        service_build.service_list_entries = i;

        service_build_message_transmit (&service_build);
}
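
/*
 * schedwrk handler that drives synchronization of the current service.
 * On the first invocation the service's sync_init callback is called:
 * v1 services get the full member list, while v2 services additionally
 * get the transitional member list, i.e. the members of the previous
 * ring that are still present in the current one.  The handler then
 * calls sync_process: a return of 0 means the service has finished and
 * the barrier phase is entered; a non-zero return means more work is
 * needed, so the handler returns -1 (presumably so that schedwrk
 * invokes it again).
 */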
static int schedwrk_processor (const void *context)
{
        int res = 0;

        if (my_service_list[my_processing_idx].state == INIT) {
                my_service_list[my_processing_idx].state = PROCESS;
                if (my_service_list[my_processing_idx].api_version == 1) {
                        my_service_list[my_processing_idx].sync_init_api.sync_init_v1 (my_member_list,
                                my_member_list_entries,
                                &my_ring_id);
                } else {
                        unsigned int old_trans_list[PROCESSOR_COUNT_MAX];
                        size_t old_trans_list_entries = 0;
                        int o, m;

                        memcpy (old_trans_list, my_trans_list, my_trans_list_entries *
                                sizeof (unsigned int));
                        old_trans_list_entries = my_trans_list_entries;

                        my_trans_list_entries = 0;
                        for (o = 0; o < old_trans_list_entries; o++) {
                                for (m = 0; m < my_member_list_entries; m++) {
                                        if (old_trans_list[o] == my_member_list[m]) {
                                                my_trans_list[my_trans_list_entries] = my_member_list[m];
                                                my_trans_list_entries++;
                                                break;
                                        }
                                }
                        }

                        my_service_list[my_processing_idx].sync_init_api.sync_init_v2 (my_trans_list,
                                my_trans_list_entries, my_member_list,
                                my_member_list_entries,
                                &my_ring_id);
                }
        }
        if (my_service_list[my_processing_idx].state == PROCESS) {
                my_service_list[my_processing_idx].state = PROCESS;
                res = my_service_list[my_processing_idx].sync_process ();
                if (res == 0) {
                        sync_barrier_enter();
                } else {
                        return (-1);
                }
        }
        return (0);
}
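
/*
 * External entry points, called from the main sync machinery:
 * sync_v2_start() begins a synchronization round for the given ring
 * (using the memb-determine list if one was collected),
 * sync_v2_save_transitional() records the transitional membership for
 * v2-style sync_init callbacks, sync_v2_abort() cancels processing of
 * the current service, and sync_v2_memb_list_determine() /
 * sync_v2_memb_list_abort() manage the membership-determination
 * exchange.
 */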
void sync_v2_start (
        const unsigned int *member_list,
        size_t member_list_entries,
        const struct memb_ring_id *ring_id)
{
        ENTER();
        memcpy (&my_ring_id, ring_id, sizeof (struct memb_ring_id));

        if (my_memb_determine) {
                my_memb_determine = 0;
                sync_servicelist_build_enter (my_memb_determine_list,
                        my_memb_determine_list_entries, ring_id);
        } else {
                sync_servicelist_build_enter (member_list, member_list_entries,
                        ring_id);
        }
}

void sync_v2_save_transitional (
        const unsigned int *member_list,
        size_t member_list_entries,
        const struct memb_ring_id *ring_id)
{
        ENTER();
        memcpy (my_trans_list, member_list, member_list_entries *
                sizeof (unsigned int));
        my_trans_list_entries = member_list_entries;
}

void sync_v2_abort (void)
{
        ENTER();
        if (my_state == SYNC_PROCESS) {
                schedwrk_destroy (my_schedwrk_handle);
                my_service_list[my_processing_idx].sync_abort ();
        }

        /*
         * Clearing the ring id prevents any "old" barrier messages from
         * causing problems.
         */
        memset (&my_ring_id, 0, sizeof (struct memb_ring_id));
}

void sync_v2_memb_list_determine (const struct memb_ring_id *ring_id)
{
        ENTER();
        memcpy (&my_memb_determine_ring_id, ring_id,
                sizeof (struct memb_ring_id));

        memb_determine_message_transmit ();
}

void sync_v2_memb_list_abort (void)
{
        ENTER();
        my_memb_determine_list_entries = 0;
        memset (&my_memb_determine_ring_id, 0, sizeof (struct memb_ring_id));
}