  1. /** @file amfsg.c
  2. *
  3. * Copyright (c) 2002-2006 MontaVista Software, Inc.
  4. * Author: Steven Dake (sdake@mvista.com)
  5. *
  6. * Copyright (c) 2006 Ericsson AB.
  7. * Author: Hans Feldt, Anders Eriksson, Lars Holm
  8. * - Introduced AMF B.02 information model
  9. * - Use DN in API and multicast messages
  10. * - (Re-)Introduction of event based multicast messages
  11. * - Refactoring of code into several AMF files
  12. * - Component/SU restart, SU failover
  13. * - Constructors/destructors
  14. * - Serializers/deserializers
  15. *
  16. * All rights reserved.
  17. *
  18. *
  19. * This software licensed under BSD license, the text of which follows:
  20. *
  21. * Redistribution and use in source and binary forms, with or without
  22. * modification, are permitted provided that the following conditions are met:
  23. *
  24. * - Redistributions of source code must retain the above copyright notice,
  25. * this list of conditions and the following disclaimer.
  26. * - Redistributions in binary form must reproduce the above copyright notice,
  27. * this list of conditions and the following disclaimer in the documentation
  28. * and/or other materials provided with the distribution.
  29. * - Neither the name of the MontaVista Software, Inc. nor the names of its
  30. * contributors may be used to endorse or promote products derived from this
  31. * software without specific prior written permission.
  32. *
  33. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  34. * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  35. * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  36. * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  37. * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  38. * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  39. * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  40. * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  41. * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  42. * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
  43. * THE POSSIBILITY OF SUCH DAMAGE.
  44. *
  45. * AMF Service Group Class Implementation
  46. *
  47. * This file contains functions for handling AMF service groups (SGs). It can be
  48. * viewed as the implementation of the AMF Service Group class (called SG)
  49. * as described in SAI-Overview-B.02.01. The SA Forum specification
  50. * SAI-AIS-AMF-B.02.01 has been used as specification of the behaviour
  51. * and is referred to as 'the spec' below.
  52. *
  53. * The functions in this file are responsible for:
  54. * -on request start the service group by instantiating the contained SUs
  55. * -on request assign the service instances it protects to the in-service
  56. * service units it contains respecting as many as possible of the configured
  57. * requirements for the group
  58. * -create and delete an SI-assignment object for each relation between
  59. * an SI and an SU
  60. * -order each contained SU to create and delete CSI-assignments
  61. * -request the Service Instance class (SI) to execute the transfer of the
  62. * HA-state set/remove requests to each component involved
  63. * -fully control the execution of component failover and SU failover
  64. * -on request control the execution of the initial steps of node switchover
  65. * and node failover
  66. * -fully handle the auto adjust procedure
  67. *
  68. * Currently only the 'n+m' redundancy model is implemented. The ambition is
  69. * to identify n+m specific variables and functions and add the suffix
  70. * '_nplusm' to them so that they can be easily recognized.
  71. *
  72. * When SG is requested to assign workload to all SUs or all SUs hosted on
  73. * a specific node, a procedure containing several steps is executed:
  74. * <1> An algorithm is executed which assigns SIs to SUs respecting the rules
  75. * that have been configured for the SG. The algorithm also has to consider
  76. * whether assignments between some SIs and SUs already exist. The scope of this
  77. * algorithm is to create SI-assignments and set up the requested HA-state for
  78. * each assignment, but not to transfer those HA-states to the components.
  79. * <2> All SI-assignments with a requested HA state == ACTIVE are transferred
  80. * to the components concerned before any STANDBY assignments are
  81. * transferred. All components have to acknowledge the setting of the
  82. * ACTIVE HA state before the transfer of any STANDBY assignment is
  83. * initiated.
  84. * <3> All active assignments cannot be transferred to the different
  85. * components at the same time, because the rules for dependencies between
  86. * SIs (application wide) and between CSIs (within one SI) have to be respected.
  87. *
  88. * SG is fully responsible for step <1> but not fully responsible for handling
  89. * steps <2> and <3>. However, SG uses an attribute called 'dependency level'
  90. * when requested to assign workload. This parameter refers to an integer that
  91. * has been calculated initially for each SI. The 'dependency level' indicates
  92. * to what extent an SI depends on other SIs, such that an SI that depends on
  93. * no other SI is on dependency_level == 1 and an SI that depends only on an SI on
  94. * dependency_level == 1 is on dependency_level == 2.
  95. * An SI that depends on several SIs gets a
  96. * dependency_level that is one unit higher than the SI with the highest
  97. * dependency_level it depends on. When SG is requested to assign the workload
  98. * on a certain dependency level, it requests all SI objects on that level to
  99. * activate (all) SI-assignments that during step <1> have been requested to
  100. * assume the active HA state.
  101. *
  102. * SG contains the following state machines:
  103. * - administrative state machine (ADSM) (NOT IN THIS RELEASE)
  104. * - availability control state machine (ACSM)
  105. *
  106. * The availability control state machine contains two states and one of them
  107. * is composite. Being a composite state means that it contains substates.
  108. * The states are:
  109. * - IDLE (non composite state)
  110. * - MANAGING_SG (composite state)
  111. * MANAGING_SG is entered on several different events which have in common
  112. * the need to set up or change the assignment of SIs to SUs. Only one such
  113. * event can be handled at a time. If new events occur while one event is
  114. * being handled, the new event is saved and will be handled after the
  115. * handling of the first event has completed (return to the IDLE state).
  116. * MANAGING_SG handles the following events:
  117. * - start (requests SG to instantiate all SUs in the SG, waits for
  118. * presence state change reports from the SUs and
  119. * finally responds 'started' to the requester)
  120. * - assign (requests SG to assign SIs to SUs according to pre-configured
  121. * rules (if not already done) and transfer the HA state of
  122. * the SIs on the requested SI dependency level. Then SG waits for
  123. * confirmation that the HA state has been successfully set and
  124. * finally responds 'assigned' to the requester)
  125. * - auto_adjust (this event indicates that the auto-adjust probation timer has
  126. * expired and that SG should evaluate current assignments of
  127. * SIs to SUs and if needed remove current assignments and
  128. * create new according to what is specified in paragraph
  129. * 3.7.1.2)
  130. * - failover_comp (requests SG to failover a specific component according to
  131. * the procedure described in paragraph 3.12.1.3)
  132. * - failover_su (requests SG to failover a specific SU according to the
  133. * procedure described in paragraph 3.12.1.3 and 3.12.1.4)
  134. * - switchover_node (requests SG to execute the recovery actions described
  135. * in 3.12.1.3 and respond to the requester when recovery
  136. * is completed)
  137. * - failover_node (requests SG to execute the recovery actions described
  138. * in 3.12.1.3 and respond to the requester when recovery is
  139. * completed)
  140. *
  141. */
  142. #include <stdlib.h>
  143. #include <errno.h>
  144. #include "amf.h"
  145. #include "print.h"
  146. #include "main.h"
  147. #include "util.h"
  148. static void acsm_enter_activating_standby (struct amf_sg *sg);
  149. static void delete_si_assignments_in_scope (struct amf_sg *sg);
  150. static void acsm_enter_repairing_su (struct amf_sg *sg);
  151. static void standby_su_activated_cbfn (
  152. struct amf_si_assignment *si_assignment, int result);
  153. static void dependent_si_deactivated_cbfn (
  154. struct amf_si_assignment *si_assignment, int result);
  155. static const char *sg_event_type_text[] = {
  156. "Unknown",
  157. "Failover su",
  158. "Failover node",
  159. "Failover comp",
  160. "Switchover node",
  161. "Start",
  162. "Autoadjust",
  163. "Assign si"
  164. };
  165. typedef struct sg_event {
  166. amf_sg_event_type_t event_type;
  167. amf_sg_t *sg;
  168. amf_su_t *su;
  169. amf_comp_t *comp;
  170. amf_node_t *node;
  171. } sg_event_t;
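/**
 * Fill in an sg_event_t with the specified event type and the SG, SU,
 * component and node objects the event refers to.
 */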
  172. static void sg_set_event (amf_sg_event_type_t sg_event_type,
  173. amf_sg_t *sg, amf_su_t *su, amf_comp_t *comp, amf_node_t * node,
  174. sg_event_t *sg_event)
  175. {
  176. sg_event->event_type = sg_event_type;
  177. sg_event->node = node;
  178. sg_event->su = su;
  179. sg_event->comp = comp;
  180. sg_event->sg = sg;
  181. }
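/**
 * Save an event on the SG's deferred event queue so that it can be
 * recalled when the SG has returned to the IDLE state.
 */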
  182. static void sg_defer_event (amf_sg_event_type_t event_type,
  183. sg_event_t *sg_event)
  184. {
  185. amf_fifo_put (event_type, &sg_event->sg->deferred_events,
  186. sizeof (sg_event_t),
  187. sg_event);
  188. }
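/**
 * Fetch one deferred event from the SG's queue (if any) and re-issue the
 * corresponding request. Currently only failover SU and failover node
 * events are recalled.
 */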
  189. static void sg_recall_deferred_events (amf_sg_t *sg)
  190. {
  191. sg_event_t sg_event;
  192. ENTER ("");
  193. if (amf_fifo_get (&sg->deferred_events, &sg_event)) {
  194. switch (sg_event.event_type) {
  195. case SG_FAILOVER_SU_EV:
  196. amf_sg_failover_su_req (sg_event.sg,
  197. sg_event.su, sg_event.node);
  198. break;
  199. case SG_FAILOVER_NODE_EV:
  200. amf_sg_failover_node_req (sg_event.sg,
  201. sg_event.node);
  202. break;
  203. case SG_FAILOVER_COMP_EV:
  204. case SG_SWITCH_OVER_NODE_EV:
  205. case SG_START_EV:
  206. case SG_AUTO_ADJUST_EV:
  207. default:
  208. break;
  209. }
  210. }
  211. }
  212. static void timer_function_sg_recall_deferred_events (void *data)
  213. {
  214. amf_sg_t *sg = (amf_sg_t*)data;
  215. ENTER ("");
  216. sg_recall_deferred_events (sg);
  217. }
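/**
 * Enter function for state SG_AC_Idle. Reports the result of the finished
 * operation (recovery or start), releases the current recovery scope and
 * schedules handling of any deferred events.
 * @param sg
 */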
  218. static void sg_enter_idle (amf_sg_t *sg)
  219. {
  220. SaNameT dn;
  221. ENTER ("sg: %s state: %d", sg->name.value, sg->avail_state);
  222. sg->avail_state = SG_AC_Idle;
  223. if (sg->recovery_scope.event_type != 0) {
  224. switch (sg->recovery_scope.event_type) {
  225. case SG_FAILOVER_SU_EV:
  226. assert (sg->recovery_scope.sus[0] != NULL);
  227. amf_su_dn_make (sg->recovery_scope.sus[0], &dn);
  228. log_printf (
  229. LOG_NOTICE,
  230. "'%s' %s recovery action finished",
  231. dn.value,
  232. sg_event_type_text[sg->recovery_scope.event_type]);
  233. break;
  234. case SG_FAILOVER_NODE_EV:
  235. amf_node_sg_failed_over (
  236. sg->recovery_scope.node, sg);
  237. log_printf (
  238. LOG_NOTICE,
  239. "'%s for %s' recovery action finished",
  240. sg_event_type_text[sg->recovery_scope.event_type],
  241. sg->name.value);
  242. break;
  243. case SG_START_EV:
  244. amf_application_sg_started (sg->application,
  245. sg, this_amf_node);
  246. break;
  247. default:
  248. log_printf (
  249. LOG_NOTICE,
  250. "'%s' recovery action finished",
  251. sg_event_type_text[0]);
  252. break;
  253. }
  254. }
  255. if (sg->recovery_scope.sus != NULL) {
  256. free ((void *)sg->recovery_scope.sus);
  257. }
  258. if (sg->recovery_scope.sis != NULL) {
  259. free ((void *)sg->recovery_scope.sis);
  260. }
  261. memset (&sg->recovery_scope, 0, sizeof (struct sg_recovery_scope));
  262. sg->node_to_start = NULL;
  263. amf_call_function_asynchronous (
  264. timer_function_sg_recall_deferred_events, sg);
  265. }
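/**
 * Return the number of SUs in the SG with presence state INSTANTIATED.
 */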
  266. static int su_instantiated_count (struct amf_sg *sg)
  267. {
  268. int cnt = 0;
  269. struct amf_su *su;
  270. for (su = sg->su_head; su != NULL; su = su->next) {
  271. if (su->saAmfSUPresenceState == SA_AMF_PRESENCE_INSTANTIATED)
  272. cnt++;
  273. }
  274. return cnt;
  275. }
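/**
 * Check the SI assignments of all SUs in the current recovery scope.
 * Returns 1 if no assignment with an HA state other than ACTIVE is found,
 * otherwise 0.
 */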
  276. static int has_any_su_in_scope_active_workload (struct amf_sg *sg)
  277. {
  278. struct amf_su **sus= sg->recovery_scope.sus;
  279. struct amf_si_assignment *si_assignment;
  280. while (*sus != NULL) {
  281. si_assignment = amf_su_get_next_si_assignment (*sus, NULL);
  282. while (si_assignment != NULL) {
  283. if (si_assignment->saAmfSISUHAState !=
  284. SA_AMF_HA_ACTIVE) {
  285. break;
  286. }
  287. si_assignment = amf_su_get_next_si_assignment (
  288. *sus, si_assignment);
  289. }
  290. if (si_assignment != NULL) {
  291. break;
  292. }
  293. sus++;
  294. }
  295. return(*sus == NULL);
  296. }
  297. static int is_standby_for_non_active_si_in_scope (struct amf_sg *sg)
  298. {
  299. struct amf_si **sis= sg->recovery_scope.sis;
  300. struct amf_si_assignment *si_assignment;
  301. /*
  302. * Check if there is any si in the scope which has no active assignment
  303. * and at least one standby assignment.
  304. */
  305. while (*sis != NULL) {
  306. si_assignment = (*sis)->assigned_sis;
  307. while (si_assignment != NULL) {
  308. if (si_assignment->saAmfSISUHAState ==
  309. SA_AMF_HA_ACTIVE) {
  310. break;
  311. }
  312. si_assignment = si_assignment->next;
  313. }
  314. if (si_assignment == NULL) {
  315. /* There is no ACTIVE assignment ..*/
  316. si_assignment = (*sis)->assigned_sis;
  317. while (si_assignment != NULL) {
  318. if (si_assignment->saAmfSISUHAState ==
  319. SA_AMF_HA_STANDBY) {
  320. break;
  321. }
  322. si_assignment = si_assignment->next;
  323. }
  324. if (si_assignment != NULL) {
  325. /* .. and one STANDBY assignment*/
  326. break;
  327. }
  328. }
  329. sis++;
  330. }
  331. return(*sis != NULL);
  332. }
  333. static void acsm_enter_terminating_suspected (struct amf_sg *sg)
  334. {
  335. struct amf_su **sus= sg->recovery_scope.sus;
  336. sg->avail_state = SG_AC_TerminatingSuspected;
  337. /*
  338. * Terminate suspected SU(s)
  339. */
  340. while (*sus != 0) {
  341. amf_su_terminate (*sus);
  342. sus++;
  343. }
  344. }
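/**
 * Return 1 if the presence state of the SU equals any of the three
 * specified states, otherwise 0.
 */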
  345. static inline int su_presense_state_is_ored (amf_su_t *su,
  346. SaAmfPresenceStateT state1,SaAmfPresenceStateT state2,
  347. SaAmfPresenceStateT state3)
  348. {
  349. return(su->saAmfSUPresenceState == state1 || su->saAmfSUPresenceState ==
  350. state2 || su->saAmfSUPresenceState == state3) ? 1 : 0;
  351. }
  352. static inline int su_presense_state_is_not (amf_su_t *su,
  353. SaAmfPresenceStateT state1,SaAmfPresenceStateT state2,
  354. SaAmfPresenceStateT state3)
  355. {
  356. return(su->saAmfSUPresenceState != state1 && su->saAmfSUPresenceState !=
  357. state2 && su->saAmfSUPresenceState != state3) ? 1 : 0;
  358. }
  359. /**
  360. * Callback function used by SI when there is no dependent SI to
  361. * deactivate.
  362. * @param sg
  363. */
  364. static void dependent_si_deactivated_cbfn2 (struct amf_sg *sg)
  365. {
  366. struct amf_su **sus = sg->recovery_scope.sus;
  367. ENTER("'%s'", sg->name.value);
  368. /*
  369. * Select next state depending on whether some
  370. * SU in the scope needs to be terminated.
  371. */
  372. while (*sus != NULL) {
  373. amf_su_t *su = *sus;
  374. ENTER("SU %s pr_state='%d'",su->name.value,
  375. su->saAmfSUPresenceState);
  376. if (su_presense_state_is_ored (su,
  377. SA_AMF_PRESENCE_UNINSTANTIATED,
  378. SA_AMF_PRESENCE_TERMINATION_FAILED,
  379. SA_AMF_PRESENCE_INSTANTIATION_FAILED)) {
  380. sus++;
  381. continue;
  382. }
  383. break;
  384. }
  385. if (*sus != NULL) {
  386. acsm_enter_terminating_suspected (sg);
  387. } else {
  388. delete_si_assignments_in_scope(sg);
  389. acsm_enter_activating_standby (sg);
  390. }
  391. }
  392. static void timer_function_dependent_si_deactivated2 (void *data)
  393. {
  394. ENTER ("");
  395. amf_sg_t *sg = (amf_sg_t *)data;
  396. dependent_si_deactivated_cbfn2 (sg);
  397. }
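/**
 * Return the SI which the specified SI depends on, resolved from the
 * distinguished name of its dependency object, or NULL if the SI has no
 * dependency.
 */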
  398. static struct amf_si *si_get_dependent (struct amf_si *si)
  399. {
  400. struct amf_si *tmp_si = NULL;
  401. ENTER("'%p'",si->depends_on);
  402. if (si->depends_on != NULL) {
  403. if (si->depends_on->name.length < SA_MAX_NAME_LENGTH) {
  404. si->depends_on->name.value[si->depends_on->name.length] = '\0';
  405. }
  406. SaNameT res_arr[2];
  407. int is_match;
  408. is_match = sa_amf_grep ((char*)si->depends_on->name.value,
  409. "safDepend=.*,safSi=(.*),safApp=.*",
  410. 2, res_arr);
  411. if (is_match) {
  412. tmp_si = amf_si_find (si->application,
  413. (char*)res_arr[1].value);
  414. } else {
  415. log_printf (LOG_LEVEL_ERROR, "parsing of the distinguished name of the "
  416. "amf_si dependency failed\n");
  417. openais_exit_error (AIS_DONE_FATAL_ERR);
  418. }
  419. }
  420. return tmp_si;
  421. }
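/**
 * Iterator over the SIs that depend on the specified SI. Pass
 * si_iter == NULL to get the first dependent SI, then the previously
 * returned SI to get the next one.
 */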
  422. static struct amf_si *amf_dependent_get_next (struct amf_si *si,
  423. struct amf_si *si_iter)
  424. {
  425. struct amf_si *tmp_si;
  426. struct amf_application *application;
  427. ENTER("");
  428. if (si_iter == NULL) {
  429. assert(amf_cluster != NULL);
  430. application = amf_cluster->application_head;
  431. assert(application != NULL);
  432. tmp_si = application->si_head;
  433. } else {
  434. tmp_si = si_iter->next;
  435. if (tmp_si == NULL) {
  436. application = si->application->next;
  437. if (application == NULL) {
  438. goto out;
  439. }
  440. }
  441. }
  442. for (; tmp_si != NULL; tmp_si = tmp_si->next) {
  443. struct amf_si *depends_on_si = si_get_dependent (tmp_si);
  444. while (depends_on_si != NULL) {
  445. if (depends_on_si == si) {
  446. goto out;
  447. }
  448. depends_on_si = depends_on_si->next;
  449. }
  450. }
  451. out:
  452. return tmp_si;
  453. }
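/**
 * Enter function for state SG_AC_DeactivatingDependantWorkload. Requests
 * the QUIESCED HA state for all active assignments of SIs that depend on
 * an SI in the recovery scope. If there is no such assignment, the
 * deactivated callback is invoked asynchronously instead.
 * @param sg
 */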
  454. static void acsm_enter_deactivating_dependent_workload (amf_sg_t *sg)
  455. {
  456. struct amf_si **sis= sg->recovery_scope.sis;
  457. struct amf_si_assignment *si_assignment;
  458. int callback_pending = 0;
  459. sg->avail_state = SG_AC_DeactivatingDependantWorkload;
  460. ENTER("'%s'",sg->name.value);
  461. /*
  462. * For each SI in the recovery scope, find all active assignments
  463. * and request them to be deactivated.
  464. */
  465. while (*sis != NULL) {
  466. struct amf_si *dependent_si;
  467. struct amf_si *si = *sis;
  468. si_assignment = si->assigned_sis;
  469. dependent_si = amf_dependent_get_next (si, NULL);
  470. while (dependent_si != NULL) {
  471. si_assignment = dependent_si->assigned_sis;
  472. while (si_assignment != NULL) {
  473. if (si_assignment->saAmfSISUHAState ==
  474. SA_AMF_HA_ACTIVE) {
  475. si_assignment->requested_ha_state =
  476. SA_AMF_HA_QUIESCED;
  477. callback_pending = 1;
  478. amf_si_ha_state_assume (
  479. si_assignment,
  480. dependent_si_deactivated_cbfn);
  481. }
  482. si_assignment = si_assignment->next;
  483. }
  484. dependent_si = amf_dependent_get_next (si, dependent_si);
  485. }
  486. sis++;
  487. }
  488. if (callback_pending == 0) {
  489. static poll_timer_handle dependent_si_deactivated_handle;
  490. ENTER("");
  491. poll_timer_add (aisexec_poll_handle, 0, sg,
  492. timer_function_dependent_si_deactivated2,
  493. &dependent_si_deactivated_handle);
  494. }
  495. }
  496. /**
  497. * Enter function for state SG_AC_ActivatingStandby. It activates
  498. * one STANDBY assignment for each SI in the recovery scope.
  499. * @param sg
  500. */
  501. static void acsm_enter_activating_standby (struct amf_sg *sg)
  502. {
  503. struct amf_si **sis= sg->recovery_scope.sis;
  504. struct amf_si_assignment *si_assignment;
  505. int is_no_standby_activated = 1;
  506. ENTER("'%s'",sg->name.value);
  507. sg->avail_state = SG_AC_ActivatingStandby;
  508. /*
  509. * For each SI in the recovery scope, find one standby
  510. * SI assignment and activate it.
  511. */
  512. while (*sis != NULL) {
  513. si_assignment = (*sis)->assigned_sis;
  514. while (si_assignment != NULL) {
  515. if (si_assignment->saAmfSISUHAState ==
  516. SA_AMF_HA_STANDBY) {
  517. si_assignment->requested_ha_state =
  518. SA_AMF_HA_ACTIVE;
  519. amf_si_ha_state_assume (
  520. si_assignment, standby_su_activated_cbfn);
  521. is_no_standby_activated = 0;
  522. break;
  523. }
  524. si_assignment = si_assignment->next;
  525. }
  526. sis++;
  527. }
  528. if (is_no_standby_activated) {
  529. sg->avail_state = SG_AC_AssigningStandbyToSpare;
  530. acsm_enter_repairing_su (sg);
  531. }
  532. }
  533. static void acsm_enter_repairing_su (struct amf_sg *sg)
  534. {
  535. struct amf_su **sus= sg->recovery_scope.sus;
  536. ENTER("'%s'",sg->name.value);
  537. sg->avail_state = SG_AC_ReparingSu;
  538. int is_any_su_instantiated = 0;
  539. /*
  540. * Instantiate SUs in current recovery scope until the configured
  541. * preference is fulfilled.
  542. */
  543. while (*sus != NULL) {
  544. if (su_instantiated_count ((*sus)->sg) <
  545. (*sus)->sg->saAmfSGNumPrefInserviceSUs) {
  546. struct amf_node *node =
  547. amf_node_find(&((*sus)->saAmfSUHostedByNode));
  548. if (node == NULL) {
  549. log_printf (LOG_LEVEL_ERROR,
  550. "no node to hosted on su found"
  551. "amf_si_depedency failed\n");
  552. openais_exit_error (AIS_DONE_FATAL_ERR);
  553. }
  554. if (node->saAmfNodeOperState ==
  555. SA_AMF_OPERATIONAL_ENABLED) {
  556. /* node is synchronized */
  557. is_any_su_instantiated = 1;
  558. amf_su_instantiate ((*sus));
  559. }
  560. }
  561. sus++;
  562. }
  563. if (is_any_su_instantiated == 0) {
  564. sg_enter_idle (sg);
  565. }
  566. }
  567. /**
  568. * Checks if the si pointed out is already in the scope.
  569. * @param sg
  570. * @param si
  571. */
  572. static int is_si_in_scope(struct amf_sg *sg, struct amf_si *si)
  573. {
  574. struct amf_si **tmp_sis= sg->recovery_scope.sis;
  575. while (*tmp_sis != NULL) {
  576. if (*tmp_sis == si) {
  577. break;
  578. }
  579. tmp_sis++;
  580. }
  581. return(*tmp_sis == si);
  582. }
  583. /**
  584. * Adds the si pointed out to the scope.
  585. * @param sg
  586. * @param si
  587. */
  588. static void add_si_to_scope ( struct amf_sg *sg, struct amf_si *si)
  589. {
  590. int number_of_si = 2; /* the new SI plus the NULL terminator */
  591. struct amf_si **tmp_sis= sg->recovery_scope.sis;
  592. ENTER ("'%s'", si->name.value);
  593. while (*tmp_sis != NULL) {
  594. number_of_si++;
  595. tmp_sis++;
  596. }
  597. sg->recovery_scope.sis = (struct amf_si **)
  598. realloc((void *)sg->recovery_scope.sis,
  599. sizeof (struct amf_si *)*number_of_si);
  600. assert (sg->recovery_scope.sis != NULL);
  601. tmp_sis= sg->recovery_scope.sis;
  602. while (*tmp_sis != NULL) {
  603. tmp_sis++;
  604. }
  605. *tmp_sis = si;
  606. *(++tmp_sis) = NULL;
  607. }
  608. /**
  609. * Adds the su pointed out to the scope.
  610. * @param sg
  611. * @param su
  612. */
  613. static void add_su_to_scope (struct amf_sg *sg, struct amf_su *su)
  614. {
  615. int number_of_su = 2; /* the new SU plus the NULL terminator */
  616. struct amf_su **tmp_sus= sg->recovery_scope.sus;
  617. ENTER ("'%s'", su->name.value);
  618. while (*tmp_sus != NULL) {
  619. number_of_su++;
  620. tmp_sus++;
  621. }
  622. sg->recovery_scope.sus = (struct amf_su **)
  623. realloc((void *)sg->recovery_scope.sus,
  624. sizeof (struct amf_su *)*number_of_su);
  625. assert (sg->recovery_scope.sus != NULL);
  626. tmp_sus= sg->recovery_scope.sus;
  627. while (*tmp_sus != NULL) {
  628. tmp_sus++;
  629. }
  630. *tmp_sus = su;
  631. *(++tmp_sus) = NULL;
  632. }
  633. /**
  634. * Set recovery scope for failover SU.
  635. * @param sg
  636. * @param su
  637. */
  638. static void set_scope_for_failover_su (struct amf_sg *sg, struct amf_su *su)
  639. {
  640. struct amf_si_assignment *si_assignment;
  641. struct amf_si **sis;
  642. struct amf_su **sus;
  643. SaNameT dn;
  644. sg->recovery_scope.event_type = SG_FAILOVER_SU_EV;
  645. sg->recovery_scope.node = NULL;
  646. sg->recovery_scope.comp = NULL;
  647. sg->recovery_scope.sus = (struct amf_su **)
  648. calloc (2, sizeof (struct amf_su *));
  649. sg->recovery_scope.sis = (struct amf_si **)
  650. calloc (1, sizeof (struct amf_si *));
  651. assert ((sg->recovery_scope.sus != NULL) &&
  652. (sg->recovery_scope.sis != NULL));
  653. sg->recovery_scope.sus[0] = su;
  654. amf_su_dn_make (sg->recovery_scope.sus[0], &dn);
  655. log_printf (
  656. LOG_NOTICE, "'%s' for %s recovery action started",
  657. sg_event_type_text[sg->recovery_scope.event_type],
  658. dn.value);
  659. si_assignment = amf_su_get_next_si_assignment (su, NULL);
  660. while (si_assignment != NULL) {
  661. if (is_si_in_scope(sg, si_assignment->si) == 0) {
  662. add_si_to_scope(sg,si_assignment->si );
  663. }
  664. si_assignment = amf_su_get_next_si_assignment (su, si_assignment);
  665. }
  666. sus = sg->recovery_scope.sus;
  667. dprintf("The following sus are within the scope:\n");
  668. while (*sus != NULL) {
  669. dprintf("%s\n", (*sus)->name.value);
  670. sus++;
  671. }
  672. sis= sg->recovery_scope.sis;
  673. dprintf("The following sis are within the scope:\n");
  674. while (*sis != NULL) {
  675. dprintf("%s\n", (*sis)->name.value);
  676. sis++;
  677. }
  678. }
  679. static void set_scope_for_failover_node (struct amf_sg *sg, struct amf_node *node)
  680. {
  681. struct amf_si_assignment *si_assignment;
  682. struct amf_si **sis;
  683. struct amf_su **sus;
  684. struct amf_su *su;
  685. ENTER ("'%s'", node->name.value);
  686. sg->recovery_scope.event_type = SG_FAILOVER_NODE_EV;
  687. sg->recovery_scope.node = node;
  688. sg->recovery_scope.comp = NULL;
  689. sg->recovery_scope.sus = (struct amf_su **)
  690. calloc (1, sizeof (struct amf_su *));
  691. sg->recovery_scope.sis = (struct amf_si **)
  692. calloc (1, sizeof (struct amf_si *));
  693. log_printf (
  694. LOG_NOTICE, "'%s' for node %s recovery action started",
  695. sg_event_type_text[sg->recovery_scope.event_type],
  696. node->name.value);
  697. assert ((sg->recovery_scope.sus != NULL) &&
  698. (sg->recovery_scope.sis != NULL));
  699. for (su = sg->su_head; su != NULL; su = su->next) {
  700. if (name_match (&node->name, &su->saAmfSUHostedByNode)) {
  701. add_su_to_scope (sg, su);
  702. }
  703. }
  704. sus = sg->recovery_scope.sus;
  705. while (*sus != 0) {
  706. su = *sus;
  707. si_assignment = amf_su_get_next_si_assignment (su, NULL);
  708. while (si_assignment != NULL) {
  709. if (is_si_in_scope(sg, si_assignment->si) == 0) {
  710. add_si_to_scope(sg, si_assignment->si );
  711. }
  712. si_assignment = amf_su_get_next_si_assignment (
  713. su, si_assignment);
  714. }
  715. sus++;
  716. }
  717. sus = sg->recovery_scope.sus;
  718. dprintf("The following sus are within the scope:\n");
  719. while (*sus != NULL) {
  720. dprintf("%s\n", (*sus)->name.value);
  721. sus++;
  722. }
  723. sis = sg->recovery_scope.sis;
  724. dprintf("The following sis are within the scope:\n");
  725. while (*sis != NULL) {
  726. dprintf("%s\n", (*sis)->name.value);
  727. sis++;
  728. }
  729. }
  730. /**
  731. * Delete all SI assignments and all CSI assignments
  732. * by requesting all contained components.
  733. * @param su
  734. */
  735. static void delete_si_assignments (struct amf_su *su)
  736. {
  737. struct amf_csi *csi;
  738. struct amf_si *si;
  739. struct amf_si_assignment *si_assignment;
  740. struct amf_si_assignment **prev;
  741. ENTER ("'%s'", su->name.value);
  742. for (si = su->sg->application->si_head; si != NULL; si = si->next) {
  743. prev = &si->assigned_sis;
  744. if (!name_match (&si->saAmfSIProtectedbySG, &su->sg->name)) {
  745. continue;
  746. }
  747. for (csi = si->csi_head; csi != NULL; csi = csi->next) {
  748. amf_csi_delete_assignments (csi, su);
  749. }
  750. for (si_assignment = si->assigned_sis; si_assignment != NULL;
  751. si_assignment = si_assignment->next) {
  752. if (si_assignment->su == su) {
  753. struct amf_si_assignment *tmp = si_assignment;
  754. *prev = si_assignment->next;
  755. dprintf ("SI assignment %s unlinked",
  756. tmp->name.value);
  757. free (tmp);
  758. } else {
  759. prev = &si_assignment->next;
  760. }
  761. }
  762. }
  763. }
  764. /**
  765. * Delete all SI assignments and all CSI assignments in current
  766. * recovery scope.
  767. * @param sg
  768. */
  769. static void delete_si_assignments_in_scope (struct amf_sg *sg)
  770. {
  771. struct amf_su **sus= sg->recovery_scope.sus;
  772. while (*sus != NULL) {
  773. delete_si_assignments (*sus);
  774. sus++;
  775. }
  776. }
  777. /**
  778. * Callback function used by SI when an SI has been deactivated.
  779. * @param si_assignment
  780. * @param result
  781. */
  782. static void dependent_si_deactivated_cbfn (
  783. struct amf_si_assignment *si_assignment, int result)
  784. {
  785. struct amf_sg *sg = si_assignment->su->sg;
  786. struct amf_su **sus = sg->recovery_scope.sus;
  787. struct amf_su *su;
  788. ENTER ("'%s', %d", si_assignment->si->name.value, result);
  789. /*
  790. * If no SI assignment for any SU in the SG is still pending,
  791. * go to the next state (TerminatingSuspected).
  792. */
  793. for (su = sg->su_head ; su != NULL; su = su->next) {
  794. struct amf_si_assignment *si_assignment;
  795. si_assignment = amf_su_get_next_si_assignment(su, NULL);
  796. while (si_assignment != NULL) {
  797. if (si_assignment->saAmfSISUHAState !=
  798. si_assignment->requested_ha_state) {
  799. goto still_wating;
  800. }
  801. si_assignment = amf_su_get_next_si_assignment(su,
  802. si_assignment);
  803. }
  804. }
  805. still_wating:
  806. if (su == NULL) {
  807. sus = si_assignment->su->sg->recovery_scope.sus;
  808. /*
  809. * Select next state depending on whether some
  810. * SU in the scope needs to be terminated.
  811. */
  812. while (*sus != NULL) {
  813. if (su_presense_state_is_not (*sus,
  814. SA_AMF_PRESENCE_UNINSTANTIATED,
  815. SA_AMF_PRESENCE_TERMINATION_FAILED,
  816. SA_AMF_PRESENCE_INSTANTIATION_FAILED)) {
  817. break;
  818. }
  819. sus++;
  820. }
  821. if (*sus != NULL) {
  822. acsm_enter_terminating_suspected (sg);
  823. } else {
  824. delete_si_assignments_in_scope(sg);
  825. acsm_enter_activating_standby (sg);
  826. }
  827. }
  828. LEAVE("");
  829. }
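/**
 * Callback function used by SI when a standby assignment has been
 * activated. When the SIs in the recovery scope have active assignments,
 * repair of the SU(s) in the scope is started.
 * @param si_assignment
 * @param result
 */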
  830. static void standby_su_activated_cbfn (
  831. struct amf_si_assignment *si_assignment, int result)
  832. {
  833. struct amf_su **sus= si_assignment->su->sg->recovery_scope.sus;
  834. struct amf_si **sis= si_assignment->su->sg->recovery_scope.sis;
  835. ENTER ("'%s', %d", si_assignment->si->name.value, result);
  836. /*
  837. * If all SI assignments for all SIs in the scope are activated, goto next
  838. * state.
  839. */
  840. while (*sis != NULL) {
  841. if ((*sis)->assigned_sis != NULL &&
  842. (*sis)->assigned_sis->saAmfSISUHAState != SA_AMF_HA_ACTIVE) {
  843. break;
  844. }
  845. sis++;
  846. }
  847. if (*sis == NULL) {
  848. /*
  849. * TODO: create SI assignment to spare and assign them
  850. */
  851. (*sus)->sg->avail_state = SG_AC_AssigningStandbyToSpare;
  852. acsm_enter_repairing_su ((*sus)->sg);
  853. }
  854. }
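/**
 * Callback function used by SI when a requested HA state has been assumed
 * during assignment. When all assignments of the SIs protected by the SG
 * have been confirmed, the SG returns to the idle state and, if the
 * assignment was requested by the application, reports 'assigned'.
 * @param si_assignment
 * @param result
 */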
  855. static void assign_si_assumed_cbfn (
  856. struct amf_si_assignment *si_assignment, int result)
  857. {
  858. struct amf_si_assignment *tmp_si_assignment;
  859. struct amf_si *si;
  860. struct amf_sg *sg = si_assignment->su->sg;
  861. int si_assignment_cnt = 0;
  862. int confirmed_assignments = 0;
  863. ENTER ("'%s', %d", si_assignment->si->name.value, result);
  864. /*
  865. * Report to the application when all SIs that this SG protects
  866. * have been assigned, or go back to the idle state if this is not
  867. * a cluster start.
  868. */
  869. for (si = sg->application->si_head; si != NULL; si = si->next) {
  870. if (name_match (&si->saAmfSIProtectedbySG, &sg->name)) {
  871. for (tmp_si_assignment = si->assigned_sis;
  872. tmp_si_assignment != NULL;
  873. tmp_si_assignment = tmp_si_assignment->next) {
  874. si_assignment_cnt++;
  875. if (tmp_si_assignment->requested_ha_state ==
  876. tmp_si_assignment->saAmfSISUHAState) {
  877. confirmed_assignments++;
  878. }
  879. }
  880. }
  881. }
  882. assert (confirmed_assignments != 0);
  883. switch (sg->avail_state) {
  884. case SG_AC_AssigningOnRequest:
  885. if (si_assignment_cnt == confirmed_assignments) {
  886. sg_enter_idle (sg);
  887. amf_application_sg_assigned (sg->application, sg);
  888. } else {
  889. dprintf ("%d, %d", si_assignment_cnt, confirmed_assignments);
  890. }
  891. break;
  892. case SG_AC_AssigningStandBy:
  893. {
  894. if (si_assignment_cnt == confirmed_assignments) {
  895. sg_enter_idle (sg);
  896. }
  897. break;
  898. }
  899. default:
  900. dprintf ("%d, %d, %d", sg->avail_state, si_assignment_cnt,
  901. confirmed_assignments);
  902. amf_runtime_attributes_print (amf_cluster);
  903. assert (0);
  904. break;
  905. }
  906. }
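/**
 * Integer division of a by b, rounded upwards. b must not be zero.
 */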
  907. static inline int div_round (int a, int b)
  908. {
  909. int res;
  910. assert (b != 0);
  911. res = a / b;
  912. if ((a % b) != 0)
  913. res++;
  914. return res;
  915. }
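/**
 * Return 1 if no SU in the SG (restricted to SUs hosted by node_to_start
 * when that parameter is non-NULL) has the specified presence state,
 * otherwise 0.
 */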
  916. static int no_su_has_presence_state (
  917. struct amf_sg *sg, struct amf_node *node_to_start,
  918. SaAmfPresenceStateT state)
  919. {
  920. struct amf_su *su;
  921. int no_su_has_presence_state = 1;
  922. for (su = sg->su_head; su != NULL; su = su->next) {
  923. if (su->saAmfSUPresenceState == state) {
  924. if (node_to_start == NULL) {
  925. no_su_has_presence_state = 0;
  926. break;
  927. } else {
  928. if (name_match(&node_to_start->name,
  929. &su->saAmfSUHostedByNode)) {
  930. no_su_has_presence_state = 0;
  931. break;
  932. }
  933. }
  934. }
  935. }
  936. return no_su_has_presence_state;
  937. }
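/**
 * Return 1 if all SUs in the current recovery scope have the specified
 * presence state, otherwise 0.
 */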
  938. static int all_su_in_scope_has_presence_state (
  939. struct amf_sg *sg, SaAmfPresenceStateT state)
  940. {
  941. struct amf_su **sus= sg->recovery_scope.sus;
  942. while (*sus != NULL) {
  943. if ((*sus)->saAmfSUPresenceState != state) {
  944. break;
  945. }
  946. sus++;
  947. }
  948. return(*sus == NULL);
  949. }
  950. /**
  951. * Get number of SIs protected by the specified SG.
  952. * @param sg
  953. *
  954. * @return int
  955. */
  956. static int sg_si_count_get (struct amf_sg *sg)
  957. {
  958. struct amf_si *si;
  959. int cnt = 0;
  960. for (si = sg->application->si_head; si != NULL; si = si->next) {
  961. if (name_match (&si->saAmfSIProtectedbySG, &sg->name)) {
  962. cnt += 1;
  963. }
  964. }
  965. return(cnt);
  966. }
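/**
 * Return the number of assignments of the specified SI with requested
 * HA state ACTIVE.
 */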
  967. static int amf_si_get_saAmfSINumReqActiveAssignments(struct amf_si *si)
  968. {
  969. struct amf_si_assignment *si_assignment = si->assigned_sis;
  970. int number_of_req_active_assignments = 0;
  971. for (; si_assignment != NULL; si_assignment = si_assignment->next) {
  972. if (si_assignment->requested_ha_state == SA_AMF_HA_ACTIVE) {
  973. number_of_req_active_assignments++;
  974. }
  975. }
  976. return number_of_req_active_assignments;
  977. }
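/**
 * Return the number of assignments of the specified SI with requested
 * HA state STANDBY.
 */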
  978. static int amf_si_get_saAmfSINumReqStandbyAssignments(struct amf_si *si)
  979. {
  980. struct amf_si_assignment *si_assignment = si->assigned_sis;
  981. int number_of_req_active_assignments = 0;
  982. for (; si_assignment != NULL; si_assignment = si_assignment->next) {
  983. if (si_assignment->requested_ha_state == SA_AMF_HA_STANDBY) {
  984. number_of_req_active_assignments++;
  985. }
  986. }
  987. return number_of_req_active_assignments;
  988. }
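/**
 * Assign SIs protected by the SG to at most su_active_assign in-service
 * SUs with the active HA state requested, respecting
 * saAmfSGMaxActiveSIsperSUs. Returns the number of assignments created.
 */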
  989. static int sg_assign_nm_active (struct amf_sg *sg, int su_active_assign)
  990. {
  991. struct amf_su *su;
  992. struct amf_si *si;
  993. int assigned = 0;
  994. int assign_to_su = 0;
  995. int total_assigned = 0;
  996. int si_left;
  997. int si_total;
  998. int su_left_to_assign = su_active_assign;
  999. si_total = sg_si_count_get (sg);
  1000. si_left = si_total;
  1001. assign_to_su = div_round (si_left, su_active_assign);
  1002. if (assign_to_su > sg->saAmfSGMaxActiveSIsperSUs) {
  1003. assign_to_su = sg->saAmfSGMaxActiveSIsperSUs;
  1004. }
  1005. su = sg->su_head;
  1006. while (su != NULL && su_left_to_assign > 0) {
  1007. if (amf_su_get_saAmfSUReadinessState (su) !=
  1008. SA_AMF_READINESS_IN_SERVICE ||
  1009. amf_su_get_saAmfSUNumCurrActiveSIs (su) ==
  1010. assign_to_su ||
  1011. amf_su_get_saAmfSUNumCurrStandbySIs (su) > 0) {
  1012. su = su->next;
  1013. continue; /* Not in service */
  1014. }
  1015. si = sg->application->si_head;
  1016. assigned = 0;
  1017. assign_to_su = div_round (si_left, su_left_to_assign);
  1018. if (assign_to_su > sg->saAmfSGMaxActiveSIsperSUs) {
  1019. assign_to_su = sg->saAmfSGMaxActiveSIsperSUs;
  1020. }
  1021. while (si != NULL) {
  1022. if (name_match (&si->saAmfSIProtectedbySG, &sg->name) &&
  1023. assigned < assign_to_su &&
  1024. amf_si_get_saAmfSINumReqActiveAssignments(si) == 0) {
  1025. assigned += 1;
  1026. total_assigned += 1;
  1027. amf_su_assign_si (su, si, SA_AMF_HA_ACTIVE);
  1028. }
  1029. si = si->next;
  1030. }
  1031. su = su->next;
  1032. su_left_to_assign -= 1;
  1033. si_left -= assigned;
  1034. dprintf (" su_left_to_assign =%d, si_left=%d\n",
  1035. su_left_to_assign, si_left);
  1036. }
  1037. assert (total_assigned <= si_total);
  1038. if (total_assigned == 0) {
  1039. dprintf ("Info: No SIs assigned");
  1040. }
  1041. LEAVE();
  1042. return total_assigned;
  1043. }
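/**
 * Assign SIs protected by the SG to at most su_standby_assign in-service
 * SUs with the standby HA state requested, respecting
 * saAmfSGMaxStandbySIsperSUs. Returns the number of assignments created.
 */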
  1044. static int sg_assign_nm_standby (struct amf_sg *sg, int su_standby_assign)
  1045. {
  1046. struct amf_su *su;
  1047. struct amf_si *si;
  1048. int assigned = 0;
  1049. int assign_to_su = 0;
  1050. int total_assigned = 0;
  1051. int si_left;
  1052. int si_total;
  1053. int su_left_to_assign = su_standby_assign;
  1054. ENTER ("'%s'", sg->name.value);
  1055. if (su_standby_assign == 0) {
  1056. return 0;
  1057. }
  1058. si_total = sg_si_count_get (sg);
  1059. si_left = si_total;
  1060. assign_to_su = div_round (si_left, su_standby_assign);
  1061. if (assign_to_su > sg->saAmfSGMaxStandbySIsperSUs) {
  1062. assign_to_su = sg->saAmfSGMaxStandbySIsperSUs;
  1063. }
  1064. su = sg->su_head;
  1065. while (su != NULL && su_left_to_assign > 0) {
  1066. if (amf_su_get_saAmfSUReadinessState (su) !=
  1067. SA_AMF_READINESS_IN_SERVICE ||
  1068. amf_su_get_saAmfSUNumCurrActiveSIs (su) > 0 ||
  1069. amf_su_get_saAmfSUNumCurrStandbySIs (su) ==
  1070. assign_to_su) {
  1071. su = su->next;
  1072. continue; /* Not available for assignment */
  1073. }
  1074. si = sg->application->si_head;
  1075. assigned = 0;
  1076. assign_to_su = div_round (si_left, su_left_to_assign);
  1077. if (assign_to_su > sg->saAmfSGMaxStandbySIsperSUs) {
  1078. assign_to_su = sg->saAmfSGMaxStandbySIsperSUs;
  1079. }
  1080. while (si != NULL) {
  1081. if (name_match (&si->saAmfSIProtectedbySG, &sg->name) &&
  1082. assigned < assign_to_su &&
  1083. amf_si_get_saAmfSINumReqStandbyAssignments (si) == 0) {
  1084. assigned += 1;
  1085. total_assigned += 1;
  1086. amf_su_assign_si (su, si, SA_AMF_HA_STANDBY);
  1087. }
  1088. si = si->next;
  1089. }
  1090. su_left_to_assign -= 1;
  1091. si_left -= assigned;
  1092. dprintf (" su_left_to_assign =%d, si_left=%d\n",
  1093. su_left_to_assign, si_left);
  1094. su = su->next;
  1095. }
  1096. assert (total_assigned <= si_total);
  1097. if (total_assigned == 0) {
  1098. dprintf ("Info: No SIs assigned!");
  1099. }
  1100. return total_assigned;
  1101. }
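/**
 * Return the number of SUs in the SG with readiness state IN-SERVICE.
 */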
  1102. static int su_inservice_count_get (struct amf_sg *sg)
  1103. {
  1104. struct amf_su *su;
  1105. int answer = 0;
  1106. for (su = sg->su_head; su != NULL; su = su->next) {
  1107. if (amf_su_get_saAmfSUReadinessState (su) ==
  1108. SA_AMF_READINESS_IN_SERVICE) {
  1109. answer += 1;
  1110. }
  1111. }
  1112. return(answer);
  1113. }
  1114. /**
  1115. * TODO: dependency_level not used, hard coded
  1116. * @param sg
  1117. * @param dependency_level
  1118. */
  1119. static int assign_si (struct amf_sg *sg, int dependency_level)
  1120. {
  1121. int active_sus_needed = 0;
  1122. int standby_sus_needed = 0;
  1123. int inservice_count;
  1124. int su_active_assign;
  1125. int su_standby_assign;
  1126. int su_spare_assign;
  1127. int assigned = 0;
  1128. ENTER ("'%s'", sg->name.value);
  1129. /**
  1130. * Phase 1: Calculate assignments and create all runtime objects in
  1131. * information model. Do not do the actual assignment, done in
  1132. * phase 2.
  1133. */
  1134. /**
  1135. * Calculate number of SUs to assign to active or standby state
  1136. */
  1137. inservice_count = su_inservice_count_get (sg);
  1138. if (sg->saAmfSGNumPrefActiveSUs > 0) {
  1139. dprintf("LHL sg_si_count_get (sg) %d ,sg->saAmfSGMaxActiveSIsperSUs %d, ",
  1140. sg_si_count_get (sg),
  1141. sg->saAmfSGMaxActiveSIsperSUs);
  1142. active_sus_needed = div_round (
  1143. sg_si_count_get (sg),
  1144. sg->saAmfSGMaxActiveSIsperSUs);
  1145. } else {
  1146. log_printf (LOG_LEVEL_ERROR, "ERROR: saAmfSGNumPrefActiveSUs == 0 !!");
  1147. openais_exit_error (AIS_DONE_FATAL_ERR);
  1148. }
  1149. if (sg->saAmfSGNumPrefStandbySUs > 0) {
  1150. standby_sus_needed = div_round (
  1151. sg_si_count_get (sg),
  1152. sg->saAmfSGMaxStandbySIsperSUs);
  1153. } else {
  1154. log_printf (LOG_LEVEL_ERROR, "ERROR: saAmfSGNumPrefStandbySUs == 0 !!");
  1155. openais_exit_error (AIS_DONE_FATAL_ERR);
  1156. }
  1157. dprintf ("(inservice=%d) (active_sus_needed=%d) (standby_sus_needed=%d)"
  1158. "\n",
  1159. inservice_count, active_sus_needed, standby_sus_needed);
  1160. /* Determine number of active and standby service units
  1161. * to assign based upon reduction procedure
  1162. */
  1163. if ((inservice_count < active_sus_needed)) {
  1164. dprintf ("assignment VI - partial assignment with SIs drop outs\n");
  1165. su_active_assign = inservice_count;
  1166. su_standby_assign = 0;
  1167. su_spare_assign = 0;
  1168. } else
  1169. if ((inservice_count < active_sus_needed + standby_sus_needed)) {
  1170. dprintf ("assignment V - partial assignment with reduction of"
  1171. " standby units\n");
  1172. su_active_assign = active_sus_needed;
  1173. su_standby_assign = inservice_count - active_sus_needed;
  1174. su_spare_assign = 0;
  1175. } else
  1176. if ((inservice_count < sg->saAmfSGNumPrefActiveSUs + standby_sus_needed)) {
  1177. dprintf ("IV: full assignment with reduction of active service"
  1178. " units\n");
  1179. su_active_assign = inservice_count - standby_sus_needed;
  1180. su_standby_assign = standby_sus_needed;
  1181. su_spare_assign = 0;
  1182. } else
  1183. if ((inservice_count <
  1184. sg->saAmfSGNumPrefActiveSUs + sg->saAmfSGNumPrefStandbySUs)) {
  1185. dprintf ("III: full assignment with reduction of standby service"
  1186. " units\n");
  1187. su_active_assign = sg->saAmfSGNumPrefActiveSUs;
  1188. su_standby_assign = inservice_count - sg->saAmfSGNumPrefActiveSUs;
  1189. su_spare_assign = 0;
  1190. } else
  1191. if ((inservice_count ==
  1192. sg->saAmfSGNumPrefActiveSUs + sg->saAmfSGNumPrefStandbySUs)) {
  1193. if (sg->saAmfSGNumPrefInserviceSUs > inservice_count) {
  1194. dprintf ("II: full assignment with spare reduction\n");
  1195. } else {
  1196. dprintf ("II: full assignment without spares\n");
  1197. }
  1198. su_active_assign = sg->saAmfSGNumPrefActiveSUs;
  1199. su_standby_assign = sg->saAmfSGNumPrefStandbySUs;
  1200. su_spare_assign = 0;
  1201. } else {
  1202. dprintf ("I: full assignment with spares\n");
  1203. su_active_assign = sg->saAmfSGNumPrefActiveSUs;
  1204. su_standby_assign = sg->saAmfSGNumPrefStandbySUs;
  1205. su_spare_assign = inservice_count -
  1206. sg->saAmfSGNumPrefActiveSUs - sg->saAmfSGNumPrefStandbySUs;
  1207. }
  1208. dprintf ("(inservice=%d) (assigning active=%d) (assigning standby=%d)"
  1209. " (assigning spares=%d)\n",
  1210. inservice_count, su_active_assign, su_standby_assign, su_spare_assign);
  1211. if (inservice_count > 0) {
  1212. assigned = sg_assign_nm_active (sg, su_active_assign);
  1213. assigned += sg_assign_nm_standby (sg, su_standby_assign);
  1214. sg->saAmfSGNumCurrAssignedSUs = inservice_count;
  1215. /**
  1216. * Phase 2: do the actual assignment to the component
  1217. * TODO: first do active, then standby
  1218. */
  1219. {
  1220. struct amf_si *si;
  1221. struct amf_si_assignment *si_assignment;
  1222. for (si = sg->application->si_head; si != NULL; si = si->next) {
  1223. if (name_match (&si->saAmfSIProtectedbySG, &sg->name)) {
  1224. for (si_assignment = si->assigned_sis;
  1225. si_assignment != NULL;
  1226. si_assignment = si_assignment->next) {
  1227. if (si_assignment->requested_ha_state !=
  1228. si_assignment->saAmfSISUHAState) {
  1229. amf_si_ha_state_assume (
  1230. si_assignment, assign_si_assumed_cbfn);
  1231. }
  1232. }
  1233. }
  1234. }
  1235. }
  1236. }
  1237. LEAVE ("'%s'", sg->name.value);
  1238. return assigned;
  1239. }
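/**
 * Request the SG to assign workload to its SUs on the specified dependency
 * level. Returns the number of assignments initiated; if nothing could be
 * assigned, the SG immediately returns to the idle state.
 */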
  1240. int amf_sg_assign_si_req (struct amf_sg *sg, int dependency_level)
  1241. {
  1242. int posible_to_assign_si;
  1243. sg->recovery_scope.event_type = SG_ASSIGN_SI_EV;
  1244. sg->avail_state = SG_AC_AssigningOnRequest;
  1245. if ((posible_to_assign_si = assign_si (sg, dependency_level)) == 0) {
  1246. sg_enter_idle (sg);
  1247. }
  1248. return posible_to_assign_si;
  1249. }
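/**
 * Request the SG to fail over all SUs it hosts on the specified node.
 * If the SG is busy handling another event, the request is deferred and
 * recalled when the SG returns to the idle state.
 */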
  1250. void amf_sg_failover_node_req (struct amf_sg *sg, struct amf_node *node)
  1251. {
  1252. ENTER("'%s, %s'",node->name.value, sg->name.value);
  1253. sg_event_t sg_event;
  1254. switch (sg->avail_state) {
  1255. case SG_AC_Idle:
  1256. set_scope_for_failover_node(sg, node);
  1257. if (has_any_su_in_scope_active_workload (sg)) {
  1258. acsm_enter_deactivating_dependent_workload (sg);
  1259. } else {
  1260. amf_su_t **sus = sg->recovery_scope.sus;
  1261. /*
  1262. * Select next state depending on whether some
  1263. * SU in the scope needs to be terminated.
  1264. */
  1265. while (*sus != NULL) {
  1266. amf_su_t *su = *sus;
  1267. ENTER("SU %s pr_state='%d'",su->name.value,
  1268. su->saAmfSUPresenceState);
  1269. if (su_presense_state_is_ored (su,
  1270. SA_AMF_PRESENCE_UNINSTANTIATED,
  1271. SA_AMF_PRESENCE_TERMINATION_FAILED,
  1272. SA_AMF_PRESENCE_INSTANTIATION_FAILED)) {
  1273. sus++;
  1274. continue;
  1275. }
  1276. break;
  1277. }
  1278. if (*sus != NULL) {
  1279. acsm_enter_terminating_suspected (sg);
  1280. } else {
  1281. delete_si_assignments_in_scope (sg);
  1282. sg_enter_idle (sg);
  1283. }
  1284. }
  1285. break;
  1286. case SG_AC_DeactivatingDependantWorkload:
  1287. case SG_AC_TerminatingSuspected:
  1288. case SG_AC_ActivatingStandby:
  1289. case SG_AC_AssigningStandbyToSpare:
  1290. case SG_AC_ReparingComponent:
  1291. case SG_AC_ReparingSu:
  1292. case SG_AC_AssigningOnRequest:
  1293. case SG_AC_InstantiatingServiceUnits:
  1294. case SG_AC_RemovingAssignment:
  1295. case SG_AC_AssigningActiveworkload:
  1296. case SG_AC_AssigningAutoAdjust:
  1297. case SG_AC_AssigningStandBy:
  1298. case SG_AC_WaitingAfterOperationFailed:
  1299. sg_set_event (SG_FAILOVER_NODE_EV, sg, 0, 0, node, &sg_event);
  1300. sg_defer_event (SG_FAILOVER_NODE_EV, &sg_event);
  1301. break;
  1302. default:
  1303. assert (0);
  1304. break;
  1305. }
  1306. }
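/**
 * Request the SG to start by instantiating its SUs, either all SUs
 * (cluster start, node == NULL) or only the SUs hosted on the specified
 * node, up to the configured number of in-service SUs.
 */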
  1307. void amf_sg_start (struct amf_sg *sg, struct amf_node *node)
  1308. {
  1309. sg_event_t sg_event;
  1310. sg->recovery_scope.event_type = SG_START_EV;
  1311. switch (sg->avail_state) {
  1312. case SG_AC_Idle: {
  1313. amf_su_t *su;
  1314. sg_avail_control_state_t old_avail_state = sg->avail_state;
  1315. int instantiated_sus = 0;
  1316. ENTER ("'%s'", sg->name.value);
  1317. sg->node_to_start = node;
  1318. sg->avail_state = SG_AC_InstantiatingServiceUnits;
  1319. for (su = sg->su_head;
  1320. (su != NULL) &&
  1321. (instantiated_sus < sg->saAmfSGNumPrefInserviceSUs);
  1322. su = su->next) {
  1323. if (node == NULL) {
  1324. /*
  1325. * Cluster start
  1326. */
  1327. amf_su_instantiate (su);
  1328. instantiated_sus++;
  1329. } else {
  1330. /*
  1331. * Node start, match if SU is hosted on the
  1332. * specified node
  1333. */
  1334. if (name_match (&node->name,
  1335. &su->saAmfSUHostedByNode)) {
  1336. amf_su_instantiate (su);
  1337. instantiated_sus++;
  1338. }
  1339. }
  1340. }
  1341. if (instantiated_sus == 0) {
  1342. sg->avail_state = old_avail_state;
  1343. }
  1344. break;
  1345. }
  1346. case SG_AC_InstantiatingServiceUnits:
  1347. sg_set_event (SG_START_EV, sg, 0, 0, node, &sg_event);
  1348. sg_defer_event (SG_START_EV, &sg_event);
  1349. break;
  1350. case SG_AC_DeactivatingDependantWorkload:
  1351. case SG_AC_TerminatingSuspected:
  1352. case SG_AC_ActivatingStandby:
  1353. case SG_AC_AssigningStandbyToSpare:
  1354. case SG_AC_ReparingComponent:
  1355. case SG_AC_ReparingSu:
  1356. case SG_AC_AssigningOnRequest:
  1357. case SG_AC_RemovingAssignment:
  1358. case SG_AC_AssigningActiveworkload:
  1359. case SG_AC_AssigningAutoAdjust:
  1360. case SG_AC_AssigningStandBy:
  1361. case SG_AC_WaitingAfterOperationFailed:
  1362. default:
  1363. assert (0);
  1364. break;
  1365. }
  1366. }
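/**
 * Handle a state change reported by one of the SUs in the SG. Currently
 * only presence state changes are handled; they drive the availability
 * control state machine forward.
 */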
void amf_sg_su_state_changed (struct amf_sg *sg,
	struct amf_su *su, SaAmfStateT type, int state)
{
	ENTER ("'%s' SU '%s' state %s",
		sg->name.value, su->name.value, amf_presence_state(state));

	if (type == SA_AMF_PRESENCE_STATE) {
		if (state == SA_AMF_PRESENCE_INSTANTIATED) {
			if (sg->avail_state == SG_AC_InstantiatingServiceUnits) {
				/* Enter idle when no SU in scope is still instantiating. */
				if (no_su_has_presence_state(sg, sg->node_to_start,
					SA_AMF_PRESENCE_INSTANTIATING)) {
					sg_enter_idle (sg);
				}
			} else if (sg->avail_state == SG_AC_ReparingSu) {
				if (all_su_in_scope_has_presence_state(su->sg,
					SA_AMF_PRESENCE_INSTANTIATED)) {
					su->sg->avail_state = SG_AC_AssigningStandBy;
					if (assign_si (sg, 0) == 0) {
						sg_enter_idle (sg);
					}
				} else {
					dprintf ("avail-state: %u", sg->avail_state);
					assert (0);
				}
			} else {
				dprintf ("avail-state: %u", sg->avail_state);
				assert (0);
			}
		} else if (state == SA_AMF_PRESENCE_UNINSTANTIATED) {
			if (sg->avail_state == SG_AC_TerminatingSuspected) {
				if (all_su_in_scope_has_presence_state (sg, state)) {
					delete_si_assignments_in_scope (sg);
					if (is_standby_for_non_active_si_in_scope (sg)) {
						acsm_enter_activating_standby (sg);
					} else {
						/*
						 * TODO: create SI assignments to spare SUs
						 * and assign them
						 */
						sg->avail_state = SG_AC_AssigningStandbyToSpare;
						acsm_enter_repairing_su (sg);
					}
				}
			} else {
				assert (0);
			}
		} else if (state == SA_AMF_PRESENCE_INSTANTIATING) {
			; /* nop */
		} else if (state == SA_AMF_PRESENCE_INSTANTIATION_FAILED) {
			if (sg->avail_state == SG_AC_InstantiatingServiceUnits) {
				if (no_su_has_presence_state(sg, sg->node_to_start,
					SA_AMF_PRESENCE_INSTANTIATING)) {
					sg_enter_idle (sg);
				}
			}
		} else {
			assert (0);
		}
	} else {
		assert (0);
	}
}
void amf_sg_init (void)
{
	log_init ("AMF");
}
void amf_sg_failover_su_req (struct amf_sg *sg, struct amf_su *su,
	struct amf_node *node)
{
	sg_event_t sg_event;

	ENTER ("");

	switch (sg->avail_state) {
		case SG_AC_Idle:
			set_scope_for_failover_su (sg, su);
			if (has_any_su_in_scope_active_workload (sg)) {
				acsm_enter_deactivating_dependent_workload (sg);
			} else {
				acsm_enter_terminating_suspected (sg);
			}
			break;
		case SG_AC_DeactivatingDependantWorkload:
		case SG_AC_TerminatingSuspected:
		case SG_AC_ActivatingStandby:
		case SG_AC_AssigningStandbyToSpare:
		case SG_AC_ReparingComponent:
		case SG_AC_ReparingSu:
		case SG_AC_AssigningOnRequest:
		case SG_AC_InstantiatingServiceUnits:
		case SG_AC_RemovingAssignment:
		case SG_AC_AssigningActiveworkload:
		case SG_AC_AssigningAutoAdjust:
		case SG_AC_AssigningStandBy:
		case SG_AC_WaitingAfterOperationFailed:
			/* SG is busy; defer the SU fail-over request. */
			sg_set_event (SG_FAILOVER_SU_EV, sg, su, 0, 0, &sg_event);
			sg_defer_event (SG_FAILOVER_SU_EV, &sg_event);
			break;
		default:
			assert (0);
			break;
	}
}
/**
 * Constructor for SG objects. Adds the SG to the list owned by
 * the specified application. Always returns a valid SG object;
 * out-of-memory problems are handled here. Default values are
 * initialized.
 * @param app
 * @param name
 *
 * @return struct amf_sg*
 */
struct amf_sg *amf_sg_new (struct amf_application *app, char *name)
{
	struct amf_sg *sg = amf_calloc (1, sizeof (struct amf_sg));

	setSaNameT (&sg->name, name);
	sg->saAmfSGAdminState = SA_AMF_ADMIN_UNLOCKED;
	sg->saAmfSGNumPrefActiveSUs = 1;
	sg->saAmfSGNumPrefStandbySUs = 1;
	sg->saAmfSGNumPrefInserviceSUs = ~0;
	sg->saAmfSGNumPrefAssignedSUs = ~0;
	sg->saAmfSGCompRestartProb = -1;
	sg->saAmfSGCompRestartMax = ~0;
	sg->saAmfSGSuRestartProb = -1;
	sg->saAmfSGSuRestartMax = ~0;
	sg->saAmfSGAutoAdjustProb = -1;
	sg->saAmfSGAutoRepair = SA_TRUE;
	sg->application = app;
	sg->next = app->sg_head;
	app->sg_head = sg;
	sg->deferred_events = NULL;

	return sg;
}
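/*
 * Usage sketch (illustrative only, not part of the original file): a caller
 * building the AMF configuration might create an SG and attach it to its
 * application roughly like this. The SU constructor name amf_su_new() and
 * the DN strings below are assumptions made for the example; only amf_sg_new()
 * above is taken from this file.
 *
 *	struct amf_sg *sg = amf_sg_new (app, "safSg=sg1");
 *
 *	sg->saAmfSGRedundancyModel = SA_AMF_2N_REDUNDANCY_MODEL;
 *	sg->saAmfSGNumPrefInserviceSUs = 2;
 *
 *	(void)amf_su_new (sg, "safSu=su1");
 *	(void)amf_su_new (sg, "safSu=su2");
 */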
void amf_sg_delete (struct amf_sg *sg)
{
	struct amf_su *su;

	for (su = sg->su_head; su != NULL;) {
		struct amf_su *tmp = su;
		su = su->next;
		amf_su_delete (tmp);
	}

	free (sg);
}
void *amf_sg_serialize (struct amf_sg *sg, int *len)
{
	char *buf = NULL;
	int offset = 0, size = 0;

	TRACE8 ("%s", sg->name.value);

	buf = amf_serialize_SaNameT (buf, &size, &offset, &sg->name);
	buf = amf_serialize_SaUint32T (
		buf, &size, &offset, sg->saAmfSGRedundancyModel);
	buf = amf_serialize_SaUint32T (
		buf, &size, &offset, sg->saAmfSGAutoAdjust);
	buf = amf_serialize_SaUint32T (
		buf, &size, &offset, sg->saAmfSGNumPrefActiveSUs);
	buf = amf_serialize_SaUint32T (
		buf, &size, &offset, sg->saAmfSGNumPrefStandbySUs);
	buf = amf_serialize_SaUint32T (
		buf, &size, &offset, sg->saAmfSGNumPrefInserviceSUs);
	buf = amf_serialize_SaUint32T (
		buf, &size, &offset, sg->saAmfSGNumPrefAssignedSUs);
	buf = amf_serialize_SaUint32T (
		buf, &size, &offset, sg->saAmfSGMaxActiveSIsperSUs);
	buf = amf_serialize_SaUint32T (
		buf, &size, &offset, sg->saAmfSGMaxStandbySIsperSUs);
	buf = amf_serialize_SaUint32T (
		buf, &size, &offset, sg->saAmfSGCompRestartProb);
	buf = amf_serialize_SaUint32T (
		buf, &size, &offset, sg->saAmfSGCompRestartMax);
	buf = amf_serialize_SaUint32T (
		buf, &size, &offset, sg->saAmfSGSuRestartProb);
	buf = amf_serialize_SaUint32T (
		buf, &size, &offset, sg->saAmfSGSuRestartMax);
	buf = amf_serialize_SaUint32T (
		buf, &size, &offset, sg->saAmfSGAutoAdjustProb);
	buf = amf_serialize_SaUint32T (
		buf, &size, &offset, sg->saAmfSGAutoRepair);
	buf = amf_serialize_SaUint32T (
		buf, &size, &offset, sg->saAmfSGAdminState);
	buf = amf_serialize_SaUint32T (
		buf, &size, &offset, sg->saAmfSGNumCurrAssignedSUs);
	buf = amf_serialize_SaUint32T (
		buf, &size, &offset, sg->saAmfSGNumCurrNonInstantiatedSpareSUs);
	buf = amf_serialize_SaUint32T (
		buf, &size, &offset, sg->saAmfSGNumCurrInstantiatedSpareSUs);
	buf = amf_serialize_SaStringT (
		buf, &size, &offset, sg->clccli_path);
	buf = amf_serialize_SaUint32T (
		buf, &size, &offset, sg->avail_state);

	*len = offset;

	return buf;
}
struct amf_sg *amf_sg_deserialize (struct amf_application *app, char *buf)
{
	char *tmp = buf;
	struct amf_sg *sg = amf_sg_new (app, "");

	tmp = amf_deserialize_SaNameT (tmp, &sg->name);
	tmp = amf_deserialize_SaUint32T (tmp, &sg->saAmfSGRedundancyModel);
	tmp = amf_deserialize_SaUint32T (tmp, &sg->saAmfSGAutoAdjust);
	tmp = amf_deserialize_SaUint32T (tmp, &sg->saAmfSGNumPrefActiveSUs);
	tmp = amf_deserialize_SaUint32T (tmp, &sg->saAmfSGNumPrefStandbySUs);
	tmp = amf_deserialize_SaUint32T (tmp, &sg->saAmfSGNumPrefInserviceSUs);
	tmp = amf_deserialize_SaUint32T (tmp, &sg->saAmfSGNumPrefAssignedSUs);
	tmp = amf_deserialize_SaUint32T (tmp, &sg->saAmfSGMaxActiveSIsperSUs);
	tmp = amf_deserialize_SaUint32T (tmp, &sg->saAmfSGMaxStandbySIsperSUs);
	tmp = amf_deserialize_SaUint32T (tmp, &sg->saAmfSGCompRestartProb);
	tmp = amf_deserialize_SaUint32T (tmp, &sg->saAmfSGCompRestartMax);
	tmp = amf_deserialize_SaUint32T (tmp, &sg->saAmfSGSuRestartProb);
	tmp = amf_deserialize_SaUint32T (tmp, &sg->saAmfSGSuRestartMax);
	tmp = amf_deserialize_SaUint32T (tmp, &sg->saAmfSGAutoAdjustProb);
	tmp = amf_deserialize_SaUint32T (tmp, &sg->saAmfSGAutoRepair);
	tmp = amf_deserialize_SaUint32T (tmp, &sg->saAmfSGAdminState);
	tmp = amf_deserialize_SaUint32T (tmp, &sg->saAmfSGNumCurrAssignedSUs);
	tmp = amf_deserialize_SaUint32T (tmp, &sg->saAmfSGNumCurrNonInstantiatedSpareSUs);
	tmp = amf_deserialize_SaUint32T (tmp, &sg->saAmfSGNumCurrInstantiatedSpareSUs);
	tmp = amf_deserialize_SaStringT (tmp, &sg->clccli_path);
	tmp = amf_deserialize_SaUint32T (tmp, &sg->avail_state);

	return sg;
}
struct amf_sg *amf_sg_find (struct amf_application *app, char *name)
{
	struct amf_sg *sg;

	for (sg = app->sg_head; sg != NULL; sg = sg->next) {
		if (sg->name.length == strlen(name) &&
			strncmp (name, (char*)sg->name.value, sg->name.length) == 0) {
			break;
		}
	}

	return sg;
}