  1. /*
  2. * Copyright (c) 2005 MontaVista Software, Inc.
  3. * Copyright (c) 2006 Red Hat, Inc.
  4. *
  5. * All rights reserved.
  6. *
  7. * Author: Steven Dake (sdake@redhat.com)
  8. *
  9. * This software licensed under BSD license, the text of which follows:
  10. *
  11. * Redistribution and use in source and binary forms, with or without
  12. * modification, are permitted provided that the following conditions are met:
  13. *
  14. * - Redistributions of source code must retain the above copyright notice,
  15. * this list of conditions and the following disclaimer.
  16. * - Redistributions in binary form must reproduce the above copyright notice,
  17. * this list of conditions and the following disclaimer in the documentation
  18. * and/or other materials provided with the distribution.
  19. * - Neither the name of the MontaVista Software, Inc. nor the names of its
  20. * contributors may be used to endorse or promote products derived from this
  21. * software without specific prior written permission.
  22. *
  23. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  24. * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  25. * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  26. * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  27. * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  28. * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  29. * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  30. * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  31. * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  32. * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
  33. * THE POSSIBILITY OF SUCH DAMAGE.
  34. */
  35. /*
  36. * Add work to a work group and have threads process the work
  37. * Provide blocking for all work to complete
  38. */
  39. #include <stdlib.h>
  40. #include <pthread.h>
  41. #include <errno.h>
  42. #include <corosync/queue.h>
  43. #include "wthread.h"
/*
 * Per-thread startup argument handed to pthread_create: carries the
 * thread's private state pointer and (in 'data') a back-pointer to the
 * thread's own struct worker_thread.
 */
struct thread_data {
	void *thread_state;	/* per-thread state buffer; NULL if thread_state_size == 0 */
	void *data;		/* points at the owning struct worker_thread */
};

/*
 * One worker thread: its queue of pending work plus the synchronization
 * objects used to hand work in (new_work_*) and to report that the
 * queue has drained (done_work_*).
 */
struct worker_thread {
	struct worker_thread_group *worker_thread_group;	/* owning group */
	pthread_mutex_t new_work_mutex;		/* guards 'queue' for producer and consumer */
	pthread_cond_t new_work_cond;		/* signalled when an item is queued */
	pthread_cond_t cond;			/* NOTE(review): never used in this file — confirm before removing */
	pthread_mutex_t done_work_mutex;	/* guards the drain notification */
	pthread_cond_t done_work_cond;		/* signalled when 'queue' becomes empty */
	pthread_t thread_id;
	struct queue queue;			/* pending work items (fixed capacity) */
	void *thread_state;			/* same pointer as thread_data.thread_state */
	struct thread_data thread_data;		/* argument passed to pthread_create */
};
  60. void *worker_thread (void *thread_data_in) {
  61. struct thread_data *thread_data = (struct thread_data *)thread_data_in;
  62. struct worker_thread *worker_thread =
  63. (struct worker_thread *)thread_data->data;
  64. void *data_for_worker_fn;
  65. for (;;) {
  66. pthread_mutex_lock (&worker_thread->new_work_mutex);
  67. if (queue_is_empty (&worker_thread->queue) == 1) {
  68. pthread_cond_wait (&worker_thread->new_work_cond,
  69. &worker_thread->new_work_mutex);
  70. }
  71. /*
  72. * We unlock then relock the new_work_mutex to allow the
  73. * worker function to execute and also allow new work to be
  74. * added to the work queue
  75. */
  76. data_for_worker_fn = queue_item_get (&worker_thread->queue);
  77. pthread_mutex_unlock (&worker_thread->new_work_mutex);
  78. worker_thread->worker_thread_group->worker_fn (worker_thread->thread_state, data_for_worker_fn);
  79. pthread_mutex_lock (&worker_thread->new_work_mutex);
  80. queue_item_remove (&worker_thread->queue);
  81. pthread_mutex_unlock (&worker_thread->new_work_mutex);
  82. pthread_mutex_lock (&worker_thread->done_work_mutex);
  83. if (queue_is_empty (&worker_thread->queue) == 1) {
  84. pthread_cond_signal (&worker_thread->done_work_cond);
  85. }
  86. pthread_mutex_unlock (&worker_thread->done_work_mutex);
  87. }
  88. return (0);
  89. }
  90. int worker_thread_group_init (
  91. struct worker_thread_group *worker_thread_group,
  92. int threads,
  93. int items_max,
  94. int item_size,
  95. int thread_state_size,
  96. void (*thread_state_constructor)(void *),
  97. void (*worker_fn)(void *thread_state, void *work_item))
  98. {
  99. int i;
  100. worker_thread_group->threadcount = threads;
  101. worker_thread_group->last_scheduled = 0;
  102. worker_thread_group->worker_fn = worker_fn;
  103. worker_thread_group->threads = malloc (sizeof (struct worker_thread) *
  104. threads);
  105. if (worker_thread_group->threads == 0) {
  106. return (-1);
  107. }
  108. for (i = 0; i < threads; i++) {
  109. if (thread_state_size) {
  110. worker_thread_group->threads[i].thread_state = malloc (thread_state_size);
  111. } else {
  112. worker_thread_group->threads[i].thread_state = NULL;
  113. }
  114. if (thread_state_constructor) {
  115. thread_state_constructor (worker_thread_group->threads[i].thread_state);
  116. }
  117. worker_thread_group->threads[i].worker_thread_group = worker_thread_group;
  118. pthread_mutex_init (&worker_thread_group->threads[i].new_work_mutex, NULL);
  119. pthread_cond_init (&worker_thread_group->threads[i].new_work_cond, NULL);
  120. pthread_mutex_init (&worker_thread_group->threads[i].done_work_mutex, NULL);
  121. pthread_cond_init (&worker_thread_group->threads[i].done_work_cond, NULL);
  122. queue_init (&worker_thread_group->threads[i].queue, items_max,
  123. item_size);
  124. worker_thread_group->threads[i].thread_data.thread_state =
  125. worker_thread_group->threads[i].thread_state;
  126. worker_thread_group->threads[i].thread_data.data = &worker_thread_group->threads[i];
  127. pthread_create (&worker_thread_group->threads[i].thread_id,
  128. NULL, worker_thread, &worker_thread_group->threads[i].thread_data);
  129. }
  130. return (0);
  131. }
  132. int worker_thread_group_work_add (
  133. struct worker_thread_group *worker_thread_group,
  134. void *item)
  135. {
  136. int schedule;
  137. schedule = (worker_thread_group->last_scheduled + 1) % (worker_thread_group->threadcount);
  138. worker_thread_group->last_scheduled = schedule;
  139. pthread_mutex_lock (&worker_thread_group->threads[schedule].new_work_mutex);
  140. if (queue_is_full (&worker_thread_group->threads[schedule].queue)) {
  141. pthread_mutex_unlock (&worker_thread_group->threads[schedule].new_work_mutex);
  142. return (-1);
  143. }
  144. queue_item_add (&worker_thread_group->threads[schedule].queue, item);
  145. pthread_cond_signal (&worker_thread_group->threads[schedule].new_work_cond);
  146. pthread_mutex_unlock (&worker_thread_group->threads[schedule].new_work_mutex);
  147. return (0);
  148. }
  149. void worker_thread_group_wait (
  150. struct worker_thread_group *worker_thread_group)
  151. {
  152. int i;
  153. for (i = 0; i < worker_thread_group->threadcount; i++) {
  154. pthread_mutex_lock (&worker_thread_group->threads[i].done_work_mutex);
  155. if (queue_is_empty (&worker_thread_group->threads[i].queue) == 0) {
  156. pthread_cond_wait (&worker_thread_group->threads[i].done_work_cond,
  157. &worker_thread_group->threads[i].done_work_mutex);
  158. }
  159. pthread_mutex_unlock (&worker_thread_group->threads[i].done_work_mutex);
  160. }
  161. }
/*
 * Tear down the group: cancel each worker thread, join it, then destroy
 * its mutexes and condition variables.
 *
 * NOTE(review): workers are typically cancelled while blocked in
 * pthread_cond_wait, a cancellation point that reacquires
 * new_work_mutex before the thread terminates; with no cleanup handler
 * installed, that mutex may be left locked, and destroying a locked
 * mutex is undefined behavior — confirm against POSIX and add a
 * pthread_cleanup_push handler if needed.
 *
 * NOTE(review): the per-thread thread_state buffers and the 'threads'
 * array allocated in worker_thread_group_init are not freed here —
 * presumably the caller owns them; verify.
 */
void worker_thread_group_exit (
	struct worker_thread_group *worker_thread_group)
{
	int i;
	for (i = 0; i < worker_thread_group->threadcount; i++) {
		pthread_cancel (worker_thread_group->threads[i].thread_id);
		/* Wait for worker thread to exit gracefully before destroying
		 * mutexes and processing items in the queue etc.
		 */
		pthread_join (worker_thread_group->threads[i].thread_id, NULL);
		pthread_mutex_destroy (&worker_thread_group->threads[i].new_work_mutex);
		pthread_cond_destroy (&worker_thread_group->threads[i].new_work_cond);
		pthread_mutex_destroy (&worker_thread_group->threads[i].done_work_mutex);
		pthread_cond_destroy (&worker_thread_group->threads[i].done_work_cond);
	}
}
  178. void worker_thread_group_atsegv (
  179. struct worker_thread_group *worker_thread_group)
  180. {
  181. void *data_for_worker_fn;
  182. struct worker_thread *worker_thread;
  183. unsigned int i;
  184. for (i = 0; i < worker_thread_group->threadcount; i++) {
  185. worker_thread = &worker_thread_group->threads[i];
  186. while (queue_is_empty (&worker_thread->queue) == 0) {
  187. data_for_worker_fn = queue_item_get (&worker_thread->queue);
  188. worker_thread->worker_thread_group->worker_fn (worker_thread->thread_state, data_for_worker_fn);
  189. queue_item_remove (&worker_thread->queue);
  190. }
  191. }
  192. }