wthread.c

/*
 * Copyright (c) 2005 MontaVista Software, Inc.
 *
 * All rights reserved.
 *
 * Author: Steven Dake (sdake@mvista.com)
 *
 * This software licensed under BSD license, the text of which follows:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * - Redistributions of source code must retain the above copyright notice,
 *   this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright notice,
 *   this list of conditions and the following disclaimer in the documentation
 *   and/or other materials provided with the distribution.
 * - Neither the name of the MontaVista Software, Inc. nor the names of its
 *   contributors may be used to endorse or promote products derived from this
 *   software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Add work to a worker thread group and have its threads process the
 * work.  Provide blocking until all queued work completes.
 */
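/*
 * Design note (derived from the code below): each worker thread owns a
 * private queue guarded by new_work_mutex/new_work_cond, and signals
 * done_work_cond when that queue drains.  Callers distribute items
 * round-robin across the threads with worker_thread_group_work_add and
 * may block in worker_thread_group_wait until every queue is empty.
 * A buildable usage sketch appears at the bottom of this file.
 */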
#include <stdlib.h>
#include <pthread.h>
#include <errno.h>

#include "wthread.h"
#include "../include/queue.h"
struct thread_data {
	void *thread_state;	/* per-thread state handed to worker_fn */
	void *data;		/* the owning struct worker_thread */
};
struct worker_thread {
	struct worker_thread_group *worker_thread_group;
	pthread_mutex_t new_work_mutex;
	pthread_cond_t new_work_cond;
	pthread_cond_t cond;		/* unused */
	pthread_mutex_t done_work_mutex;
	pthread_cond_t done_work_cond;
	pthread_t thread_id;
	struct queue queue;		/* this thread's private work queue */
	void *thread_state;
	struct thread_data thread_data;
};
void *worker_thread (void *thread_data_in) {
	struct thread_data *thread_data = (struct thread_data *)thread_data_in;
	struct worker_thread *worker_thread =
		(struct worker_thread *)thread_data->data;
	void *data_for_worker_fn;

	for (;;) {
		pthread_mutex_lock (&worker_thread->new_work_mutex);
		/*
		 * Sleep until work arrives; loop the wait to guard
		 * against spurious wakeups.
		 */
		while (queue_is_empty (&worker_thread->queue) == 1) {
			pthread_cond_wait (&worker_thread->new_work_cond,
				&worker_thread->new_work_mutex);
		}
		/*
		 * new_work_mutex is held across worker_fn, so producers
		 * adding to this thread's queue block until the current
		 * item finishes.
		 */
		data_for_worker_fn = queue_item_get (&worker_thread->queue);
		worker_thread->worker_thread_group->worker_fn (
			thread_data->thread_state, data_for_worker_fn);
		queue_item_remove (&worker_thread->queue);
		pthread_mutex_unlock (&worker_thread->new_work_mutex);

		/*
		 * If that was the last queued item, wake any caller
		 * blocked in worker_thread_group_wait.
		 */
		pthread_mutex_lock (&worker_thread->done_work_mutex);
		if (queue_is_empty (&worker_thread->queue) == 1) {
			pthread_cond_signal (&worker_thread->done_work_cond);
		}
		pthread_mutex_unlock (&worker_thread->done_work_mutex);
	}
	return (0);
}
int worker_thread_group_init (
	struct worker_thread_group *worker_thread_group,
	int threads,
	int items_max,
	int item_size,
	int thread_state_size,
	void (*thread_state_constructor)(void *),
	void (*worker_fn)(void *thread_state, void *work_item))
{
	int i;

	worker_thread_group->threadcount = threads;
	worker_thread_group->last_scheduled = 0;
	worker_thread_group->worker_fn = worker_fn;

	worker_thread_group->threads = malloc (sizeof (struct worker_thread) *
		threads);
	if (worker_thread_group->threads == NULL) {
		return (-1);
	}

	for (i = 0; i < threads; i++) {
		worker_thread_group->threads[i].thread_state =
			malloc (thread_state_size);
		if (worker_thread_group->threads[i].thread_state == NULL) {
			return (-1);
		}
		thread_state_constructor (worker_thread_group->threads[i].thread_state);
		worker_thread_group->threads[i].worker_thread_group = worker_thread_group;
		pthread_mutex_init (&worker_thread_group->threads[i].new_work_mutex, NULL);
		pthread_cond_init (&worker_thread_group->threads[i].new_work_cond, NULL);
		pthread_mutex_init (&worker_thread_group->threads[i].done_work_mutex, NULL);
		pthread_cond_init (&worker_thread_group->threads[i].done_work_cond, NULL);
		queue_init (&worker_thread_group->threads[i].queue, items_max,
			item_size);

		worker_thread_group->threads[i].thread_data.thread_state =
			worker_thread_group->threads[i].thread_state;
		worker_thread_group->threads[i].thread_data.data =
			&worker_thread_group->threads[i];
		pthread_create (&worker_thread_group->threads[i].thread_id,
			NULL, worker_thread, &worker_thread_group->threads[i].thread_data);
	}
	return (0);
}
void worker_thread_group_work_add (
	struct worker_thread_group *worker_thread_group,
	void *item)
{
	int schedule;

	/*
	 * Round-robin the item onto the next thread's private queue.
	 */
	schedule = (worker_thread_group->last_scheduled + 1) %
		(worker_thread_group->threadcount);
	worker_thread_group->last_scheduled = schedule;

	pthread_mutex_lock (&worker_thread_group->threads[schedule].new_work_mutex);
	queue_item_add (&worker_thread_group->threads[schedule].queue, item);
	pthread_cond_signal (&worker_thread_group->threads[schedule].new_work_cond);
	pthread_mutex_unlock (&worker_thread_group->threads[schedule].new_work_mutex);
}
void worker_thread_group_wait (
	struct worker_thread_group *worker_thread_group)
{
	int i;

	/*
	 * Block until every thread's queue has drained; loop the wait
	 * to guard against spurious wakeups.
	 */
	for (i = 0; i < worker_thread_group->threadcount; i++) {
		pthread_mutex_lock (&worker_thread_group->threads[i].done_work_mutex);
		while (queue_is_empty (&worker_thread_group->threads[i].queue) == 0) {
			pthread_cond_wait (&worker_thread_group->threads[i].done_work_cond,
				&worker_thread_group->threads[i].done_work_mutex);
		}
		pthread_mutex_unlock (&worker_thread_group->threads[i].done_work_mutex);
	}
}
void worker_thread_group_exit (
	struct worker_thread_group *worker_thread_group)
{
	int i;

	for (i = 0; i < worker_thread_group->threadcount; i++) {
		pthread_cancel (worker_thread_group->threads[i].thread_id);
		/*
		 * Reap the cancelled thread so its resources are released.
		 */
		pthread_join (worker_thread_group->threads[i].thread_id, NULL);
	}
}
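
/*
 * A minimal usage sketch, kept out of the normal build behind an
 * illustrative WTHREAD_EXAMPLE guard (the guard is not part of the
 * original build).  The state type, constructor, and worker function
 * below are hypothetical; real callers supply their own.  It assumes
 * queue_item_add copies item_size bytes into the queue, as the
 * item_size parameter passed to queue_init suggests.
 */
#ifdef WTHREAD_EXAMPLE
#include <stdio.h>

struct example_state {
	int processed;	/* per-thread tally; each thread owns its own state */
};

static void example_state_constructor (void *thread_state) {
	((struct example_state *)thread_state)->processed = 0;
}

static void example_worker_fn (void *thread_state, void *work_item) {
	((struct example_state *)thread_state)->processed++;
	printf ("worked item %d\n", *(int *)work_item);
}

int main (void) {
	struct worker_thread_group group;
	int i;

	/*
	 * 4 threads, each queueing at most 32 int-sized items.
	 */
	if (worker_thread_group_init (&group, 4, 32, sizeof (int),
		sizeof (struct example_state),
		example_state_constructor, example_worker_fn) != 0) {

		return (1);
	}
	for (i = 0; i < 100; i++) {
		/*
		 * Passing &i is safe only because the item is copied
		 * into the queue before work_add returns.
		 */
		worker_thread_group_work_add (&group, &i);
	}
	worker_thread_group_wait (&group);	/* block until all queues drain */
	worker_thread_group_exit (&group);
	return (0);
}
#endif /* WTHREAD_EXAMPLE */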