signals.py 10 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287
  1. import logging
  2. from threading import local
  3. from django.contrib.contenttypes.models import ContentType
  4. from django.core.exceptions import ObjectDoesNotExist, ValidationError
  5. from django.db.models.fields.reverse_related import ManyToManyRel, ManyToOneRel
  6. from django.db.models.signals import m2m_changed, post_migrate, post_save, pre_delete
  7. from django.dispatch import receiver, Signal
  8. from django.core.signals import request_finished
  9. from django.utils.translation import gettext_lazy as _
  10. from django_prometheus.models import model_deletes, model_inserts, model_updates
  11. from core.choices import JobStatusChoices, ObjectChangeActionChoices
  12. from core.events import *
  13. from core.models import ObjectType
  14. from extras.events import enqueue_event
  15. from extras.utils import run_validators
  16. from netbox.config import get_config
  17. from netbox.context import current_request, events_queue
  18. from netbox.models.features import ChangeLoggingMixin, get_model_features, model_is_public
  19. from utilities.exceptions import AbortRequest
  20. from .models import ConfigRevision, DataSource, ObjectChange
__all__ = (
    'clear_events',
    'job_end',
    'job_start',
    'post_sync',
    'pre_sync',
)

# Job signals (no receivers in this module; presumably fired around background
# job execution elsewhere — confirm against senders)
job_start = Signal()
job_end = Signal()

# DataSource signals; post_sync is received by auto_sync() below
pre_sync = Signal()
post_sync = Signal()

# Event signals; received by clear_events_queue() below to discard queued events
clear_events = Signal()
  36. #
  37. # Object types
  38. #
  39. @receiver(post_migrate)
  40. def update_object_types(sender, **kwargs):
  41. """
  42. Create or update the corresponding ObjectType for each model within the migrated app.
  43. """
  44. for model in sender.get_models():
  45. app_label, model_name = model._meta.label_lower.split('.')
  46. # Determine whether model is public and its supported features
  47. is_public = model_is_public(model)
  48. features = get_model_features(model)
  49. # Create/update the ObjectType for the model
  50. try:
  51. ot = ObjectType.objects.get_by_natural_key(app_label=app_label, model=model_name)
  52. ot.public = is_public
  53. ot.features = features
  54. ot.save()
  55. except ObjectDoesNotExist:
  56. ObjectType.objects.create(
  57. app_label=app_label,
  58. model=model_name,
  59. public=is_public,
  60. features=features,
  61. )
#
# Change logging & event handling
#

# Thread-local store used to track received signals per object; reset at the end
# of each request by clear_signal_history() below
_signals_received = local()
  67. @receiver((post_save, m2m_changed))
  68. def handle_changed_object(sender, instance, **kwargs):
  69. """
  70. Fires when an object is created or updated.
  71. """
  72. m2m_changed = False
  73. if not hasattr(instance, 'to_objectchange'):
  74. return
  75. # Get the current request, or bail if not set
  76. request = current_request.get()
  77. if request is None:
  78. return
  79. # Determine the type of change being made
  80. if kwargs.get('created'):
  81. event_type = OBJECT_CREATED
  82. elif 'created' in kwargs:
  83. event_type = OBJECT_UPDATED
  84. elif kwargs.get('action') in ['post_add', 'post_remove'] and kwargs['pk_set']:
  85. # m2m_changed with objects added or removed
  86. m2m_changed = True
  87. event_type = OBJECT_UPDATED
  88. else:
  89. return
  90. # Create/update an ObjectChange record for this change
  91. action = {
  92. OBJECT_CREATED: ObjectChangeActionChoices.ACTION_CREATE,
  93. OBJECT_UPDATED: ObjectChangeActionChoices.ACTION_UPDATE,
  94. OBJECT_DELETED: ObjectChangeActionChoices.ACTION_DELETE,
  95. }[event_type]
  96. objectchange = instance.to_objectchange(action)
  97. # If this is a many-to-many field change, check for a previous ObjectChange instance recorded
  98. # for this object by this request and update it
  99. if m2m_changed and (
  100. prev_change := ObjectChange.objects.filter(
  101. changed_object_type=ContentType.objects.get_for_model(instance),
  102. changed_object_id=instance.pk,
  103. request_id=request.id
  104. ).first()
  105. ):
  106. prev_change.postchange_data = objectchange.postchange_data
  107. prev_change.save()
  108. elif objectchange and objectchange.has_changes:
  109. objectchange.user = request.user
  110. objectchange.request_id = request.id
  111. objectchange.save()
  112. # Ensure that we're working with fresh M2M assignments
  113. if m2m_changed:
  114. instance.refresh_from_db()
  115. # Enqueue the object for event processing
  116. queue = events_queue.get()
  117. enqueue_event(queue, instance, request, event_type)
  118. events_queue.set(queue)
  119. # Increment metric counters
  120. if event_type == OBJECT_CREATED:
  121. model_inserts.labels(instance._meta.model_name).inc()
  122. elif event_type == OBJECT_UPDATED:
  123. model_updates.labels(instance._meta.model_name).inc()
  124. @receiver(pre_delete)
  125. def handle_deleted_object(sender, instance, **kwargs):
  126. """
  127. Fires when an object is deleted.
  128. """
  129. # Run any deletion protection rules for the object. Note that this must occur prior
  130. # to queueing any events for the object being deleted, in case a validation error is
  131. # raised, causing the deletion to fail.
  132. model_name = f'{sender._meta.app_label}.{sender._meta.model_name}'
  133. validators = get_config().PROTECTION_RULES.get(model_name, [])
  134. try:
  135. run_validators(instance, validators)
  136. except ValidationError as e:
  137. raise AbortRequest(
  138. _("Deletion is prevented by a protection rule: {message}").format(message=e)
  139. )
  140. # Get the current request, or bail if not set
  141. request = current_request.get()
  142. if request is None:
  143. return
  144. # Check whether we've already processed a pre_delete signal for this object. (This can
  145. # happen e.g. when both a parent object and its child are deleted simultaneously, due
  146. # to cascading deletion.)
  147. if not hasattr(_signals_received, 'pre_delete'):
  148. _signals_received.pre_delete = set()
  149. signature = (ContentType.objects.get_for_model(instance), instance.pk)
  150. if signature in _signals_received.pre_delete:
  151. return
  152. _signals_received.pre_delete.add(signature)
  153. # Record an ObjectChange if applicable
  154. if hasattr(instance, 'to_objectchange'):
  155. if hasattr(instance, 'snapshot') and not getattr(instance, '_prechange_snapshot', None):
  156. instance.snapshot()
  157. objectchange = instance.to_objectchange(ObjectChangeActionChoices.ACTION_DELETE)
  158. objectchange.user = request.user
  159. objectchange.request_id = request.id
  160. objectchange.save()
  161. # Django does not automatically send an m2m_changed signal for the reverse direction of a
  162. # many-to-many relationship (see https://code.djangoproject.com/ticket/17688), so we need to
  163. # trigger one manually. We do this by checking for any reverse M2M relationships on the
  164. # instance being deleted, and explicitly call .remove() on the remote M2M field to delete
  165. # the association. This triggers an m2m_changed signal with the `post_remove` action type
  166. # for the forward direction of the relationship, ensuring that the change is recorded.
  167. # Similarly, for many-to-one relationships, we set the value on the related object to None
  168. # and save it to trigger a change record on that object.
  169. for relation in instance._meta.related_objects:
  170. if type(relation) not in [ManyToManyRel, ManyToOneRel]:
  171. continue
  172. related_model = relation.related_model
  173. related_field_name = relation.remote_field.name
  174. if not issubclass(related_model, ChangeLoggingMixin):
  175. # We only care about triggering the m2m_changed signal for models which support
  176. # change logging
  177. continue
  178. for obj in related_model.objects.filter(**{related_field_name: instance.pk}):
  179. obj.snapshot() # Ensure the change record includes the "before" state
  180. if type(relation) is ManyToManyRel:
  181. getattr(obj, related_field_name).remove(instance)
  182. elif type(relation) is ManyToOneRel and relation.field.null is True:
  183. setattr(obj, related_field_name, None)
  184. # make sure the object hasn't been deleted - in case of
  185. # deletion chaining of related objects
  186. try:
  187. obj.refresh_from_db()
  188. except DoesNotExist:
  189. continue
  190. obj.save()
  191. # Enqueue the object for event processing
  192. queue = events_queue.get()
  193. enqueue_event(queue, instance, request, OBJECT_DELETED)
  194. events_queue.set(queue)
  195. # Increment metric counters
  196. model_deletes.labels(instance._meta.model_name).inc()
  197. @receiver(request_finished)
  198. def clear_signal_history(sender, **kwargs):
  199. """
  200. Clear out the signals history once the request is finished.
  201. """
  202. _signals_received.pre_delete = set()
  203. @receiver(clear_events)
  204. def clear_events_queue(sender, **kwargs):
  205. """
  206. Delete any queued events (e.g. because of an aborted bulk transaction)
  207. """
  208. logger = logging.getLogger('events')
  209. logger.info(f"Clearing {len(events_queue.get())} queued events ({sender})")
  210. events_queue.set({})
  211. #
  212. # DataSource handlers
  213. #
  214. @receiver(post_save, sender=DataSource)
  215. def enqueue_sync_job(instance, created, **kwargs):
  216. """
  217. When a DataSource is saved, check its sync_interval and enqueue a sync job if appropriate.
  218. """
  219. from .jobs import SyncDataSourceJob
  220. if instance.enabled and instance.sync_interval:
  221. SyncDataSourceJob.enqueue_once(instance, interval=instance.sync_interval)
  222. elif not created:
  223. # Delete any previously scheduled recurring jobs for this DataSource
  224. for job in SyncDataSourceJob.get_jobs(instance).defer('data').filter(
  225. interval__isnull=False,
  226. status=JobStatusChoices.STATUS_SCHEDULED
  227. ):
  228. # Call delete() per instance to ensure the associated background task is deleted as well
  229. job.delete()
  230. @receiver(post_sync)
  231. def auto_sync(instance, **kwargs):
  232. """
  233. Automatically synchronize any DataFiles with AutoSyncRecords after synchronizing a DataSource.
  234. """
  235. from .models import AutoSyncRecord
  236. for autosync in AutoSyncRecord.objects.filter(datafile__source=instance).prefetch_related('object'):
  237. autosync.object.sync(save=True)
  238. @receiver(post_save, sender=ConfigRevision)
  239. def update_config(sender, instance, **kwargs):
  240. """
  241. Update the cached NetBox configuration when a new ConfigRevision is created.
  242. """
  243. instance.activate()