xcad 5 ماه پیش
والد
کامیت
23f6fb8cac
100 فایل‌های تغییر یافته به همراه 2073 افزوده شده و 1169 حذف شده
  1. 23 0
      --directory/compose.yaml
  2. 18 0
      --directory/config/files/middlewares.yaml
  3. 27 0
      --directory/config/files/routers.yaml
  4. 28 0
      --directory/config/files/services.yaml
  5. 24 0
      --directory/config/traefik.yaml
  6. 0 21
      archetypes/compose/archetypes.yaml
  7. 7 1
      archetypes/compose/service-deploy-v1.j2
  8. 7 3
      archetypes/compose/service-labels-v1.j2
  9. 22 2
      cli/core/display/__init__.py
  10. 1 0
      cli/core/display/display_base.py
  11. 2 1
      cli/core/display/display_settings.py
  12. 28 9
      cli/core/display/display_status.py
  13. 27 6
      cli/core/display/display_template.py
  14. 1 1
      cli/core/input/input_settings.py
  15. 11 8
      cli/core/input/prompt_manager.py
  16. 86 64
      cli/core/module/base_commands.py
  17. 10 4
      cli/core/repo.py
  18. 43 0
      cli/modules/ansible/spec_v1_0.py
  19. 6 0
      cli/modules/compose/spec_v1_2.py
  20. 10 3
      cli/modules/helm/spec_v1_0.py
  21. 80 0
      cli/modules/kubernetes/spec_v1_0.py
  22. 25 0
      library/ansible/checkmk-install-agent/playbook.yaml.j2
  23. 58 0
      library/ansible/checkmk-install-agent/template.yaml
  24. 29 0
      library/ansible/checkmk-manage-hosts/playbook.yaml.j2
  25. 63 0
      library/ansible/checkmk-manage-hosts/template.yaml
  26. 0 19
      library/ansible/checkmk/activate-changes.yaml
  27. 0 17
      library/ansible/checkmk/install-agent.yaml
  28. 0 25
      library/ansible/checkmk/lookup-rule.yaml
  29. 0 22
      library/ansible/checkmk/manage-hosts.yaml
  30. 0 71
      library/ansible/checkmk/manage-rules.yaml
  31. 0 3
      library/ansible/checkmk/secrets.yaml
  32. 0 76
      library/ansible/discord/notify-discord.yaml
  33. 17 8
      library/ansible/docker-certs-enable/playbook.yaml.j2
  34. 31 0
      library/ansible/docker-certs-enable/template.yaml
  35. 167 0
      library/ansible/docker-certs/playbook.yaml.j2
  36. 39 0
      library/ansible/docker-certs/template.yaml
  37. 12 3
      library/ansible/docker-install-ubuntu/playbook.yaml.j2
  38. 23 0
      library/ansible/docker-install-ubuntu/template.yaml
  39. 12 2
      library/ansible/docker-prune/playbook.yaml.j2
  40. 23 0
      library/ansible/docker-prune/template.yaml
  41. 0 158
      library/ansible/docker/docker-certs.yaml
  42. 0 46
      library/ansible/kubernetes/README.md
  43. 0 2
      library/ansible/kubernetes/ansible.cfg
  44. 0 318
      library/ansible/kubernetes/inst-k8s.yaml
  45. 0 1
      library/ansible/kubernetes/k8s_worker_node_connection.j2
  46. 0 21
      library/ansible/portainer/deploy-portainer.yaml
  47. 0 18
      library/ansible/traefik/deploy-traefik.yaml
  48. 28 0
      library/ansible/ubuntu-add-sshkey/playbook.yaml.j2
  49. 23 0
      library/ansible/ubuntu-add-sshkey/template.yaml
  50. 12 2
      library/ansible/ubuntu-apt-update/playbook.yaml.j2
  51. 25 0
      library/ansible/ubuntu-apt-update/template.yaml
  52. 11 2
      library/ansible/ubuntu-vm-core/playbook.yaml.j2
  53. 23 0
      library/ansible/ubuntu-vm-core/template.yaml
  54. 0 19
      library/ansible/ubuntu/config-add-sshkey.yaml
  55. 0 11
      library/ansible/ubuntu/inst-qemu-agent.yaml
  56. 0 12
      library/ansible/ubuntu/inst-zsh.yaml
  57. 0 25
      library/ansible/ubuntu/maint-diskspace.yaml
  58. 0 16
      library/ansible/ubuntu/maint-reboot-required.yaml
  59. 0 9
      library/ansible/ubuntu/maint-reboot.yaml
  60. 0 16
      library/ansible/wireguard/inst-wireguard.yaml
  61. 12 0
      library/compose/grafana/.env.j2
  62. 1 0
      library/compose/grafana/.env.secret.grafana_db_password.j2
  63. 1 0
      library/compose/grafana/.env.secret.grafana_oauth_client_id.j2
  64. 1 0
      library/compose/grafana/.env.secret.grafana_oauth_client_secret.j2
  65. 81 2
      library/compose/grafana/compose.yaml.j2
  66. 26 0
      library/compose/grafana/template.yaml
  67. 149 28
      library/compose/loki/compose.yaml.j2
  68. 19 4
      library/compose/loki/template.yaml
  69. 20 19
      library/compose/pihole/template.yaml
  70. 80 17
      library/compose/prometheus/compose.yaml.j2
  71. 2 0
      library/compose/prometheus/template.yaml
  72. 2 2
      library/compose/traefik/config/files/middlewares.yaml.j2
  73. 3 10
      library/compose/traefik/template.yaml
  74. 1 1
      library/helm/authentik/values.yaml.j2
  75. 15 0
      library/kubernetes/certmanager-certificate/certificate.yaml.j2
  76. 0 15
      library/kubernetes/certmanager-certificate/certmanager-certificate.yaml
  77. 37 0
      library/kubernetes/certmanager-certificate/template.yaml
  78. 0 17
      library/kubernetes/certmanager-clusterissuer/certmanager-clusterissuer.yaml
  79. 17 0
      library/kubernetes/certmanager-clusterissuer/clusterissuer.yaml.j2
  80. 42 0
      library/kubernetes/certmanager-clusterissuer/template.yaml
  81. 18 0
      library/kubernetes/certmanager-issuer/issuer.yaml.j2
  82. 45 0
      library/kubernetes/certmanager-issuer/template.yaml
  83. 12 0
      library/kubernetes/core-configmap/configmap.yaml.j2
  84. 21 0
      library/kubernetes/core-configmap/template.yaml
  85. 19 0
      library/kubernetes/core-ingress/ingress.yaml.j2
  86. 39 0
      library/kubernetes/core-ingress/template.yaml
  87. 11 0
      library/kubernetes/core-ingressclass/ingressclass.yaml.j2
  88. 29 0
      library/kubernetes/core-ingressclass/template.yaml
  89. 16 0
      library/kubernetes/core-persistentvolume/pv.yaml.j2
  90. 50 0
      library/kubernetes/core-persistentvolume/template.yaml
  91. 15 0
      library/kubernetes/core-persistentvolumeclaim/pvc.yaml.j2
  92. 38 0
      library/kubernetes/core-persistentvolumeclaim/template.yaml
  93. 0 9
      library/kubernetes/core-secret/core-secret.yaml
  94. 31 0
      library/kubernetes/core-secret/template.yaml
  95. 15 0
      library/kubernetes/core-service/service.yaml.j2
  96. 49 0
      library/kubernetes/core-service/template.yaml
  97. 6 0
      library/kubernetes/core-serviceaccount/serviceaccount.yaml.j2
  98. 21 0
      library/kubernetes/core-serviceaccount/template.yaml
  99. 8 0
      library/kubernetes/core-storageclass/storageclass.yaml.j2
  100. 39 0
      library/kubernetes/core-storageclass/template.yaml

+ 23 - 0
--directory/compose.yaml

@@ -0,0 +1,23 @@
+services:
+  grafana:
+    image: docker.io/grafana/grafana-oss:12.1.1
+    restart: unless-stopped
+    container_name: grafana
+    environment:
+      - TZ=Europe/Berlin
+      - UID=1000
+      - GID=1000
+    networks:
+      bridge:
+    ports:
+      - "3000:3000"
+    volumes:
+      - grafana-data:/var/lib/grafana
+
+volumes:
+  grafana-data:
+    driver: local
+
+networks:
+  bridge:
+    driver: bridge

+ 18 - 0
--directory/config/files/middlewares.yaml

@@ -0,0 +1,18 @@
+---
+# Traefik Dynamic Middleware Configuration
+# This file is watched by Traefik and changes are applied automatically
+
+http:
+  middlewares:
+# Production-Ready Security Headers Middleware
+    # Use in service labels: traefik.http.routers.myservice.middlewares=security-headers@file
+    security-headers:
+      headers:
+        frameDeny: true
+        browserXssFilter: true
+        contentTypeNosniff: true
+        sslRedirect: true
+        forceSTSHeader: true
+        stsSeconds: 31536000
+        stsIncludeSubdomains: true
+        stsPreload: true

+ 27 - 0
--directory/config/files/routers.yaml

@@ -0,0 +1,27 @@
+---
+# Traefik Dynamic Router Configuration
+# Define routers to route traffic to services
+# Uncomment and customize the examples below
+
+# http:
+#   routers:
+#     # Example 1: Simple host-based routing with HTTPS
+#     my-app:
+#       rule: "Host(`app.example.com`)"
+#       service: my-app-service
+#       entryPoints:
+#         - websecure
+#       tls:
+#         certResolver: cloudflare
+#
+#     # Example 2: Path-based routing with middleware
+#     api:
+#       rule: "Host(`example.com`) && PathPrefix(`/api`)"
+#       service: api-service
+#       priority: 10
+#       entryPoints:
+#         - websecure
+#       tls:
+#         certResolver: cloudflare
+#       middlewares:
+#         - rate-limit@file

+ 28 - 0
--directory/config/files/services.yaml

@@ -0,0 +1,28 @@
+---
+# Traefik Dynamic Service Configuration
+# Define backend services that routers connect to
+# Uncomment and customize the examples below
+
+# http:
+#   services:
+#     # Example 1: Single backend server
+#     my-app-service:
+#       loadBalancer:
+#         servers:
+#           - url: "http://192.168.1.100:8080"
+#
+#     # Example 2: Load balanced service with multiple backends
+#     api-service:
+#       loadBalancer:
+#         servers:
+#           - url: "http://192.168.1.10:8080"
+#           - url: "http://192.168.1.11:8080"
+#         sticky:
+#           cookie:
+#             name: api-sticky
+#             httpOnly: true
+#
+# # Server Transport for HTTPS backends with self-signed certificates
+# serversTransports:
+#   insecure:
+#     insecureSkipVerify: true

+ 24 - 0
--directory/config/traefik.yaml

@@ -0,0 +1,24 @@
+---
+global:
+  checkNewVersion: false
+  sendAnonymousUsage: false
+
+log:
+  level: INFO
+
+ping:
+  entryPoint: ping
+
+entryPoints:
+  ping:
+    address: :8082
+  web:
+    address: :80
+
+providers:
+  docker:
+    exposedByDefault: false
+    network: proxy
+  file:
+    directory: /etc/traefik/files
+    watch: true

+ 0 - 21
archetypes/compose/archetypes.yaml

@@ -15,24 +15,3 @@ vars:
     default: testapp-host
   traefik_host:
     default: testapp.home.arpa
-  
-  # Additional variables not in module spec
-  service_image:
-    type: str
-    default: testapp:latest
-    description: Docker image for the service
-  
-  service_port:
-    type: int
-    default: 80
-    description: Internal container port
-  
-  volume_name:
-    type: str
-    default: testapp-data
-    description: Volume name for persistent storage
-  
-  traefik_middleware:
-    type: str
-    default: none
-    description: Traefik middleware chain (comma-separated)

+ 7 - 1
archetypes/compose/service-deploy-v1.j2

@@ -23,16 +23,22 @@
       labels:
         - traefik.enable=true
         - traefik.docker.network={{ traefik_network }}
-        - traefik.http.services.{{ service_name }}-web.loadBalancer.server.port={{ service_port }}
+        - traefik.http.services.{{ service_name }}-web.loadBalancer.server.port=80
         - traefik.http.routers.{{ service_name }}-http.service={{ service_name }}-web
         - traefik.http.routers.{{ service_name }}-http.rule=Host(`{{ traefik_host }}`)
         - traefik.http.routers.{{ service_name }}-http.entrypoints={{ traefik_entrypoint }}
+        {% if authentik_enabled %}
+        - traefik.http.routers.{{ service_name }}-http.middlewares={{ authentik_traefik_middleware }}
+        {% endif %}
         {% if traefik_tls_enabled %}
         - traefik.http.routers.{{ service_name }}-https.service={{ service_name }}-web
         - traefik.http.routers.{{ service_name }}-https.rule=Host(`{{ traefik_host }}`)
         - traefik.http.routers.{{ service_name }}-https.entrypoints={{ traefik_tls_entrypoint }}
         - traefik.http.routers.{{ service_name }}-https.tls=true
         - traefik.http.routers.{{ service_name }}-https.tls.certresolver={{ traefik_tls_certresolver }}
+        {% if authentik_enabled %}
+        - traefik.http.routers.{{ service_name }}-https.middlewares={{ authentik_traefik_middleware }}
+        {% endif %}
         {% endif %}
       {% endif %}
     {% endif %}

+ 7 - 3
archetypes/compose/service-labels-v1.j2

@@ -2,17 +2,21 @@
     labels:
       - traefik.enable=true
       - traefik.docker.network={{ traefik_network }}
-      - traefik.http.services.{{ service_name }}-web.loadBalancer.server.port={{ service_port }}
+      - traefik.http.services.{{ service_name }}-web.loadBalancer.server.port=80
       - traefik.http.routers.{{ service_name }}-http.service={{ service_name }}-web
       - traefik.http.routers.{{ service_name }}-http.rule=Host(`{{ traefik_host }}`)
       - traefik.http.routers.{{ service_name }}-http.entrypoints={{ traefik_entrypoint }}
-      - traefik.http.routers.{{ service_name }}-http.middlewares={{ traefik_middleware }}
+      {% if authentik_enabled %}
+      - traefik.http.routers.{{ service_name }}-http.middlewares={{ authentik_traefik_middleware }}
+      {% endif %}
       {% if traefik_tls_enabled %}
       - traefik.http.routers.{{ service_name }}-https.service={{ service_name }}-web
       - traefik.http.routers.{{ service_name }}-https.rule=Host(`{{ traefik_host }}`)
       - traefik.http.routers.{{ service_name }}-https.entrypoints={{ traefik_tls_entrypoint }}
       - traefik.http.routers.{{ service_name }}-https.tls=true
       - traefik.http.routers.{{ service_name }}-https.tls.certresolver={{ traefik_tls_certresolver }}
-      - traefik.http.routers.{{ service_name }}-https.middlewares={{ traefik_middleware }}
+      {% if authentik_enabled %}
+      - traefik.http.routers.{{ service_name }}-https.middlewares={{ authentik_traefik_middleware }}
+      {% endif %}
       {% endif %}
     {% endif %}

+ 22 - 2
cli/core/display/__init__.py

@@ -62,6 +62,18 @@ class DisplayManager:
         """Display a heading."""
         return self.base.heading(text, style)
 
+    def section(self, title: str, description: str | None = None) -> None:
+        """Display a section header with optional description.
+
+        Args:
+            title: Section title
+            description: Optional section description
+        """
+        self.base.text("")
+        self.base.text(f"[bold cyan]{title}[/bold cyan]")
+        if description:
+            self.base.text(f"[dim]{description}[/dim]")
+
     def table(
         self,
         headers: list[str] | None = None,
@@ -115,9 +127,9 @@ class DisplayManager:
         return self.tables.data_table(columns, rows, title, row_formatter)
 
     # ===== Delegate to status display =====
-    def error(self, message: str, context: str | None = None) -> None:
+    def error(self, message: str, context: str | None = None, details: str | None = None) -> None:
         """Display an error message."""
-        return self.status.error(message, context)
+        return self.status.error(message, context, details)
 
     def warning(self, message: str, context: str | None = None) -> None:
         """Display a warning message."""
@@ -140,6 +152,14 @@ class DisplayManager:
         """Get lock icon."""
         return self.base.get_lock_icon()
 
+    def print_table(self, table) -> None:
+        """Print a pre-built Rich Table object.
+
+        Args:
+            table: Rich Table object to print
+        """
+        return self.base._print_table(table)
+
 
 # Export public API
 __all__ = [

+ 1 - 0
cli/core/display/display_base.py

@@ -45,6 +45,7 @@ class BaseDisplay:
         if style is None:
             style = self.settings.STYLE_HEADER
         console.print(f"[{style}]{text}[/{style}]")
+        console.print("")  # Add newline after heading
 
     def text(self, text: str, style: str | None = None) -> None:
         """Display plain text with optional styling.

+ 2 - 1
cli/core/display/display_settings.py

@@ -21,11 +21,12 @@ class DisplaySettings:
     COLOR_LIBRARY_STATIC = "yellow"
 
     # === Style Constants ===
-    STYLE_HEADER = "bold blue"
+    STYLE_HEADER = "bold white underline"
     STYLE_HEADER_ALT = "bold cyan"
     STYLE_DISABLED = "bright_black"
     STYLE_SECTION_TITLE = "bold cyan"
     STYLE_SECTION_DESC = "dim"
+    STYLE_TEMPLATE_NAME = "bold white"
 
     # Table styles
     STYLE_TABLE_HEADER = "bold blue"

+ 28 - 9
cli/core/display/display_status.py

@@ -83,14 +83,12 @@ class StatusDisplay:
             return
 
         settings = self.settings
-        icon = IconManager.get_status_icon(level)
         colors = {
             "error": settings.COLOR_ERROR,
             "warning": settings.COLOR_WARNING,
             "success": settings.COLOR_SUCCESS,
-            "info": settings.COLOR_INFO,
         }
-        color = colors.get(level, "white")
+        color = colors.get(level)
 
         # Format message based on context
         if context:
@@ -102,7 +100,14 @@ class StatusDisplay:
         else:
             text = f"{level.capitalize()}: {message}" if level in {"error", "warning"} else message
 
-        formatted_text = f"[{color}]{icon} {text}[/{color}]"
+        # Only use icons and colors for actual status indicators (error, warning, success)
+        # Plain info messages use default terminal color (no markup)
+        if level in {"error", "warning", "success"}:
+            icon = IconManager.get_status_icon(level)
+            formatted_text = f"[{color}]{icon} {text}[/{color}]"
+        else:
+            formatted_text = text
+
         if use_stderr:
             console_err.print(formatted_text)
         else:
@@ -118,14 +123,29 @@ class StatusDisplay:
         }
         log_methods.get(level, logger.info)(log_message)
 
-    def error(self, message: str, context: str | None = None) -> None:
+    def error(self, message: str, context: str | None = None, details: str | None = None) -> None:
         """Display an error message.
 
         Args:
             message: Error message
             context: Optional context
+            details: Optional additional details (shown in dim style on same line)
         """
-        self._display_message("error", message, context)
+        if details:
+            # Combine message and details on same line with different formatting
+            settings = self.settings
+            color = settings.COLOR_ERROR
+            icon = IconManager.get_status_icon("error")
+
+            # Format: Icon Error: Message (details in dim)
+            formatted = f"[{color}]{icon} Error: {message}[/{color}] [dim]({details})[/dim]"
+            console_err.print(formatted)
+
+            # Log at debug level to avoid duplicate console output (already printed to stderr)
+            logger.debug(f"Error displayed: {message} ({details})")
+        else:
+            # No details, use standard display
+            self._display_message("error", message, context)
 
     def warning(self, message: str, context: str | None = None) -> None:
         """Display a warning message.
@@ -161,11 +181,10 @@ class StatusDisplay:
             message: The main message to display
             reason: Optional reason why it was skipped
         """
-        icon = IconManager.get_status_icon("skipped")
         if reason:
-            self.base.text(f"\n{icon} {message} (skipped - {reason})", style="dim")
+            self.base.text(f"\n{message} (skipped - {reason})", style="dim")
         else:
-            self.base.text(f"\n{icon} {message} (skipped)", style="dim")
+            self.base.text(f"\n{message} (skipped)", style="dim")
 
     def markdown(self, content: str) -> None:
         """Render markdown content with left-aligned headings.

+ 27 - 6
cli/core/display/display_template.py

@@ -72,11 +72,32 @@ class TemplateDisplay:
         color = "yellow" if library_type == "static" else "blue"
         library_display = f"[{color}]{icon} {library_name}[/{color}]"
 
-        self.base.text(
-            f"{template_name} ({template_id} - [cyan]{version}[/cyan] - "
-            f"[magenta]schema {schema}[/magenta]) {library_display}",
-            style=settings.STYLE_HEADER,
-        )
+        # Create custom H1-style header with Rich markup support
+        from rich import box
+        from rich.console import Console
+        from rich.panel import Panel
+        from rich.text import Text
+
+        # Build header content with Rich formatting
+        header_content = Text()
+        header_content.append(template_name, style="bold white")
+        header_content.append(" (", style="white")
+        header_content.append("id:", style="white")
+        header_content.append(template_id, style="dim")
+        header_content.append(" │ ", style="dim")
+        header_content.append("version:", style="white")
+        header_content.append(version, style="cyan")
+        header_content.append(" │ ", style="dim")
+        header_content.append("schema:", style="white")
+        header_content.append(schema, style="magenta")
+        header_content.append(" │ ", style="dim")
+        header_content.append("library:", style="white")
+        header_content.append(icon + " ", style=color)
+        header_content.append(library_name, style=color)
+        header_content.append(")", style="white")
+
+        panel = Panel(header_content, box=box.HEAVY, style="markdown.h1.border")
+        Console().print(panel)
         self.base.text("")
         self.status.markdown(description)
 
@@ -118,7 +139,7 @@ class TemplateDisplay:
             existing_files: List of existing files that will be overwritten
         """
         self.base.text("")
-        self.base.text("Files to be generated:", style="bold")
+        self.base.heading("Files to be Generated")
 
         def get_file_generation_info(file_path_str):
             file_path = Path(file_path_str)

+ 1 - 1
cli/core/input/input_settings.py

@@ -13,7 +13,7 @@ class InputSettings:
     """
 
     # === Prompt Styles ===
-    PROMPT_STYLE = "cyan"
+    PROMPT_STYLE = "white"
     PROMPT_DEFAULT_STYLE = "dim"
     PROMPT_ERROR_STYLE = "red"
     PROMPT_SUCCESS_STYLE = "green"

+ 11 - 8
cli/core/input/prompt_manager.py

@@ -4,9 +4,10 @@ import logging
 from typing import Any, Callable
 
 from rich.console import Console
-from rich.prompt import Confirm, IntPrompt, Prompt
+from rich.prompt import IntPrompt, Prompt
 
 from ..display import DisplayManager
+from ..input import InputManager
 from ..template import Variable, VariableCollection
 
 logger = logging.getLogger(__name__)
@@ -33,7 +34,7 @@ class PromptHandler:
             return True
 
         current_value = toggle_var.convert(toggle_var.value)
-        new_value = self._prompt_variable(toggle_var, required=section.required)
+        new_value = self._prompt_variable(toggle_var, _required=section.required)
 
         if new_value != current_value:
             collected[toggle_var.name] = new_value
@@ -65,7 +66,7 @@ class PromptHandler:
     def _collect_variable_value(self, variable: Variable, section, collected: dict[str, Any]) -> None:
         """Collect a single variable value and update if changed."""
         current_value = variable.convert(variable.value)
-        new_value = self._prompt_variable(variable, required=section.required)
+        new_value = self._prompt_variable(variable, _required=section.required)
 
         if variable.autogenerated and new_value is None:
             collected[variable.name] = None
@@ -83,13 +84,14 @@ class PromptHandler:
         Returns:
             Dict of variable names to collected values
         """
-        if not Confirm.ask("Customize any settings?", default=False):
+        input_mgr = InputManager()
+        if not input_mgr.confirm("Customize any settings?", default=False):
             logger.info("User opted to keep all default values")
             return {}
 
         collected: dict[str, Any] = {}
 
-        for _section_key, section in variables.iter_active_sections(include_disabled=True):
+        for section_key, section in variables.get_sections().items():
             if not section.variables:
                 continue
 
@@ -173,7 +175,7 @@ class PromptHandler:
                 text,
                 variable.options or [],
                 default,
-                extra=getattr(variable, "extra", None),
+                _extra=getattr(variable, "extra", None),
             ),
         }
         return handlers.get(
@@ -196,10 +198,11 @@ class PromptHandler:
         return stripped if stripped else None
 
     def _prompt_bool(self, prompt_text: str, default: Any = None) -> bool | None:
+        input_mgr = InputManager()
         if default is None:
-            return Confirm.ask(prompt_text, default=None)
+            return input_mgr.confirm(prompt_text, default=None)
         converted = default if isinstance(default, bool) else str(default).lower() in ("true", "1", "yes", "on")
-        return Confirm.ask(prompt_text, default=converted)
+        return input_mgr.confirm(prompt_text, default=converted)
 
     def _prompt_int(self, prompt_text: str, default: Any = None) -> int | None:
         converted = None

+ 86 - 64
cli/core/module/base_commands.py

@@ -101,7 +101,6 @@ def list_templates(module_instance, raw: bool = False) -> list:
                     {"name": "Library", "no_wrap": True},
                 ],
                 rows=filtered_templates,
-                title=f"{module_instance.name.capitalize()} templates",
                 row_formatter=format_template_row,
             )
     else:
@@ -144,7 +143,6 @@ def search_templates(module_instance, query: str) -> list:
                 {"name": "Library", "no_wrap": True},
             ],
             rows=filtered_templates,
-            title=f"{module_instance.name.capitalize()} templates matching '{query}'",
             row_formatter=format_template_row,
         )
     else:
@@ -210,9 +208,13 @@ def check_output_directory(
     # Warn if directory is not empty
     if dir_not_empty:
         if interactive:
-            display.warning(f"Directory '{output_dir}' is not empty.")
+            display.text("")  # Add newline before warning
+            # Combine directory warning and file count on same line
+            warning_msg = f"Directory '{output_dir}' is not empty."
             if existing_files:
-                display.text(f"  {len(existing_files)} file(s) will be overwritten.")
+                warning_msg += f" {len(existing_files)} file(s) will be overwritten."
+            display.warning(warning_msg)
+            display.text("")  # Add newline after warning
 
             input_mgr = InputManager()
             if not input_mgr.confirm("Continue?", default=False):
@@ -229,23 +231,7 @@ def check_output_directory(
 
 def get_generation_confirmation(ctx: ConfirmationContext) -> bool:
     """Display file generation confirmation and get user approval."""
-    if not ctx.interactive:
-        return True
-
-    # Skip file confirmation display in dry-run mode
-    if not ctx.dry_run:
-        # Use templates.render_file_generation_confirmation directly for now
-        ctx.display.templates.render_file_generation_confirmation(
-            ctx.output_dir, ctx.rendered_files, ctx.existing_files if ctx.existing_files else None
-        )
-
-        # Final confirmation (only if we didn't already ask about overwriting)
-        if not ctx.dir_not_empty:
-            input_mgr = InputManager()
-            if not input_mgr.confirm("Generate these files?", default=True):
-                ctx.display.info("Generation cancelled")
-                return False
-
+    # No confirmation needed - either non-interactive, dry-run, or already confirmed during directory check
     return True
 
 
@@ -300,8 +286,12 @@ def execute_dry_run(
     rendered_files: dict[str, str],
     show_files: bool,
     display: DisplayManager,
-) -> None:
-    """Execute dry run mode - preview files without writing."""
+) -> tuple[int, int, str]:
+    """Execute dry run mode - preview files without writing.
+
+    Returns:
+        Tuple of (total_files, overwrite_files, size_str) for summary display
+    """
     _file_operations, total_size, _new_files, overwrite_files = _analyze_file_operations(output_dir, rendered_files)
     size_str = _format_size(total_size)
 
@@ -315,17 +305,8 @@ def execute_dry_run(
             display.text(content)
         display.text("")
 
-    # Show summary message
-    display.text("")
-    if overwrite_files > 0:
-        display.warning(
-            f"Dry run: {len(rendered_files)} files ({size_str}) would be written to '{output_dir}' "
-            f"({overwrite_files} files would be overwritten)"
-        )
-    else:
-        display.success(f"Dry run: {len(rendered_files)} files ({size_str}) would be written to '{output_dir}'")
-
     logger.info(f"Dry run completed for template '{id}' - {len(rendered_files)} files, {total_size} bytes")
+    return len(rendered_files), overwrite_files, size_str
 
 
 def write_generated_files(
@@ -342,11 +323,7 @@ def write_generated_files(
         full_path.parent.mkdir(parents=True, exist_ok=True)
         with full_path.open("w", encoding="utf-8") as f:
             f.write(content)
-        if not quiet:
-            display.success(f"Generated file: {file_path}")
 
-    if not quiet:
-        display.success(f"Template generated successfully in '{output_dir}'")
     logger.info(f"Template written to directory: {output_dir}")
 
 
@@ -408,6 +385,38 @@ def _determine_output_dir(directory: str | None, id: str) -> Path:
     return output_dir
 
 
+def _display_template_error(display: DisplayManager, template_id: str, error: TemplateRenderError) -> None:
+    """Display template rendering error with clean formatting."""
+    display.text("")
+    display.text("─" * 80, style="dim")
+    display.text("")
+
+    # Build details if available
+    details = None
+    if error.file_path:
+        details = error.file_path
+        if error.line_number:
+            details += f":line {error.line_number}"
+
+    # Display error with details
+    display.error(f"Failed to generate boilerplate from template '{template_id}'", details=details)
+
+
+def _display_generic_error(display: DisplayManager, template_id: str, error: Exception) -> None:
+    """Display generic error with clean formatting."""
+    display.text("")
+    display.text("─" * 80, style="dim")
+    display.text("")
+
+    # Truncate long error messages
+    error_msg = str(error)
+    if len(error_msg) > 100:
+        error_msg = error_msg[:100] + "..."
+
+    # Display error with details
+    display.error(f"Failed to generate boilerplate from template '{template_id}'", details=error_msg)
+
+
 def generate_template(module_instance, config: GenerationConfig) -> None:
     """Generate from template."""
     logger.info(f"Starting generation for template '{config.id}' from module '{module_instance.name}'")
@@ -448,14 +457,15 @@ def generate_template(module_instance, config: GenerationConfig) -> None:
                 return  # User cancelled
 
         # Execute generation (dry run or actual)
+        dry_run_stats = None
         if config.dry_run:
             if not config.quiet:
-                execute_dry_run(config.id, output_dir, rendered_files, config.show_files, display)
+                dry_run_stats = execute_dry_run(config.id, output_dir, rendered_files, config.show_files, display)
         else:
             write_generated_files(output_dir, rendered_files, config.quiet, display)
 
         # Display next steps (not in quiet mode)
-        if template.metadata.next_steps and not config.quiet and not config.dry_run:
+        if template.metadata.next_steps and not config.quiet:
             display.text("")
             display.heading("Next Steps")
             try:
@@ -467,11 +477,31 @@ def generate_template(module_instance, config: GenerationConfig) -> None:
                 # Fallback to plain text if rendering fails
                 display.status.markdown(template.metadata.next_steps)
 
+        # Display final status message at the end
+        if not config.quiet:
+            display.text("")
+            display.text("─" * 80, style="dim")
+
+            if config.dry_run and dry_run_stats:
+                total_files, overwrite_files, size_str = dry_run_stats
+                if overwrite_files > 0:
+                    display.warning(
+                        f"Dry run complete: {total_files} files ({size_str}) would be written to '{output_dir}' "
+                        f"({overwrite_files} would be overwritten)"
+                    )
+                else:
+                    display.success(
+                        f"Dry run complete: {total_files} files ({size_str}) would be written to '{output_dir}'"
+                    )
+            else:
+                # Actual generation completed
+                display.success(f"Boilerplate generated successfully in '{output_dir}'")
+
     except TemplateRenderError as e:
-        display.error(str(e), context=f"template '{config.id}'")
+        _display_template_error(display, config.id, e)
         raise Exit(code=1) from None
     except Exception as e:
-        display.error(str(e), context=f"generating template '{config.id}'")
+        _display_generic_error(display, config.id, e)
         raise Exit(code=1) from None
 
 
@@ -513,7 +543,7 @@ def _load_template_for_validation(module_instance, template_id: str, path: str |
     if template_id:
         try:
             template = module_instance._load_template_by_id(template_id)
-            module_instance.display.info(f"[bold]Validating template:[/bold] [cyan]{template_id}[/cyan]")
+            module_instance.display.info(f"Validating template: {template_id}")
             return template
         except Exception as e:
             module_instance.display.error(f"Failed to load template '{template_id}': {e}")
@@ -553,7 +583,7 @@ def _validate_single_template(module_instance, template, template_id: str, verbo
 def _run_semantic_validation(module_instance, template, verbose: bool) -> None:
     """Run semantic validation on rendered template files."""
     module_instance.display.info("")
-    module_instance.display.info("[bold cyan]Running semantic validation...[/bold cyan]")
+    module_instance.display.info("Running semantic validation...")
 
     registry = get_validator_registry()
     debug_mode = logger.isEnabledFor(logging.DEBUG)
@@ -564,7 +594,7 @@ def _run_semantic_validation(module_instance, template, verbose: bool) -> None:
         result = registry.validate_file(content, file_path)
 
         if result.errors or result.warnings or (verbose and result.info):
-            module_instance.display.info(f"\n[cyan]File:[/cyan] {file_path}")
+            module_instance.display.info(f"\nFile: {file_path}")
             result.display(f"{file_path}")
 
             if result.errors:
@@ -579,17 +609,17 @@ def _run_semantic_validation(module_instance, template, verbose: bool) -> None:
 
 def _display_validation_details(module_instance, template, semantic: bool) -> None:
     """Display verbose validation details."""
-    module_instance.display.info(f"\n[dim]Template path: {template.template_dir}[/dim]")
-    module_instance.display.info(f"[dim]Found {len(template.used_variables)} variables[/dim]")
+    module_instance.display.info(f"\nTemplate path: {template.template_dir}")
+    module_instance.display.info(f"Found {len(template.used_variables)} variables")
     if semantic:
         debug_mode = logger.isEnabledFor(logging.DEBUG)
         rendered_files, _ = template.render(template.variables, debug=debug_mode)
-        module_instance.display.info(f"[dim]Generated {len(rendered_files)} files[/dim]")
+        module_instance.display.info(f"Generated {len(rendered_files)} files")
 
 
 def _validate_all_templates(module_instance, verbose: bool) -> None:
     """Validate all templates in the module."""
-    module_instance.display.info(f"[bold]Validating all {module_instance.name} templates...[/bold]")
+    module_instance.display.info(f"Validating all {module_instance.name} templates...")
 
     valid_count = 0
     invalid_count = 0
@@ -617,25 +647,17 @@ def _validate_all_templates(module_instance, verbose: bool) -> None:
                 module_instance.display.warning(template.id)
 
     # Display summary
-    summary_rows = [
-        ("Total templates:", str(total)),
-        ("[green]Valid:[/green]", str(valid_count)),
-        ("[red]Invalid:[/red]", str(invalid_count)),
-    ]
-    module_instance.display.table(
-        headers=None,
-        rows=summary_rows,
-        title="Validation Summary",
-        show_header=False,
-        borderless=True,
-    )
+    module_instance.display.info("")
+    module_instance.display.info(f"Total templates: {total}")
+    module_instance.display.info(f"Valid: {valid_count}")
+    module_instance.display.info(f"Invalid: {invalid_count}")
 
     if errors:
         module_instance.display.info("")
-        module_instance.display.error("Validation Errors:")
         for template_id, error_msg in errors:
-            module_instance.display.info(f"\n[yellow]Template:[/yellow] [cyan]{template_id}[/cyan]")
-            module_instance.display.info(f"[dim]{error_msg}[/dim]")
+            module_instance.display.error(f"{template_id}: {error_msg}")
         raise Exit(code=1)
 
-    module_instance.display.success("All templates are valid!")
+    if total > 0:
+        module_instance.display.info("")
+        module_instance.display.success("All templates are valid")

+ 10 - 4
cli/core/repo.py

@@ -265,8 +265,10 @@ def _get_library_path_for_static(lib: dict, config: ConfigManager) -> Path:
     return library_path
 
 
-def _get_library_info(lib: dict, config: ConfigManager, libraries_path: Path) -> tuple[str, str, str, str, bool]:
+def _get_library_info(lib: dict, config: ConfigManager, libraries_path: Path) -> tuple[str, str, str, str, str, str]:
     """Extract library information based on type."""
+    from cli.core.display import IconManager
+
     name = lib.get("name", "")
     lib_type = lib.get("type", "git")
     enabled = lib.get("enabled", True)
@@ -277,6 +279,7 @@ def _get_library_info(lib: dict, config: ConfigManager, libraries_path: Path) ->
         directory = lib.get("directory", "library")
         library_path = _get_library_path_for_git(lib, libraries_path, name)
         exists = library_path.exists()
+        type_icon = IconManager.UI_LIBRARY_GIT
 
     elif lib_type == "static":
         url_or_path = lib.get("path", "")
@@ -284,6 +287,7 @@ def _get_library_info(lib: dict, config: ConfigManager, libraries_path: Path) ->
         directory = "-"
         library_path = _get_library_path_for_static(lib, config)
         exists = library_path.exists()
+        type_icon = IconManager.UI_LIBRARY_STATIC
 
     else:
         # Unknown type
@@ -291,6 +295,7 @@ def _get_library_info(lib: dict, config: ConfigManager, libraries_path: Path) ->
         branch = "-"
         directory = "-"
         exists = False
+        type_icon = "?"
 
     # Build status string
     status_parts = []
@@ -302,8 +307,9 @@ def _get_library_info(lib: dict, config: ConfigManager, libraries_path: Path) ->
         status_parts.append("[yellow]not found[/yellow]")
 
     status = " ".join(status_parts)
+    type_display = f"{type_icon} {lib_type}"
 
-    return url_or_path, branch, directory, lib_type, status
+    return url_or_path, branch, directory, type_display, type_icon, status
 
 
 @app.command()
@@ -333,10 +339,10 @@ def list() -> None:
 
     for lib in libraries:
         name = lib.get("name", "")
-        url_or_path, branch, directory, type_display, status = _get_library_info(lib, config, libraries_path)
+        url_or_path, branch, directory, type_display, type_icon, status = _get_library_info(lib, config, libraries_path)
         table.add_row(name, url_or_path, branch, directory, type_display, status)
 
-    display._print_table(table)
+    display.print_table(table)
 
 
 @app.command()

+ 43 - 0
cli/modules/ansible/spec_v1_0.py

@@ -6,11 +6,54 @@ spec = OrderedDict(
     {
         "general": {
             "title": "General",
+            "required": True,
             "vars": {
                 "playbook_name": {
                     "description": "Ansible playbook name",
                     "type": "str",
                 },
+                "target_hosts": {
+                    "description": "Target hosts pattern (e.g., 'all', 'webservers', or '{{ my_hosts | d([]) }}')",
+                    "type": "str",
+                    "default": "{{ my_hosts | d([]) }}",
+                },
+                "become": {
+                    "description": "Run tasks with privilege escalation (sudo)",
+                    "type": "bool",
+                    "default": False,
+                },
+            },
+        },
+        "options": {
+            "title": "Options",
+            "toggle": "options_enabled",
+            "vars": {
+                "options_enabled": {
+                    "description": "Enable additional playbook options",
+                    "type": "bool",
+                    "default": False,
+                },
+                "gather_facts": {
+                    "description": "Gather facts about target hosts",
+                    "type": "bool",
+                    "default": True,
+                },
+            },
+        },
+        "secrets": {
+            "title": "Secrets",
+            "toggle": "secrets_enabled",
+            "vars": {
+                "secrets_enabled": {
+                    "description": "Use external secrets file",
+                    "type": "bool",
+                    "default": False,
+                },
+                "secrets_file": {
+                    "description": "Path to secrets file",
+                    "type": "str",
+                    "default": "secrets.yaml",
+                },
             },
         },
     }

+ 6 - 0
cli/modules/compose/spec_v1_2.py

@@ -384,6 +384,12 @@ spec = OrderedDict(
                     "type": "str",
                     "sensitive": True,
                 },
+                "authentik_traefik_middleware": {
+                    "description": "Traefik middleware name for Authentik authentication",
+                    "type": "str",
+                    "default": "authentik-middleware@file",
+                    "needs": "traefik_enabled=true",
+                },
             },
         },
     }

+ 10 - 3
cli/modules/helm/spec_v1_0.py

@@ -67,10 +67,17 @@ spec = OrderedDict(
                     "type": "bool",
                     "default": False,
                 },
-                "traefik_tls_certmanager_issuer": {
-                    "description": "Cert-manager ClusterIssuer name",
+                "certmanager_issuer": {
+                    "description": "Cert-manager ClusterIssuer or Issuer name",
                     "type": "str",
-                    "default": "letsencrypt-prod",
+                    "default": "cloudflare",
+                    "needs": "traefik_tls_certmanager=true",
+                },
+                "certmanager_issuer_kind": {
+                    "description": "Issuer kind",
+                    "type": "enum",
+                    "options": ["ClusterIssuer", "Issuer"],
+                    "default": "ClusterIssuer",
                     "needs": "traefik_tls_certmanager=true",
                 },
             },

+ 80 - 0
cli/modules/kubernetes/spec_v1_0.py

@@ -7,6 +7,10 @@ spec = OrderedDict(
         "general": {
             "title": "General",
             "vars": {
+                "resource_name": {
+                    "description": "Resource name (metadata.name)",
+                    "type": "str",
+                },
                 "namespace": {
                     "description": "Kubernetes namespace",
                     "type": "str",
@@ -14,5 +18,81 @@ spec = OrderedDict(
                 },
             },
         },
+        "traefik": {
+            "title": "Traefik",
+            "toggle": "traefik_enabled",
+            "description": "Traefik IngressRoute configuration for HTTP/HTTPS routing",
+            "vars": {
+                "traefik_enabled": {
+                    "description": "Enable Traefik IngressRoute",
+                    "type": "bool",
+                    "default": False,
+                },
+                "traefik_entrypoint": {
+                    "description": "Traefik entrypoint (non-TLS)",
+                    "type": "str",
+                    "default": "web",
+                },
+                "traefik_host": {
+                    "description": "Domain name for the service (e.g., app.example.com)",
+                    "type": "hostname",
+                },
+                "traefik_service_name": {
+                    "description": "Backend Kubernetes service name",
+                    "type": "str",
+                },
+                "traefik_service_port": {
+                    "description": "Backend service port",
+                    "type": "int",
+                    "default": 80,
+                },
+            },
+        },
+        "traefik_tls": {
+            "title": "Traefik TLS/SSL",
+            "toggle": "traefik_tls_enabled",
+            "needs": "traefik",
+            "description": "Enable HTTPS/TLS for Traefik with certificate management",
+            "vars": {
+                "traefik_tls_enabled": {
+                    "description": "Enable HTTPS/TLS",
+                    "type": "bool",
+                    "default": True,
+                },
+                "traefik_tls_entrypoint": {
+                    "description": "TLS entrypoint",
+                    "type": "str",
+                    "default": "websecure",
+                },
+                "traefik_tls_certresolver": {
+                    "description": "Traefik certificate resolver name",
+                    "type": "str",
+                    "default": "cloudflare",
+                },
+            },
+        },
+        "certmanager": {
+            "title": "Cert-Manager",
+            "toggle": "certmanager_enabled",
+            "description": "Cert-manager certificate management configuration",
+            "vars": {
+                "certmanager_enabled": {
+                    "description": "Enable cert-manager certificate",
+                    "type": "bool",
+                    "default": False,
+                },
+                "certmanager_issuer": {
+                    "description": "ClusterIssuer or Issuer name",
+                    "type": "str",
+                    "default": "cloudflare",
+                },
+                "certmanager_issuer_kind": {
+                    "description": "Issuer kind",
+                    "type": "enum",
+                    "options": ["ClusterIssuer", "Issuer"],
+                    "default": "ClusterIssuer",
+                },
+            },
+        },
     }
 )

+ 25 - 0
library/ansible/checkmk-install-agent/playbook.yaml.j2

@@ -0,0 +1,25 @@
+---
+- name: {{ playbook_name }}
+  hosts: {{ target_hosts }}
+{% if become %}
+  become: true
+{% endif %}
+{% if options_enabled and not gather_facts %}
+  gather_facts: false
+{% endif %}
+  roles:
+    - checkmk.general.agent
+  vars:
+    checkmk_agent_version: {{ checkmk_version }}
+    checkmk_agent_server: {{ checkmk_server }}
+    checkmk_agent_server_protocol: {{ checkmk_protocol }}
+    checkmk_agent_site: {{ checkmk_site }}
+    checkmk_agent_auto_activate: {{ checkmk_auto_activate | lower }}
+    checkmk_agent_tls: "{{ checkmk_tls | lower }}"
+    checkmk_agent_user: {{ '{{' }} automation_user {{ '}}' }}
+    checkmk_agent_pass: {{ '{{' }} automation_secret {{ '}}' }}
+    checkmk_agent_host_name: {{ '{{' }} ansible_hostname {{ '}}' }}
+{% if secrets_enabled %}
+  vars_files:
+    - {{ secrets_file }}
+{% endif %}

+ 58 - 0
library/ansible/checkmk-install-agent/template.yaml

@@ -0,0 +1,58 @@
+---
+kind: ansible
+schema: "1.0"
+metadata:
+  name: Install Checkmk Agent
+  description: >
+    Ansible playbook to install Checkmk monitoring agent on hosts.
+    Uses the checkmk.general.agent role with automatic registration.
+
+
+    Project: https://checkmk.com
+
+    Documentation: https://docs.checkmk.com/
+  version: 2.4.0
+  author: Christian Lempa
+  date: '2025-11-11'
+spec:
+  general:
+    vars:
+      playbook_name:
+        default: Install Checkmk agent on all hosts
+      become:
+        default: false
+  secrets:
+    vars:
+      secrets_enabled:
+        default: true
+  checkmk:
+    title: Checkmk Configuration
+    required: true
+    vars:
+      checkmk_version:
+        type: str
+        description: Checkmk agent version
+        default: 2.4.0p4
+      checkmk_server:
+        type: hostname
+        description: Checkmk server hostname
+        default: checkmk.home.arpa
+      checkmk_protocol:
+        type: enum
+        description: Server protocol
+        options:
+          - https
+          - http
+        default: https
+      checkmk_site:
+        type: str
+        description: Checkmk site name
+        default: cmk
+      checkmk_auto_activate:
+        type: bool
+        description: Auto-activate changes
+        default: true
+      checkmk_tls:
+        type: bool
+        description: Enable TLS for agent
+        default: true

+ 29 - 0
library/ansible/checkmk-manage-hosts/playbook.yaml.j2

@@ -0,0 +1,29 @@
+---
+- name: {{ playbook_name }}
+  hosts: {{ target_hosts }}
+{% if become %}
+  become: true
+{% endif %}
+{% if options_enabled and not gather_facts %}
+  gather_facts: false
+{% endif %}
+{% if secrets_enabled %}
+  vars_files:
+    - {{ secrets_file }}
+{% endif %}
+  vars:
+    server_url: {{ checkmk_server_url }}
+    site: {{ checkmk_site }}
+
+  tasks:
+    - name: "Create host"
+      checkmk.general.host:
+        server_url: {{ '{{' }} server_url {{ '}}' }}
+        site: {{ '{{' }} site {{ '}}' }}
+        automation_user: {{ '{{' }} automation_user {{ '}}' }}
+        automation_secret: {{ '{{' }} automation_secret {{ '}}' }}
+        name: {{ host_name }}
+        attributes:
+          ipaddress: {{ host_ip }}
+        folder: {{ host_folder }}
+        state: "present"

+ 63 - 0
library/ansible/checkmk-manage-hosts/template.yaml

@@ -0,0 +1,63 @@
+---
+kind: ansible
+schema: "1.0"
+metadata:
+  name: Manage Checkmk Hosts
+  description: >
+    Ansible playbook to manage hosts in Checkmk monitoring.
+    Create or update host configuration in Checkmk.
+
+
+    Project: https://checkmk.com
+
+    Documentation: https://docs.checkmk.com/
+  version: 2.4.0
+  author: Christian Lempa
+  date: '2025-11-11'
+spec:
+  general:
+    vars:
+      playbook_name:
+        default: Manage Checkmk hosts
+      target_hosts:
+        default: localhost
+      become:
+        default: false
+  options:
+    vars:
+      options_enabled:
+        default: true
+      gather_facts:
+        default: false
+  secrets:
+    vars:
+      secrets_enabled:
+        default: true
+  checkmk:
+    title: Checkmk Configuration
+    required: true
+    vars:
+      checkmk_server_url:
+        type: hostname
+        description: Checkmk server URL
+        default: checkmk.home.arpa
+      checkmk_site:
+        type: str
+        description: Checkmk site name
+        default: cmk
+  host:
+    title: Host Configuration
+    required: true
+    vars:
+      host_name:
+        type: str
+        description: Hostname to add to Checkmk
+        default: your-host-name
+      host_ip:
+        type: str
+        description: IP address of the host
+        default: host-ip-address
+      host_folder:
+        type: str
+        description: Folder path in Checkmk
+        default: /

+ 0 - 19
library/ansible/checkmk/activate-changes.yaml

@@ -1,19 +0,0 @@
----
-- name: "Activate Checkmk changes"
-  hosts: localhost
-  gather_facts: false
-  vars_files:
-    - secrets.yaml
-  vars:
-    server_url: "checkmk.home.arpa"
-    site: "cmk"
-
-  tasks:
-    - name: "Start activation on a specific site"
-      checkmk.general.activation:
-        server_url: "{{ server_url }}"
-        site: "{{ site }}"
-        automation_user: "{{ automation_user }}"
-        automation_secret: "{{ automation_secret }}"
-        sites:
-          - "{{ site }}"

+ 0 - 17
library/ansible/checkmk/install-agent.yaml

@@ -1,17 +0,0 @@
----
-- name: "Install Checkmk agent on all hosts"
-  hosts: "{{ my_hosts | d([]) }}"
-  roles:
-    - checkmk.general.agent
-  vars:
-    checkmk_agent_version: "2.4.0p4"
-    checkmk_agent_server: "checkmk.home.arpa"
-    checkmk_agent_server_protocol: https
-    checkmk_agent_site: "cmk"
-    checkmk_agent_auto_activate: true
-    checkmk_agent_tls: "true"  # NOTE: Register Agent to enable TLS
-    checkmk_agent_user: "{{ automation_user }}"
-    checkmk_agent_pass: "{{ automation_secret }}"
-    checkmk_agent_host_name: "{{ ansible_hostname }}"  # NOTE: Required to replace FQDN with hostname only
-  vars_files:
-    - secrets.yaml

+ 0 - 25
library/ansible/checkmk/lookup-rule.yaml

@@ -1,25 +0,0 @@
----
-- name: "Manage Checkmk rules"
-  hosts: localhost
-  gather_facts: false
-  vars_files:
-    - secrets.yaml
-  vars:
-    server_url: "checkmk.home.arpa"
-    site: "cmk"
-
-  tasks:
-    - name: Get a rule with a particular rule id
-      ansible.builtin.debug:
-        msg: "Rule: {{ extensions | to_nice_yaml }}"
-      vars:
-        extensions: "{{
-          lookup('checkmk.general.rule',
-            rule_id='checkmk-rule-id',
-            server_url=server_url,
-            site=site,
-            automation_user=automation_user,
-            automation_secret=automation_secret,
-            validate_certs=False
-            )
-          }}"

+ 0 - 22
library/ansible/checkmk/manage-hosts.yaml

@@ -1,22 +0,0 @@
----
-- name: "Manage Checkmk hosts"
-  hosts: localhost
-  gather_facts: false
-  vars_files:
-    - secrets.yaml
-  vars:
-    server_url: "checkmk.home.arpa"
-    site: "cmk"
-
-  tasks:
-    - name: "Create host"
-      checkmk.general.host:
-        server_url: "{{ server_url }}"
-        site: "{{ site }}"
-        automation_user: "{{ automation_user }}"
-        automation_secret: "{{ automation_secret }}"
-        name: "your-host-name"
-        attributes:
-          ipaddress: "host-ip-address"
-        folder: "/"
-        state: "present"

+ 0 - 71
library/ansible/checkmk/manage-rules.yaml

@@ -1,71 +0,0 @@
----
-- name: "Manage Checkmk rules"
-  hosts: localhost
-  gather_facts: false
-  vars_files:
-    - secrets.yaml
-  vars:
-    server_url: "checkmk.home.arpa"
-    site: "cmk"
-
-  tasks:
-    - name: Create DNS Check Rule
-      checkmk.general.rule:
-        server_url: "{{ server_url }}"
-        site: "{{ site }}"
-        automation_user: "{{ automation_user }}"
-        automation_secret: "{{ automation_secret }}"
-        ruleset: "active_checks:dns"
-        rule:
-          properties: {
-            "comment": "Ansible managed",
-            "description": "DNS DNS Monitoring",
-            "disabled": false,
-          }
-          conditions: {
-            "host_label_groups": [],
-            "host_name": {
-              "match_on": [
-                "your-dns-container-host"
-              ],
-              "operator": "one_of"
-            },
-            "host_tags": [],
-            "service_label_groups": []
-          }
-          "value_raw": {
-            "hostname": "hostname-to-query",
-            "server": "dns-server-ip",
-            "expected_addresses_list": [
-              "expected-ip-address"
-            ]
-          }
-          location:
-            folder: "/"
-            position: "top"
-        state: "present"
-
-    - name: Create NVME Temperature override rule
-      checkmk.general.rule:
-        server_url: "{{ server_url }}"
-        site: "{{ site }}"
-        automation_user: "{{ automation_user }}"
-        automation_secret: "{{ automation_secret }}"
-        ruleset: "checkgroup_parameters:temperature"
-        rule:
-          conditions:
-            host_label_groups: []
-            host_tags: []
-            service_description:
-              match_on:
-                - "DRIVE MODEL NAME*"
-              operator: "one_of"
-            service_label_groups: []
-          location:
-            folder: "/"
-            position: "top"
-          properties:
-            description: "NVME Temperature override"
-            disabled: false
-          value_raw: "{'levels': (60.0, 80.0)}"
-        state: present

+ 0 - 3
library/ansible/checkmk/secrets.yaml

@@ -1,3 +0,0 @@
----
-automation_user: "your-checkmk-user"
-automation_secret: "your-checkmk-password"

+ 0 - 76
library/ansible/discord/notify-discord.yaml

@@ -1,76 +0,0 @@
----
-# This Ansible playbook demonstrates how to send Discord notifications
-# using the `community.general.discord` module.
-# https://docs.ansible.com/ansible/latest/collections/community/general/discord_module.html
-#
-# If you need guidance how to create your own Discord server, see
-# https://support.discord.com/hc/en-us/articles/204849977-How-do-I-create-a-server
-#
-# In order to generate a webhook, please see
-# https://support.discord.com/hc/en-us/articles/360045093012-Server-Integrations-Page
-
-- name: Notify discord
-
-  hosts: "{{ my_hosts | d([]) }}"
-
-  vars:
-    # The name that will be shown as sender of the notification. Note
-    # that some usernames are blocked by Discord, for example it must
-    # not contain the word `discord`.
-    notify_discord_username: Ansible
-
-    # Your Discord webhook URL should have following format. Please
-    # extract following segments of the URL path and set it as value of
-    # the following variables:
-    #
-    # https://discord.com/api/webhooks/nnnnnnnnnn/xxxxxxxxxxxxxxxxxxxxxxxxxxx
-    #                                  |        | |                         |
-    #   notify_discord_webhook_id <----'--------' |                         |
-    #                                             |                         |
-    #   notify_discord_webhook_token <------------'-------------------------'
-    #
-    # Security advise: if you commit this data to a repository it is
-    # strongly recommended to encrypt `notify_discord_webhook_token` using
-    # Ansible Vault.
-    notify_discord_webhook_id: ''
-    notify_discord_webhook_token: ''
-
-    # Do not modify following regular expressions unless you know what
-    # you're doing. Those are to ensure that whatever you've set as
-    # `notify_discord_webhook_id` and `notify_discord_webhook_token`
-    # complies with the Discord API Specification (as of 2024-02-25).
-    #
-    # https://github.com/discord/discord-api-spec/blob/fe9917381e47285b56d98cb72ae3cfe7db9ea19c/specs/openapi.json#L7524-L7531
-    # https://github.com/discord/discord-api-spec/blob/fe9917381e47285b56d98cb72ae3cfe7db9ea19c/specs/openapi.json#L24817-L24821
-    notify_discord_webhook_id_regex: '^0|[1-9][0-9]*$'
-    # https://github.com/discord/discord-api-spec/blob/fe9917381e47285b56d98cb72ae3cfe7db9ea19c/specs/openapi.json#L7532-L7541
-    notify_discord_webhook_token_regex: '^[a-zA-Z0-9_-]+$'
-
-    # The content of the notification
-    notify_discord_webhook_content: |-
-      **Message from `{{ inventory_hostname }}` by *Ansible* ** :tada:
-      Just a test, adjust it to your liking.
-
-      You can use any Markdown formatting here [supported by Discord](
-      https://support.discord.com/hc/en-us/articles/210298617-Markdown-Text-101-Chat-Formatting-Bold-Italic-Underline).
-
-    # Delegate the sending of the Dicord notification to following host
-    # which must be able to access the public internet on destination
-    # port 443/tcp. When `localhost` is specified, this is sent from
-    # the Ansible Controller, but you can pick any host listed in the
-    # Ansible inventory.
-    notify_discord_send_from_host: localhost
-
-  tasks:
-    - name: Send Discord message
-      community.general.discord:
-        username: "{{ notify_discord_username }}"
-        webhook_id: "{{ notify_discord_webhook_id }}"
-        webhook_token: "{{ notify_discord_webhook_token }}"
-        content: "{{ notify_discord_webhook_content }}"
-      delegate_to: "{{ notify_discord_send_from_host }}"
-      when:
-        - notify_discord_webhook_id is match(notify_discord_webhook_id_regex)
-        - notify_discord_webhook_token is match(notify_discord_webhook_token_regex)
-        - notify_discord_webhook_content | length > 0
-        - notify_discord_send_from_host is in (['localhost'] + groups['all'])

+ 17 - 8
library/ansible/docker/docker-certs-enable.yaml → library/ansible/docker-certs-enable/playbook.yaml.j2

@@ -1,14 +1,23 @@
 ---
-- name: "Docker Certs enable"
-  hosts: "{{ my_hosts | d([]) }}"
+- name: {{ playbook_name }}
+  hosts: {{ target_hosts }}
+{% if become %}
   become: true
+{% endif %}
+{% if options_enabled and not gather_facts %}
+  gather_facts: false
+{% endif %}
+{% if secrets_enabled %}
+  vars_files:
+    - {{ secrets_file }}
+{% endif %}
   vars:
-    certs_path: "/root/docker-certs"
+    certs_path: {{ certs_path }}
 
   tasks:
     - name: Check if docker certs are existing
       ansible.builtin.stat:
-        path: "{{ certs_path }}"
+        path: {{ '{{' }} certs_path {{ '}}' }}
       register: certs_dir
 
     - name: Fail if docker certs are not existing
@@ -22,12 +31,12 @@
 
     - name: Set machine's primary internal ip address
       ansible.builtin.set_fact:
-        ip_address: "{{ ip_address.ansible_facts.ansible_default_ipv4.address }}"
+        ip_address: {{ '{{' }} ip_address.ansible_facts.ansible_default_ipv4.address {{ '}}' }}
 
     - name: Check if ip_address is a valid ip address
       ansible.builtin.assert:
         that:
-          - ip_address is match("^(?:[0-9]{1,3}\.){3}[0-9]{1,3}$")
+          - ip_address is match("^(?:[0-9]{1,3}\\.){3}[0-9]{1,3}$")
         fail_msg: "ip_address is not a valid ip address."
         success_msg: "ip_address is a valid ip address."
 
@@ -36,8 +45,8 @@
         path: /lib/systemd/system/docker.service
         line: >
           ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
-          -H tcp://{{ ip_address }}:2376 --tlsverify --tlscacert={{ certs_path }}/ca.pem
-          --tlscert={{ certs_path }}/server-cert.pem --tlskey={{ certs_path }}/server-key.pem
+          -H tcp://{{ '{{' }} ip_address {{ '}}' }}:2376 --tlsverify --tlscacert={{ '{{' }} certs_path {{ '}}' }}/ca.pem
+          --tlscert={{ '{{' }} certs_path {{ '}}' }}/server-cert.pem --tlskey={{ '{{' }} certs_path {{ '}}' }}/server-key.pem
         regexp: '^ExecStart='
         state: present
 

+ 31 - 0
library/ansible/docker-certs-enable/template.yaml

@@ -0,0 +1,31 @@
+---
+kind: ansible
+schema: "1.0"
+metadata:
+  name: Enable Docker TLS
+  description: >
+    Ansible playbook to enable TLS on Docker daemon using existing certificates.
+    Configures Docker to use TLS for secure remote access.
+
+
+    Project: https://www.docker.com
+
+    Documentation: https://docs.docker.com/engine/security/protect-access/
+  version: 1.0.0
+  author: Christian Lempa
+  date: '2025-11-11'
+spec:
+  general:
+    vars:
+      playbook_name:
+        default: Docker Certs enable
+      become:
+        default: true
+  certificates:
+    title: Certificate Configuration
+    required: true
+    vars:
+      certs_path:
+        type: str
+        description: Path where certificates are stored
+        default: /root/docker-certs

+ 167 - 0
library/ansible/docker-certs/playbook.yaml.j2

@@ -0,0 +1,167 @@
+---
+- name: {{ playbook_name }}
+  hosts: {{ target_hosts }}
+{% if become %}
+  become: true
+{% endif %}
+{% if options_enabled and not gather_facts %}
+  gather_facts: false
+{% endif %}
+{% if secrets_enabled %}
+  vars_files:
+    - {{ secrets_file }}
+{% endif %}
+  vars:
+    certs_path: {{ certs_path }}
+    cert_validity_days: {{ cert_validity_days }}
+    cn_domain: {{ cn_domain }}
+
+  tasks:
+    - name: Check if docker certs are existing
+      ansible.builtin.stat:
+        path: "{{ '{{' }} certs_path {{ '}}' }}"
+      register: certs_dir
+
+    - name: Create docker certs directory (if needed)
+      ansible.builtin.file:
+        path: "{{ '{{' }} certs_path {{ '}}' }}"
+        state: directory
+        mode: '0700'
+      when: not certs_dir.stat.exists
+
+    - name: Check if docker certs directory is empty
+      ansible.builtin.command: ls -A {{ '{{' }} certs_path {{ '}}' }}
+      register: certs_list
+      when: certs_dir.stat.exists
+      changed_when: false
+      ignore_errors: true
+
+    - name: Fail if docker certs already exist
+      ansible.builtin.fail:
+        msg: "Docker certificates already exist in {{ '{{' }} certs_path {{ '}}' }}."
+      when: certs_list.stdout | default('') != ''
+
+    - name: Get machine's primary internal ip address from the default interface
+      ansible.builtin.setup:
+      register: ip_address
+
+    - name: Set machine's primary internal ip address
+      ansible.builtin.set_fact:
+        ip_address: "{{ '{{' }} ip_address.ansible_facts.ansible_default_ipv4.address {{ '}}' }}"
+
+    - name: Check if ip_address is a valid ip address
+      ansible.builtin.assert:
+        that:
+          - ip_address is match("^(?:[0-9]{1,3}\\.){3}[0-9]{1,3}$")
+        fail_msg: "ip_address is not a valid ip address."
+        success_msg: "ip_address is a valid ip address."
+
+    - name: Generate CA private key
+      ansible.builtin.command:
+        cmd: >
+          openssl genrsa -out {{ '{{' }} certs_path {{ '}}' }}/ca-key.pem 4096
+      args:
+        creates: "{{ '{{' }} certs_path {{ '}}' }}/ca-key.pem"
+
+    - name: Generate CA certificate
+      ansible.builtin.command:
+        cmd: >
+          openssl req -sha256 -new -x509
+            -subj "/CN={{ '{{' }} cn_domain {{ '}}' }}"
+            -days {{ '{{' }} cert_validity_days {{ '}}' }}
+            -key {{ '{{' }} certs_path {{ '}}' }}/ca-key.pem
+            -out {{ '{{' }} certs_path {{ '}}' }}/ca.pem
+      args:
+        creates: "{{ '{{' }} certs_path {{ '}}' }}/ca.pem"
+
+    - name: Generate server private key
+      ansible.builtin.command:
+        cmd: >
+          openssl genrsa -out {{ '{{' }} certs_path {{ '}}' }}/server-key.pem 4096
+        creates: "{{ '{{' }} certs_path {{ '}}' }}/server-key.pem"
+
+    - name: Generate server certificate signing request
+      ansible.builtin.command:
+        cmd: >
+          openssl req -sha256 -new
+            -subj "/CN={{ '{{' }} inventory_hostname {{ '}}' }}"
+            -key {{ '{{' }} certs_path {{ '}}' }}/server-key.pem
+            -out {{ '{{' }} certs_path {{ '}}' }}/server.csr
+        creates: "{{ '{{' }} certs_path {{ '}}' }}/server.csr"
+
+    - name: Generate server certificate extension file
+      ansible.builtin.shell: |
+        echo "subjectAltName = DNS:{{ '{{' }} inventory_hostname {{ '}}' }},IP:{{ '{{' }} ip_address {{ '}}' }},IP:127.0.0.1" >> {{ '{{' }} certs_path {{ '}}' }}/extfile.cnf
+        echo "extendedKeyUsage = serverAuth" >> {{ '{{' }} certs_path {{ '}}' }}/extfile.cnf
+      args:
+        creates: "{{ '{{' }} certs_path {{ '}}' }}/extfile.cnf"
+
+    - name: Generate server certificate
+      ansible.builtin.command:
+        cmd: >
+          openssl x509 -req -days {{ '{{' }} cert_validity_days {{ '}}' }} -sha256
+            -in {{ '{{' }} certs_path {{ '}}' }}/server.csr
+            -CA {{ '{{' }} certs_path {{ '}}' }}/ca.pem
+            -CAkey {{ '{{' }} certs_path {{ '}}' }}/ca-key.pem
+            -CAcreateserial -out {{ '{{' }} certs_path {{ '}}' }}/server-cert.pem
+            -extfile {{ '{{' }} certs_path {{ '}}' }}/extfile.cnf
+        creates: "{{ '{{' }} certs_path {{ '}}' }}/server-cert.pem"
+
+    - name: Generate client private key
+      ansible.builtin.command:
+        cmd: >
+          openssl genrsa -out {{ '{{' }} certs_path {{ '}}' }}/key.pem 4096
+        creates: "{{ '{{' }} certs_path {{ '}}' }}/key.pem"
+
+    - name: Generate client certificate signing request
+      ansible.builtin.command:
+        cmd: >
+          openssl req -sha256 -new
+            -subj "/CN=client"
+            -key {{ '{{' }} certs_path {{ '}}' }}/key.pem
+            -out {{ '{{' }} certs_path {{ '}}' }}/client.csr
+        creates: "{{ '{{' }} certs_path {{ '}}' }}/client.csr"
+
+    - name: Generate client certificate extension file
+      ansible.builtin.shell: |
+        echo "extendedKeyUsage = clientAuth" >> {{ '{{' }} certs_path {{ '}}' }}/client-extfile.cnf
+      args:
+        creates: "{{ '{{' }} certs_path {{ '}}' }}/client-extfile.cnf"
+
+    - name: Generate client certificate
+      ansible.builtin.command:
+        cmd: >
+          openssl x509 -req -days {{ '{{' }} cert_validity_days {{ '}}' }}
+            -sha256 -in {{ '{{' }} certs_path {{ '}}' }}/client.csr
+            -CA {{ '{{' }} certs_path {{ '}}' }}/ca.pem
+            -CAkey {{ '{{' }} certs_path {{ '}}' }}/ca-key.pem
+            -CAcreateserial -out {{ '{{' }} certs_path {{ '}}' }}/cert.pem
+            -extfile {{ '{{' }} certs_path {{ '}}' }}/client-extfile.cnf
+        creates: "{{ '{{' }} certs_path {{ '}}' }}/cert.pem"
+
+    - name: Remove server certificate signing request
+      ansible.builtin.file:
+        path: "{{ '{{' }} certs_path {{ '}}' }}/server.csr"
+        state: absent
+
+    - name: Remove client certificate signing request
+      ansible.builtin.file:
+        path: "{{ '{{' }} certs_path {{ '}}' }}/client.csr"
+        state: absent
+
+    - name: Remove server certificate extension file
+      ansible.builtin.file:
+        path: "{{ '{{' }} certs_path {{ '}}' }}/extfile.cnf"
+        state: absent
+
+    - name: Remove client certificate extension file
+      ansible.builtin.file:
+        path: "{{ '{{' }} certs_path {{ '}}' }}/client-extfile.cnf"
+        state: absent
+
+    - name: Set permissions for docker certs
+      ansible.builtin.file:
+        path: "{{ '{{' }} certs_path {{ '}}' }}"
+        mode: '0700'
+        recurse: true
+        follow: true

+ 39 - 0
library/ansible/docker-certs/template.yaml

@@ -0,0 +1,39 @@
+---
+kind: ansible
+schema: "1.0"
+metadata:
+  name: Generate Docker TLS Certificates
+  description: >
+    Ansible playbook to generate TLS certificates for Docker daemon.
+    Creates CA, server, and client certificates for secure Docker remote access.
+
+
+    Project: https://www.docker.com
+
+    Documentation: https://docs.docker.com/engine/security/protect-access/
+  version: 1.0.0
+  author: Christian Lempa
+  date: '2025-11-11'
+spec:
+  general:
+    vars:
+      playbook_name:
+        default: Docker Certs
+      become:
+        default: true
+  certificates:
+    title: Certificate Configuration
+    required: true
+    vars:
+      certs_path:
+        type: str
+        description: Path where certificates will be stored
+        default: /root/docker-certs
+      cert_validity_days:
+        type: int
+        description: Certificate validity period in days
+        default: 3650
+      cn_domain:
+        type: hostname
+        description: Common Name (CN) for the CA certificate
+        default: your-domain.tld

+ 12 - 3
library/ansible/docker/inst-docker-ubuntu.yaml → library/ansible/docker-install-ubuntu/playbook.yaml.j2

@@ -1,7 +1,16 @@
 ---
-- name: Install docker
-  hosts: "{{ my_hosts | d([]) }}"
+- name: {{ playbook_name }}
+  hosts: {{ target_hosts }}
+{% if become %}
   become: true
+{% endif %}
+{% if options_enabled and not gather_facts %}
+  gather_facts: false
+{% endif %}
+{% if secrets_enabled %}
+  vars_files:
+    - {{ secrets_file }}
+{% endif %}
 
   tasks:
     - name: Install docker dependencies
@@ -23,7 +32,7 @@
     - name: Add docker repository
       ansible.builtin.apt_repository:
         filename: docker
-        repo: deb [arch=amd64 signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu {{ ansible_lsb.codename | lower }} stable
+        repo: deb [arch=amd64 signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu {{ '{{' }} ansible_lsb.codename | lower {{ '}}' }} stable
         state: present
 
     - name: Install docker engine

+ 23 - 0
library/ansible/docker-install-ubuntu/template.yaml

@@ -0,0 +1,23 @@
+---
+kind: ansible
+schema: "1.0"
+metadata:
+  name: Install Docker on Ubuntu
+  description: >
+    Ansible playbook to install Docker Engine on Ubuntu systems.
+    Includes Docker CE, Buildx plugin, and Compose plugin.
+
+
+    Project: https://www.docker.com
+
+    Documentation: https://docs.docker.com/engine/install/ubuntu/
+  version: 27.5.1
+  author: Christian Lempa
+  date: '2025-11-11'
+spec:
+  general:
+    vars:
+      playbook_name:
+        default: Install docker
+      become:
+        default: true

+ 12 - 2
library/ansible/docker/maint-docker-clean.yaml → library/ansible/docker-prune/playbook.yaml.j2

@@ -1,6 +1,16 @@
 ---
-- name: Clean docker
-  hosts: "{{ my_hosts | d([]) }}"
+- name: {{ playbook_name }}
+  hosts: {{ target_hosts }}
+{% if become %}
+  become: true
+{% endif %}
+{% if options_enabled and not gather_facts %}
+  gather_facts: false
+{% endif %}
+{% if secrets_enabled %}
+  vars_files:
+    - {{ secrets_file }}
+{% endif %}
 
   tasks:
     - name: Prune non-dangling images

+ 23 - 0
library/ansible/docker-prune/template.yaml

@@ -0,0 +1,23 @@
+---
+kind: ansible
+schema: "1.0"
+metadata:
+  name: Docker Prune
+  description: >
+    Ansible playbook to clean up Docker resources.
+    Prunes non-dangling images to free up disk space.
+
+
+    Project: https://www.docker.com
+
+    Documentation: https://docs.docker.com/engine/reference/commandline/system_prune/
+  version: 1.0.0
+  author: Christian Lempa
+  date: '2025-11-11'
+spec:
+  general:
+    vars:
+      playbook_name:
+        default: Clean docker
+      become:
+        default: false

+ 0 - 158
library/ansible/docker/docker-certs.yaml

@@ -1,158 +0,0 @@
----
-- name: "Docker Certs"
-  hosts: "{{ my_hosts | d([]) }}"
-  become: true
-  vars:
-    certs_path: "/root/docker-certs"
-    cert_validity_days: 3650
-    cn_domain: "your-domain.tld"
-
-  tasks:
-    - name: Check if docker certs are existing
-      ansible.builtin.stat:
-        path: "{{ certs_path }}"
-      register: certs_dir
-
-    - name: Create docker certs directory (if needed)
-      ansible.builtin.file:
-        path: "{{ certs_path }}"
-        state: directory
-        mode: '0700'
-      when: not certs_dir.stat.exists
-
-    - name: Check if docker certs directory is empty
-      ansible.builtin.command: ls -A "{{ certs_path }}"
-      register: certs_list
-      when: certs_dir.stat.exists
-      changed_when: false
-      ignore_errors: true
-
-    - name: Fail if docker certs already exist
-      ansible.builtin.fail:
-        msg: "Docker certificates already exist in /root/docker-certs."
-      when: certs_list.stdout | default('') != ''
-
-    - name: Get machine's primary internal ip address from eth0 interface
-      ansible.builtin.setup:
-      register: ip_address
-
-    - name: Set machine's primary internal ip address
-      ansible.builtin.set_fact:
-        ip_address: "{{ ip_address.ansible_facts.ansible_default_ipv4.address }}"
-
-    - name: Check if ip_address is a valid ip address
-      ansible.builtin.assert:
-        that:
-          - ip_address is match("^(?:[0-9]{1,3}\.){3}[0-9]{1,3}$")
-        fail_msg: "ip_address is not a valid ip address."
-        success_msg: "ip_address is a valid ip address."
-
-    - name: Generate CA private key
-      ansible.builtin.command:
-        cmd: >
-          openssl genrsa -out "{{ certs_path }}/ca-key.pem" 4096
-      args:
-        creates: "{{ certs_path }}/ca-key.pem"
-
-    - name: Generate CA certificate
-      ansible.builtin.command:
-        cmd: >
-          openssl req -sha256 -new -x509
-            -subj "/CN={{ cn_domain }}"
-            -days "{{ cert_validity_days }}"
-            -key "{{ certs_path }}/ca-key.pem"
-            -out "{{ certs_path }}/ca.pem"
-      args:
-        creates: "{{ certs_path }}/ca.pem"
-
-    - name: Generate server private key
-      ansible.builtin.command:
-        cmd: >
-          openssl genrsa -out "{{ certs_path }}/server-key.pem" 4096
-        creates: "{{ certs_path }}/server-key.pem"
-
-    - name: Generate server certificate signing request
-      ansible.builtin.command:
-        cmd: >
-          openssl req -sha256 -new
-            -subj "/CN={{ inventory_hostname }}"
-            -key "{{ certs_path }}/server-key.pem"
-            -out "{{ certs_path }}/server.csr"
-        creates: "{{ certs_path }}/server.csr"
-
-    - name: Generate server certificate extension file
-      ansible.builtin.shell: |
-        echo "subjectAltName = DNS:{{ inventory_hostname }},IP:{{ ip_address }},IP:127.0.0.1" >> "{{ certs_path }}/extfile.cnf"
-        echo "extendedKeyUsage = serverAuth" >> "{{ certs_path }}/extfile.cnf"
-      args:
-        creates: "{{ certs_path }}/extfile.cnf"
-
-    - name: Generate server certificate
-      ansible.builtin.command:
-        cmd: >
-          openssl x509 -req -days "{{ cert_validity_days }}" -sha256
-            -in "{{ certs_path }}/server.csr"
-            -CA "{{ certs_path }}/ca.pem"
-            -CAkey "{{ certs_path }}/ca-key.pem"
-            -CAcreateserial -out "{{ certs_path }}/server-cert.pem"
-            -extfile "{{ certs_path }}/extfile.cnf"
-        creates: "{{ certs_path }}/server-cert.pem"
-
-    - name: Generate client private key
-      ansible.builtin.command:
-        cmd: >
-          openssl genrsa -out "{{ certs_path }}/key.pem" 4096
-        creates: "{{ certs_path }}/key.pem"
-
-    - name: Generate client certificate signing request
-      ansible.builtin.command:
-        cmd: >
-          openssl req -sha256 -new
-            -subj "/CN=client"
-            -key "{{ certs_path }}/key.pem"
-            -out "{{ certs_path }}/client.csr"
-        creates: "{{ certs_path }}/client.csr"
-
-    - name: Generate client certificate extension file
-      ansible.builtin.shell: |
-        echo "extendedKeyUsage = clientAuth" >> "{{ certs_path }}/client-extfile.cnf"
-      args:
-        creates: "{{ certs_path }}/client-extfile.cnf"
-
-    - name: Generate client certificate
-      ansible.builtin.command:
-        cmd: >
-          openssl x509 -req -days "{{ cert_validity_days }}"
-            -sha256 -in "{{ certs_path }}/client.csr"
-            -CA "{{ certs_path }}/ca.pem"
-            -CAkey "{{ certs_path }}/ca-key.pem"
-            -CAcreateserial -out "{{ certs_path }}/cert.pem"
-            -extfile "{{ certs_path }}/client-extfile.cnf"
-        creates: "{{ certs_path }}/cert.pem"
-
-    - name: Remove client certificate signing request
-      ansible.builtin.file:
-        path: "{{ certs_path }}/server.csr"
-        state: absent
-
-    - name: Remove client certificate signing request
-      ansible.builtin.file:
-        path: "{{ certs_path }}/client.csr"
-        state: absent
-
-    - name: Remove server certificate extension file
-      ansible.builtin.file:
-        path: "{{ certs_path }}/extfile.cnf"
-        state: absent
-
-    - name: Remove client certificate extension file
-      ansible.builtin.file:
-        path: "{{ certs_path }}/client-extfile.cnf"
-        state: absent
-
-    - name: Set permissions for docker certs
-      ansible.builtin.file:
-        path: "{{ certs_path }}"
-        mode: '0700'
-        recurse: true
-        follow: true

+ 0 - 46
library/ansible/kubernetes/README.md

@@ -1,46 +0,0 @@
-# Kubernetes (K8s) Installation Script
-
-- Introduction
-- Prerequisites
-- Execution Instructions
-
-## Introduction
-
-> The objective of this playbook is to automate the installation and setup of a kubernetes instance. The playbook consist of 3 main plays. For both controller and nodes, for controller only and for nodes only. It will ask user confirmation before moving on to each stage. By the end of the playbook two files will be created on the controller node named **worker_conn_string** and locally inside the playbook directory with the name **Remote_Files/worker_conn_string**. This will have the **connection string**. (Note:- If you want to join controllers or nodes manually later. For controllers use **--control-plane** flag)
-
-### References
-
-**Documentation** - [https://kubernetes.io/docs/setup/](https://kubernetes.io/docs/setup/)
-
-## Prerequisites
-
-- Atleast 2 VMs  (1 For Control Node and 1 For Worker Node).
-- Static IPs should be set along with unique host names.
-- Inventory should be in this format
-
-```ini
-    [controllers]
-    host_name ansible_ssh_host=<IP> ansible_user='<USERNAME>' ansible_become_pass='<PASSWORD>'
-
-    [nodes]
-
-    [instance:children]
-    controllers
-    nodes
-```
-
-(If you want to change this, don't forget to change the `inst-k8s` as well)
-
-## Execution Instructions
-
-```bash
-ansible-playbook -i <INVENTORY> <PLAYBOOK>
-```
-
-### Optional Flags
-
-| Flag  | Use Case |
-|-------|-----------|
-| --ask-vault-pass | If the vault is encrypted |
-| --start-at-task | If you want to start from a specific task|
-| --tags | If you want to only run a specific group of tasks|

+ 0 - 2
library/ansible/kubernetes/ansible.cfg

@@ -1,2 +0,0 @@
-[defaults]
-timeout = 25

+ 0 - 318
library/ansible/kubernetes/inst-k8s.yaml

@@ -1,318 +0,0 @@
----
-- name: Setup Prerequisites To Install Kubernetes
-  hosts: instance
-  become: true
-  vars:
-    kube_prereq_packages: [curl, ca-certificates, apt-transport-https]
-    kube_packages: [kubeadm, kubectl, kubelet]
-
-  tasks:
-    - name: Test Reacheability
-      ansible.builtin.ping:
-
-    - name: Update Cache
-      ansible.builtin.apt:
-        update_cache: true
-        autoclean: true
-
-    - name: 1. Upgrade All the Packages to the latest
-      ansible.builtin.apt:
-        upgrade: "full"
-
-    - name: 2. Install Qemu-Guest-Agent
-      ansible.builtin.apt:
-        name:
-          - qemu-guest-agent
-        state: present
-
-    - name: 3. Setup a Container Runtime
-      ansible.builtin.apt:
-        name:
-          - containerd
-        state: present
-
-    - name: 4. Start Containerd If Stopped
-      ansible.builtin.service:
-        name: containerd
-        state: started
-
-    - name: 5. Create Containerd Directory
-      ansible.builtin.file:
-        path: /etc/containerd
-        state: directory
-        mode: '0755'
-
-    - name: 6. Check config.toml Exists
-      ansible.builtin.stat:
-        path: /etc/containerd/config.toml
-      register: pre_file_exist_result
-
-    - name: 6.1 Delete config.toml Exists
-      ansible.builtin.file:
-        path: /etc/containerd/config.toml
-        state: absent
-      when: pre_file_exist_result.stat.exists
-
-    - name: 7. Place Default Containerd Config Inside It
-      ansible.builtin.shell: |
-        set -o pipefail
-        containerd config default | sudo tee /etc/containerd/config.toml
-      register: output
-      changed_when: output.rc != 0
-      args:
-        executable: /bin/bash
-      tags:
-        - containerd_config
-
-    - name: 7.1 Check If New config.toml Exists Now
-      ansible.builtin.stat:
-        path: /etc/containerd/config.toml
-      register: post_file_exist_result
-      tags:
-        - containerd_config
-
-    - name: 7.2 Exit The Play If config.toml Does Not Exist
-      ansible.builtin.meta: end_play
-      when: not post_file_exist_result.stat.exists
-      tags:
-        - containerd_config
-
-    - name: 8.1 Disable Swap
-      ansible.builtin.command: sudo swapoff -a
-      register: output
-      changed_when: output.rc != 0
-      tags:
-        - disable_swap
-
-    - name: 8.2 Disable Swap permanently
-      ansible.builtin.replace:
-        path: /etc/fstab
-        regexp: '^([^#].*?\sswap\s+sw\s+.*)$'
-        replace: '# \1'
-      tags:
-        - disable_swap
-
-    - name: 9. Edit config.toml
-      ansible.builtin.replace:
-        path: /etc/containerd/config.toml
-        after: \[plugins\."io\.containerd\.grpc\.v1\.cri"\.containerd\.runtimes\.runc\.options\]
-        regexp: SystemdCgroup = false
-        replace: SystemdCgroup = true
-
-    - name: 10. Enable Ipv4 Bridging
-      ansible.builtin.replace:
-        path: /etc/sysctl.conf
-        regexp: ^#net\.ipv4\.ip_forward=1$
-        replace: net.ipv4.ip_forward=1
-
-    - name: 11.1 Delete k8s Config If Exists
-      ansible.builtin.file:
-        path: /etc/modules-load.d/k8s.conf
-        state: absent
-      tags:
-        - kube_config
-
-    - name: 11.2 Add k8s.config and Edit It
-      ansible.builtin.lineinfile:
-        path: /etc/modules-load.d/k8s.conf
-        line: br_netfilter
-        create: true
-        mode: '0755'
-      tags:
-        - kube_config
-
-    - name: 12.1 Reboot
-      ansible.builtin.reboot:
-      register: system_reboot
-
-    - name: 12.2 Verify Reboot Success
-      ansible.builtin.ping:
-      when: system_reboot.rebooted
-
-    - name: 13.1 Update Cache
-      ansible.builtin.apt:
-        update_cache: true
-        autoclean: true
-      tags:
-        - install_pre_kube_packages
-
-    - name: 13.2 Remove apt lock file
-      ansible.builtin.file:
-        state: absent
-        path: "/var/lib/dpkg/lock"
-      tags:
-        - install_pre_kube_packages
-
-    - name: 13.3 Install Prerequisite Packages
-      ansible.builtin.apt:
-        name: '{{ kube_prereq_packages }}'
-      tags:
-        - install_pre_kube_packages
-
-    - name: 13.4 Remove GPG Keys If They Exist
-      ansible.builtin.file:
-        path: "{{ item }}"
-        state: absent
-      with_items:
-        - /usr/share/keyrings/kubernetes-apt-keyring.gpg
-        - /usr/share/keyrings/kubernetes-apt-keyring.gpg_armored
-      tags:
-        - install_pre_kube_packages
-
-    - name: 13.5 Download Kubernetes APT Key
-      ansible.builtin.get_url:
-        url: https://pkgs.k8s.io/core:/stable:/v1.28/deb/Release.key
-        dest: /usr/share/keyrings/kubernetes-apt-keyring.gpg_armored
-        mode: '0755'
-      tags:
-        - install_pre_kube_packages
-
-    - name: 13.6 De-Armor Kubernetes APT Key
-      ansible.builtin.shell: gpg --dearmor < /usr/share/keyrings/kubernetes-apt-keyring.gpg_armored > /etc/apt/keyrings/kubernetes-apt-keyring.gpg
-      no_log: true
-      args:
-        creates: /etc/apt/keyrings/kubernetes-apt-keyring.gpg
-      tags:
-        - install_pre_kube_packages
-
-    - name: 13.7 Add Kubernetes APT Key
-      ansible.builtin.shell: |
-        set -o pipefail
-        echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.28/deb/ /' \
-        | sudo tee /etc/apt/sources.list.d/kubernetes.list
-      register: apt_output
-      changed_when: apt_output.rc != 0
-      args:
-        executable: /bin/bash
-      tags:
-        - install_pre_kube_packages
-
-    - name: 14.1 Update Cache
-      ansible.builtin.apt:
-        update_cache: true
-        autoclean: true
-      tags:
-        - install_kube_packages
-
-    - name: 14.2 Remove apt lock file
-      ansible.builtin.file:
-        state: absent
-        path: "/var/lib/dpkg/lock"
-      tags:
-        - install_kube_packages
-
-    - name: 14.3 Install Required Packages
-      ansible.builtin.apt:
-        name: '{{ kube_packages }}'
-      tags:
-        - install_kube_packages
-
-    - name: 14.4 Hold Packages
-      ansible.builtin.dpkg_selections:
-        name: '{{ item }}'
-        selection: hold
-      with_items: '{{ kube_packages }}'
-      tags:
-        - install_kube_packages
-
-    - name: Prompt To Continue On To Configuring Control Nodes
-      ansible.builtin.pause:
-        prompt: Press RETURN when you want to continue configuring the Control nodes!
-
-- name: Setup Controller Nodes
-  gather_facts: true
-  hosts: controllers
-  become: true
-
-  tasks:
-    - name: 1. Initialize Cluster
-      ansible.builtin.shell: |
-        set -o pipefail
-        sudo kubeadm init --control-plane-endpoint={{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }} --pod-network-cidr=10.244.0.0/16
-      register: init_cluster_output
-      changed_when: init_cluster_output.rc != 0
-      args:
-        executable: /bin/bash
-
-    - name: 2.1 Create .kube Directory
-      ansible.builtin.file:
-        path: .kube
-        state: directory
-        mode: '0755'
-      tags:
-        - kube_admin_config
-
-    - name: 2.2 Copy Kubernetes Admin Config
-      ansible.builtin.copy:
-        remote_src: true
-        src: /etc/kubernetes/admin.conf
-        dest: .kube/config
-        mode: '0755'
-      tags:
-        - kube_admin_config
-
-    - name: 2.3 Change Config File Permission
-      ansible.builtin.command: chown {{ ansible_env.USER }}:{{ ansible_env.USER }} ".kube/config"
-      changed_when: false
-      when: not ansible_env.HOME is undefined
-      tags:
-        - kube_admin_config
-
-    - name: 3. Install An Overlay Network
-      ansible.builtin.shell: |
-        set -o pipefail
-        kubectl apply -f https://raw.githubusercontent.com/flannel-io/flannel/master/Documentation/kube-flannel.yml
-      register: init_cluster_output
-      become: false
-      changed_when: init_cluster_output.rc != 0
-      args:
-        executable: /bin/bash
-
-    - name: 4.1 Execute Join String Generation Command
-      ansible.builtin.command: kubeadm token create --print-join-command
-      become: false
-      register: join_output
-      changed_when: false
-      tags:
-        - join_string
-
-    - name: 4.2 Display Join String
-      ansible.builtin.debug:
-        msg: 'Join Command : {{ join_output.stdout }}'
-      tags:
-        - join_string
-
-    - name: Copy Connection String To A Remote File
-      ansible.builtin.template:
-        src: k8s_worker_node_connection.j2
-        dest: worker_conn_string
-        mode: '0755'
-
-    - name: Check Connection String File Exists
-      ansible.builtin.stat:
-        path: worker_conn_string
-      register: conn_file_path_remote
-
-    - name: Fetch The Remote File
-      ansible.builtin.fetch:
-        src: worker_conn_string
-        dest: Remote_Files/worker_conn_string
-        flat: true
-      when: conn_file_path_remote.stat.exists
-
-    - name: Prompt To Continue On To Configuring Worker Nodes
-      ansible.builtin.pause:
-        prompt: Press RETURN when you want to continue configuring the Worker nodes!
-
-- name: Join Worker Nodes
-  gather_facts: true
-  hosts: nodes
-  become: true
-  vars:
-    node_conn_string: "{{ lookup('ansible.builtin.file', 'Remote_Files/worker_conn_string') }}"
-
-  tasks:
-    - name: 1. Add Worker Nodes To The Controller
-      ansible.builtin.command: '{{ node_conn_string }}'
-      changed_when: false
-      throttle: 1

+ 0 - 1
library/ansible/kubernetes/k8s_worker_node_connection.j2

@@ -1 +0,0 @@
-{{ join_output.stdout }}

+ 0 - 21
library/ansible/portainer/deploy-portainer.yaml

@@ -1,21 +0,0 @@
----
-- name: Deploy portainer-ce latest
-  hosts: "{{ my_hosts | d([]) }}"
-  become: true
-  become_user: "{{ lookup('env','USER') }}"
-
-  tasks:
-    - name: Create new volume
-      community.docker.docker_volume:
-        name: portainer-data
-
-    - name: Deploy portainer
-      community.docker.docker_container:
-        name: portainer
-        image: "docker.io/portainer/portainer-ce"
-        ports:
-          - "9443:9443"
-        volumes:
-          - /run/docker.sock:/var/run/docker.sock
-          - portainer-data:/data
-        restart_policy: unless-stopped

+ 0 - 18
library/ansible/traefik/deploy-traefik.yaml

@@ -1,18 +0,0 @@
----
-- name: Deploy traefik v2.5
-  hosts: "{{ my_hosts | d([]) }}"
-
-  tasks:
-    - name: Deploy traefik
-      community.docker.docker_container:
-        name: traefik
-        image: "traefik:v2.5"
-        ports:
-          - "80:80"
-          - "443:443"
-        volumes:
-          - /run/docker.sock:/run/docker.sock
-          - /etc/traefik:/etc/traefik
-        restart_policy: unless-stopped
-      become: true
-      become_user: "{{ lookup('env', 'USER') }}"

+ 28 - 0
library/ansible/ubuntu-add-sshkey/playbook.yaml.j2

@@ -0,0 +1,28 @@
+---
+- name: {{ playbook_name }}
+  hosts: {{ target_hosts }}
+{% if become %}
+  become: true
+{% endif %}
+{% if options_enabled and not gather_facts %}
+  gather_facts: false
+{% endif %}
+{% if secrets_enabled %}
+  vars_files:
+    - {{ secrets_file }}
+{% endif %}
+
+  tasks:
+    - name: Install public keys
+      ansible.posix.authorized_key:
+        user: "{{ '{{' }} lookup('env', 'USER') {{ '}}' }}"
+        state: present
+        key: "{{ '{{' }} lookup('file', '~/.ssh/id_rsa.pub') {{ '}}' }}"
+
+    - name: Change sudoers file
+      ansible.builtin.lineinfile:
+        path: /etc/sudoers
+        state: present
+        regexp: '^%sudo'
+        line: '%sudo ALL=(ALL) NOPASSWD: ALL'
+        validate: /usr/sbin/visudo -cf %s

+ 23 - 0
library/ansible/ubuntu-add-sshkey/template.yaml

@@ -0,0 +1,23 @@
+---
+kind: ansible
+schema: "1.0"
+metadata:
+  name: Add SSH Key and Configure Sudoers
+  description: >
+    Ansible playbook to add SSH public key to authorized_keys.
+    Also configures passwordless sudo for sudo group.
+
+
+    Project: https://www.openssh.com
+
+    Documentation: https://www.openssh.com/manual.html
+  version: 1.0.0
+  author: Christian Lempa
+  date: '2025-11-11'
+spec:
+  general:
+    vars:
+      playbook_name:
+        default: Add ssh key
+      become:
+        default: true

+ 12 - 2
library/ansible/ubuntu/upd-apt.yaml → library/ansible/ubuntu-apt-update/playbook.yaml.j2

@@ -1,6 +1,16 @@
 ---
-- name: Update and upgrade apt packages
-  hosts: all
+- name: {{ playbook_name }}
+  hosts: {{ target_hosts }}
+{% if become %}
+  become: true
+{% endif %}
+{% if options_enabled and not gather_facts %}
+  gather_facts: false
+{% endif %}
+{% if secrets_enabled %}
+  vars_files:
+    - {{ secrets_file }}
+{% endif %}
 
   tasks:
     - name: Update packages with apt

+ 25 - 0
library/ansible/ubuntu-apt-update/template.yaml

@@ -0,0 +1,25 @@
+---
+kind: ansible
+schema: "1.0"
+metadata:
+  name: Update and Upgrade Ubuntu Packages
+  description: >
+    Ansible playbook to update and upgrade APT packages on Ubuntu systems.
+    Performs apt update and dist-upgrade.
+
+
+    Project: https://ubuntu.com
+
+    Documentation: https://ubuntu.com/server/docs
+  version: 1.0.0
+  author: Christian Lempa
+  date: '2025-11-11'
+spec:
+  general:
+    vars:
+      playbook_name:
+        default: Update and upgrade apt packages
+      target_hosts:
+        default: all
+      become:
+        default: false

+ 11 - 2
library/ansible/ubuntu/inst-vm-core.yaml → library/ansible/ubuntu-vm-core/playbook.yaml.j2

@@ -1,7 +1,16 @@
 ---
-- name: Install core packages for virtual machines
-  hosts: "{{ my_hosts | d([]) }}"
+- name: {{ playbook_name }}
+  hosts: {{ target_hosts }}
+{% if become %}
   become: true
+{% endif %}
+{% if options_enabled and not gather_facts %}
+  gather_facts: false
+{% endif %}
+{% if secrets_enabled %}
+  vars_files:
+    - {{ secrets_file }}
+{% endif %}
 
   tasks:
     - name: Install packages

+ 23 - 0
library/ansible/ubuntu-vm-core/template.yaml

@@ -0,0 +1,23 @@
+---
+kind: ansible
+schema: "1.0"
+metadata:
+  name: Install Ubuntu VM Core Packages
+  description: >
+    Ansible playbook to install essential packages for Ubuntu virtual machines.
+    Includes Prometheus node exporter, NFS client, and QEMU guest agent.
+
+
+    Project: https://ubuntu.com
+
+    Documentation: https://ubuntu.com/server/docs
+  version: 1.0.0
+  author: Christian Lempa
+  date: '2025-11-11'
+spec:
+  general:
+    vars:
+      playbook_name:
+        default: Install core packages for virtual machines
+      become:
+        default: true

+ 0 - 19
library/ansible/ubuntu/config-add-sshkey.yaml

@@ -1,19 +0,0 @@
----
-- name: Add ssh key
-  hosts: "{{ my_hosts | d([]) }}"
-  become: true
-
-  tasks:
-    - name: Install public keys
-      ansible.posix.authorized_key:
-        user: "{{ lookup('env', 'USER') }}"
-        state: present
-        key: "{{ lookup('file', '~/.ssh/id_rsa.pub') }}"
-
-    - name: Change sudoers file
-      ansible.builtin.lineinfile:
-        path: /etc/sudoers
-        state: present
-        regexp: '^%sudo'
-        line: '%sudo ALL=(ALL) NOPASSWD: ALL'
-        validate: /usr/sbin/visudo -cf %s

+ 0 - 11
library/ansible/ubuntu/inst-qemu-agent.yaml

@@ -1,11 +0,0 @@
----
-- name: Install qemu-guest-agent package
-  hosts: all
-  become: true
-  become_method: ansible.builtin.sudo
-
-  tasks:
-    - name: Install qemu-guest-agent
-      ansible.builtin.apt:
-        name: qemu-guest-agent
-        state: present

+ 0 - 12
library/ansible/ubuntu/inst-zsh.yaml

@@ -1,12 +0,0 @@
----
-- name: Install zsh
-  hosts: "{{ my_hosts | d([]) }}"
-  become: true
-
-  tasks:
-    - name: Install zsh
-      ansible.builtin.apt:
-        name: zsh
-        state: present
-        update_cache: true
-      become: true

+ 0 - 25
library/ansible/ubuntu/maint-diskspace.yaml

@@ -1,25 +0,0 @@
----
-- name: Check disk space
-  hosts: "{{ my_hosts | d([]) }}"
-
-  tasks:
-    - name: Check disk space available
-      ansible.builtin.shell:
-        cmd: |
-          set -euo pipefail
-          df -Ph / | awk 'NR==2 {print $5}'
-        executable: /bin/bash
-      changed_when: false
-      check_mode: false
-      register: disk_usage
-
-#   - name: Send discord message when disk space is over 80%
-#     uri:
-#       url: "your-webhook"
-#       method: POST
-#       body_format: json
-#       body: '{"content": "Disk space on {{ inventory_hostname }} is above 80%!"}'
-#       headers:
-#         Content-Type: application/json
-#       status_code: 204
-#     when: disk_usage.stdout[:-1]|int > 80

+ 0 - 16
library/ansible/ubuntu/maint-reboot-required.yaml

@@ -1,16 +0,0 @@
----
-- name: Check if system reboot is required
-  hosts: "{{ my_hosts | d([]) }}"
-  become: true
-
-  tasks:
-    - name: Check if system reboot is required
-      become: true
-      ansible.builtin.stat:
-        path: /run/reboot-required
-      register: reboot_required
-
-    - name: Report if reboot is required
-      ansible.builtin.debug:
-        msg: "Reboot is required"
-      when: reboot_required.stat.exists

+ 0 - 9
library/ansible/ubuntu/maint-reboot.yaml

@@ -1,9 +0,0 @@
----
-- name: Reboot machine
-  hosts: "{{ my_hosts | d([]) }}"
-  become: true
-
-  tasks:
-    - name: Reboot machine
-      ansible.builtin.reboot:
-        reboot_timeout: 3600

+ 0 - 16
library/ansible/wireguard/inst-wireguard.yaml

@@ -1,16 +0,0 @@
----
-- name: Install wireguard
-  hosts: "{{ my_hosts | d([]) }}"
-  become: true
-
-  tasks:
-    - name: Install wireguard
-      ansible.builtin.apt:
-        name: wireguard
-        update_cache: true
-
-    - name: Generate private and public keypair
-      ansible.builtin.shell: |
-        wg genkey | tee privatekey | wg pubkey > publickey
-        chmod 0400 privatekey
-        chmod 0400 publickey

+ 12 - 0
library/compose/grafana/.env.j2

@@ -0,0 +1,12 @@
+{% if not swarm_enabled %}
+{% if database_type == 'postgres' %}
+# Database Configuration
+GRAFANA_DB_PASSWORD={{ database_password }}
+{% endif %}
+
+{% if authentik_enabled %}
+# OAuth Configuration
+GRAFANA_OAUTH_CLIENT_ID={{ authentik_client_id }}
+GRAFANA_OAUTH_CLIENT_SECRET={{ authentik_client_secret }}
+{% endif %}
+{% endif %}

+ 1 - 0
library/compose/grafana/.env.secret.grafana_db_password.j2

@@ -0,0 +1 @@
+{% if swarm_enabled and database_type == 'postgres' %}{{ database_password }}{% endif %}

+ 1 - 0
library/compose/grafana/.env.secret.grafana_oauth_client_id.j2

@@ -0,0 +1 @@
+{% if swarm_enabled and authentik_enabled %}{{ authentik_client_id }}{% endif %}

+ 1 - 0
library/compose/grafana/.env.secret.grafana_oauth_client_secret.j2

@@ -0,0 +1 @@
+{% if swarm_enabled and authentik_enabled %}{{ authentik_client_secret }}{% endif %}

+ 81 - 2
library/compose/grafana/compose.yaml.j2

@@ -7,6 +7,42 @@ services:
     {% endif %}
     environment:
       - TZ={{ container_timezone }}
+      - UID={{ user_uid }}
+      - GID={{ user_gid }}
+      {% if database_type == 'postgres' %}
+      - GF_DATABASE_TYPE=postgres
+      - GF_DATABASE_HOST={{ database_host }}
+      - GF_DATABASE_NAME={{ database_name }}
+      - GF_DATABASE_USER={{ database_user }}
+      {% if swarm_enabled %}
+      - GF_DATABASE_PASSWORD__FILE=/run/secrets/{{ service_name }}_db_password
+      {% else %}
+      - GF_DATABASE_PASSWORD=${GRAFANA_DB_PASSWORD}
+      {% endif %}
+      - GF_DATABASE_SSL_MODE=disable
+      {% endif %}
+      {% if authentik_enabled %}
+      - GF_AUTH_GENERIC_OAUTH_ENABLED=true
+      - GF_AUTH_GENERIC_OAUTH_NAME={{ authentik_slug }}
+      {% if swarm_enabled %}
+      - GF_AUTH_GENERIC_OAUTH_CLIENT_ID__FILE=/run/secrets/{{ service_name }}_oauth_client_id
+      - GF_AUTH_GENERIC_OAUTH_CLIENT_SECRET__FILE=/run/secrets/{{ service_name }}_oauth_client_secret
+      {% else %}
+      - GF_AUTH_GENERIC_OAUTH_CLIENT_ID=${GRAFANA_OAUTH_CLIENT_ID}
+      - GF_AUTH_GENERIC_OAUTH_CLIENT_SECRET=${GRAFANA_OAUTH_CLIENT_SECRET}
+      {% endif %}
+      - GF_AUTH_GENERIC_OAUTH_SCOPES=openid profile email
+      - GF_AUTH_GENERIC_OAUTH_AUTH_URL={{ authentik_url }}/application/o/authorize/
+      - GF_AUTH_GENERIC_OAUTH_TOKEN_URL={{ authentik_url }}/application/o/token/
+      - GF_AUTH_GENERIC_OAUTH_API_URL={{ authentik_url }}/application/o/userinfo/
+      - GF_AUTH_SIGNOUT_REDIRECT_URL={{ authentik_url }}/application/o/{{ authentik_slug }}/end-session/
+      - GF_AUTH_OAUTH_AUTO_LOGIN=true
+      {% if traefik_enabled %}
+      - GF_SERVER_ROOT_URL=https://{{ traefik_host }}
+      {% endif %}
+      - GF_AUTH_OAUTH_ALLOW_INSECURE_EMAIL_LOOKUP=true
+      - GF_AUTH_GENERIC_OAUTH_SKIP_ORG_ROLE_SYNC=true
+      {% endif %}
     {% if network_mode == 'host' %}
     network_mode: host
     {% else %}
@@ -33,7 +69,21 @@ services:
       {% endif %}
     {% endif %}
     volumes:
-      - grafana-data:/var/lib/grafana
+      {% if volume_mode == 'mount' %}
+      - {{ volume_mount_path }}/data:/var/lib/grafana:rw
+      {% else %}
+      - {{ service_name }}-data:/var/lib/grafana
+      {% endif %}
+    {% if swarm_enabled %}
+    secrets:
+      {% if database_type == 'postgres' %}
+      - {{ service_name }}_db_password
+      {% endif %}
+      {% if authentik_enabled %}
+      - {{ service_name }}_oauth_client_id
+      - {{ service_name }}_oauth_client_secret
+      {% endif %}
+    {% endif %}
     {% if traefik_enabled and not swarm_enabled %}
     labels:
       - traefik.enable=true
@@ -56,6 +106,11 @@ services:
       {% if swarm_placement_mode == 'replicated' %}
       replicas: {{ swarm_replicas }}
       {% endif %}
+      {% if swarm_placement_host %}
+      placement:
+        constraints:
+          - node.hostname == {{ swarm_placement_host }}
+      {% endif %}
       restart_policy:
         condition: on-failure
       {% if traefik_enabled %}
@@ -76,9 +131,19 @@ services:
       {% endif %}
     {% endif %}
 
+{% if volume_mode == 'local' %}
+volumes:
+  {{ service_name }}-data:
+    driver: local
+{% elif volume_mode == 'nfs' %}
 volumes:
-  grafana-data:
+  {{ service_name }}-data:
     driver: local
+    driver_opts:
+      type: nfs
+      o: addr={{ volume_nfs_server }},{{ volume_nfs_options }}
+      device: ":{{ volume_nfs_path }}"
+{% endif %}
 
 {% if network_mode != 'host' %}
 networks:
@@ -107,3 +172,17 @@ networks:
     external: true
   {% endif %}
 {% endif %}
+
+{% if swarm_enabled %}
+secrets:
+  {% if database_type == 'postgres' %}
+  {{ service_name }}_db_password:
+    file: ./.env.secret.grafana_db_password
+  {% endif %}
+  {% if authentik_enabled %}
+  {{ service_name }}_oauth_client_id:
+    file: ./.env.secret.grafana_oauth_client_id
+  {{ service_name }}_oauth_client_secret:
+    file: ./.env.secret.grafana_oauth_client_secret
+  {% endif %}
+{% endif %}

+ 26 - 0
library/compose/grafana/template.yaml

@@ -18,6 +18,8 @@ metadata:
   date: '2025-09-28'
   tags:
     - traefik
+    - swarm
+    - authentik
 spec:
   general:
     vars:
@@ -31,6 +33,30 @@ spec:
         description: "Host port for HTTP (3000)"
         type: int
         default: 3000
+  database:
+    vars:
+      database_type:
+        description: "Database backend type"
+        options: ["sqlite", "postgres"]
+        default: "sqlite"
+        extra: "sqlite (default) or postgres for external database"
+      database_host:
+        default: "postgres:5432"
+        needs: "database_type=postgres"
+      database_name:
+        default: "grafana"
+        needs: "database_type=postgres"
+      database_user:
+        default: "grafana"
+        needs: "database_type=postgres"
+      database_password:
+        needs: "database_type=postgres"
+  authentik:
+    vars:
+      authentik_url:
+        default: "https://auth.home.arpa"
+      authentik_slug:
+        default: "grafana"
   traefik:
     vars:
       traefik_host:

+ 149 - 28
library/compose/loki/compose.yaml.j2

@@ -1,40 +1,161 @@
 services:
   {{ service_name }}:
+    {% if not swarm_enabled %}
     container_name: {{ container_name }}
+    hostname: {{ container_hostname }}
+    {% endif %}
     image: docker.io/grafana/loki:3.5.8
     command: "-config.file=/etc/loki/config.yaml"
+    environment:
+      - TZ={{ container_timezone }}
+      - UID={{ user_uid }}
+      - GID={{ user_gid }}
+    {% if network_mode == 'bridge' and not traefik_enabled %}
     ports:
-      # --> (Optional) Remove when using traefik...
-      - "3100:3100"
-      # <--
+      {% if swarm_enabled %}
+      - target: 3100
+        published: {{ ports_http }}
+        protocol: tcp
+        mode: host
+      {% else %}
+      - "{{ ports_http }}:3100"
+      {% endif %}
+    {% endif %}
     volumes:
+      {% if volume_mode == 'mount' %}
+      - {{ volume_mount_path }}/data:/loki:rw
+      {% else %}
+      - {{ service_name }}-data:/loki:rw
+      {% endif %}
+      {% if not swarm_enabled %}
+      {% if volume_mode == 'mount' %}
+      - {{ volume_mount_path }}/config/config.yaml:/etc/loki/config.yaml:ro
+      {% else %}
       - ./config/config.yaml:/etc/loki/config.yaml:ro
-      - data_loki:/loki:rw
-    # --> (Optional) When using traefik...
-    # labels:
-    #   - traefik.enable=true
-    #   - traefik.docker.network=frontend
-    #   # -- Traefik Services
-    #   - traefik.http.services.loki.loadbalancer.server.port=3100
-    #   # -- Traefik Routers
-    #   - traefik.http.routers.loki.entrypoints=websecure
-    #   - traefik.http.routers.loki.rule=Host(`loki-fqdn`)
-    #   - traefik.http.routers.loki.tls=true
-    #   - traefik.http.routers.loki.tls.certresolver=cloudflare
-    #   - traefik.http.routers.loki.service=loki
-    #   # -- (Optional) Authentication
-    #   # - traefik.http.routers.loki.middlewares=authentik-middleware@file
-    # networks:
-    #   - frontend
-    # <--
-    restart: unless-stopped
+      {% endif %}
+      {% endif %}
+    {% if network_mode == 'host' %}
+    network_mode: host
+    {% else %}
+    networks:
+      {% if traefik_enabled %}
+      {{ traefik_network }}:
+      {% endif %}
+      {% if network_mode == 'macvlan' %}
+      {{ network_name }}:
+        ipv4_address: {{ network_macvlan_ipv4_address }}
+      {% elif network_mode == 'bridge' %}
+      {{ network_name }}:
+      {% endif %}
+    {% endif %}
+    {% if swarm_enabled %}
+    configs:
+      - source: {{ service_name }}_config
+        target: /etc/loki/config.yaml
+    deploy:
+      mode: {{ swarm_placement_mode }}
+      {% if swarm_placement_mode == 'replicated' %}
+      replicas: {{ swarm_replicas }}
+      {% endif %}
+      {% if swarm_placement_host %}
+      placement:
+        constraints:
+          - node.hostname == {{ swarm_placement_host }}
+      {% endif %}
+      restart_policy:
+        condition: on-failure
+      {% if traefik_enabled %}
+      labels:
+        - traefik.enable=true
+        - traefik.docker.network={{ traefik_network }}
+        - traefik.http.services.{{ service_name }}-web.loadBalancer.server.port=3100
+        - traefik.http.routers.{{ service_name }}-http.service={{ service_name }}-web
+        - traefik.http.routers.{{ service_name }}-http.rule=Host(`{{ traefik_host }}`)
+        - traefik.http.routers.{{ service_name }}-http.entrypoints={{ traefik_entrypoint }}
+        {% if authentik_enabled %}
+        - traefik.http.routers.{{ service_name }}-http.middlewares={{ authentik_traefik_middleware }}
+        {% endif %}
+        {% if traefik_tls_enabled %}
+        - traefik.http.routers.{{ service_name }}-https.service={{ service_name }}-web
+        - traefik.http.routers.{{ service_name }}-https.rule=Host(`{{ traefik_host }}`)
+        - traefik.http.routers.{{ service_name }}-https.entrypoints={{ traefik_tls_entrypoint }}
+        - traefik.http.routers.{{ service_name }}-https.tls=true
+        - traefik.http.routers.{{ service_name }}-https.tls.certresolver={{ traefik_tls_certresolver }}
+        {% if authentik_enabled %}
+        - traefik.http.routers.{{ service_name }}-https.middlewares={{ authentik_traefik_middleware }}
+        {% endif %}
+        {% endif %}
+      {% endif %}
+    {% else %}
+    {% if traefik_enabled %}
+    labels:
+      - traefik.enable=true
+      - traefik.docker.network={{ traefik_network }}
+      - traefik.http.services.{{ service_name }}-web.loadBalancer.server.port=3100
+      - traefik.http.routers.{{ service_name }}-http.service={{ service_name }}-web
+      - traefik.http.routers.{{ service_name }}-http.rule=Host(`{{ traefik_host }}`)
+      - traefik.http.routers.{{ service_name }}-http.entrypoints={{ traefik_entrypoint }}
+      {% if authentik_enabled %}
+      - traefik.http.routers.{{ service_name }}-http.middlewares={{ authentik_traefik_middleware }}
+      {% endif %}
+      {% if traefik_tls_enabled %}
+      - traefik.http.routers.{{ service_name }}-https.service={{ service_name }}-web
+      - traefik.http.routers.{{ service_name }}-https.rule=Host(`{{ traefik_host }}`)
+      - traefik.http.routers.{{ service_name }}-https.entrypoints={{ traefik_tls_entrypoint }}
+      - traefik.http.routers.{{ service_name }}-https.tls=true
+      - traefik.http.routers.{{ service_name }}-https.tls.certresolver={{ traefik_tls_certresolver }}
+      {% if authentik_enabled %}
+      - traefik.http.routers.{{ service_name }}-https.middlewares={{ authentik_traefik_middleware }}
+      {% endif %}
+      {% endif %}
+    {% endif %}
+    restart: {{ restart_policy }}
+    {% endif %}
 
+{% if volume_mode == 'local' %}
 volumes:
-  data_loki:
+  {{ service_name }}-data:
     driver: local
+{% elif volume_mode == 'nfs' %}
+volumes:
+  {{ service_name }}-data:
+    driver: local
+    driver_opts:
+      type: nfs
+      o: addr={{ volume_nfs_server }},{{ volume_nfs_options }}
+      device: ":{{ volume_nfs_path }}"
+{% endif %}
+
+{% if network_mode != 'host' %}
+networks:
+  {{ network_name }}:
+    {% if network_external %}
+    external: true
+    {% else %}
+    {% if network_mode == 'macvlan' %}
+    driver: macvlan
+    driver_opts:
+      parent: {{ network_macvlan_parent_interface }}
+    ipam:
+      config:
+        - subnet: {{ network_macvlan_subnet }}
+          gateway: {{ network_macvlan_gateway }}
+    name: {{ network_name }}
+    {% elif swarm_enabled %}
+    driver: overlay
+    attachable: true
+    {% else %}
+    driver: bridge
+    {% endif %}
+    {% endif %}
+  {% if traefik_enabled %}
+  {{ traefik_network }}:
+    external: true
+  {% endif %}
+{% endif %}
 
-# --> (Optional) When using traefik...
-# networks:
-#   frontend:
-#     external: true
-# <--
+{% if swarm_enabled %}
+configs:
+  {{ service_name }}_config:
+    file: ./config/config.yaml
+{% endif %}

+ 19 - 4
library/compose/loki/template.yaml

@@ -18,6 +18,8 @@ metadata:
   date: '2025-11-07'
   tags:
     - traefik
+    - swarm
+    - authentik
 spec:
   general:
     vars:
@@ -25,7 +27,20 @@ spec:
         default: loki
       container_name:
         default: loki
-      loki_version:
-        type: str
-        description: Loki version
-        default: latest
+      container_hostname:
+        default: loki
+  ports:
+    vars:
+      ports_http:
+        description: "Loki HTTP API port"
+        type: int
+        default: 3100
+        needs: ["traefik_enabled=false", "network_mode=bridge"]
+  network:
+    vars:
+      network_name:
+        default: "loki_network"
+  traefik:
+    vars:
+      traefik_host:
+        default: "loki.home.arpa"

+ 20 - 19
library/compose/pihole/template.yaml

@@ -7,16 +7,13 @@ metadata:
     Network-wide advertisement and internet tracker blocking application that functions as a DNS blackhole.
     Provides DNS-level content filtering for all network devices, improving browsing performance, privacy, and security.
     Supports custom blocklists, whitelists, and seamless integration with existing network infrastructure.
-
     ##  Swarm Deployment Warning
     Pi-hole uses local storage and configuration files and does NOT support running multiple replicas.
     This template enforces a single replica with node placement constraints to ensure stable DNS resolution.
-
-    Project: https://pi-hole.net/
-
-    Documentation: https://docs.pi-hole.net/
-
-    GitHub: https://github.com/pi-hole/pi-hole
+    ## References
+    * **Project:** https://pi-hole.net/
+    * **Documentation:** https://docs.pi-hole.net/
+    * **GitHub:** https://github.com/pi-hole/pi-hole
   version: 2025.11.0
   author: Christian Lempa
   date: '2025-11-05'
@@ -24,21 +21,25 @@ metadata:
     - traefik
     - swarm
   next_steps: |
+    ### 1. Deploy the Service
     {% if swarm_enabled -%}
-    1. Deploy to Docker Swarm:
-       docker stack deploy -c compose.yaml pihole
-    2. Access the Web Interface:
-       {%- if traefik_enabled == True -%}https://{{ traefik_host }}/admin
-       {%- else -%}https://<your-swarm-node-ip>:{{ ports_https }}/admin{%- endif %}
-    3. Configure devices to use swarm node IP as DNS
+    Deploy to Docker Swarm:
+    ```bash
+    docker stack deploy -c compose.yaml pihole
+    ```
     {% else -%}
-    1. Deploy to Docker:
-       docker compose up -d
-    2. Access the Web Interface:
-       {%- if traefik_enabled == True -%}https://{{ traefik_host }}/admin
-       {%- else -%}https://<your-docker-host-ip>:{{ ports_https }}/admin{%- endif %}
-    3. Configure devices to use docker host IP as DNS
+    Start Pi-hole using Docker Compose:
+    ```bash
+    docker compose up -d
+    ```
     {% endif -%}
+    ### 2. Access the Web Interface
+    {% if traefik_enabled -%}
+    * Navigate to: **https://{{ traefik_host }}/admin**
+    {% else -%}
+    * Navigate to: **http://localhost:{{ ports_http }}/admin**
+    {% endif -%}
+    * Login using the admin password (check `.env.secret` file).
 spec:
   general:
     vars:

+ 80 - 17
library/compose/prometheus/compose.yaml.j2

@@ -16,6 +16,8 @@ services:
       {% endif %}
     environment:
       - TZ={{ container_timezone }}
+      - UID={{ user_uid }}
+      - GID={{ user_gid }}
     {% if network_mode == 'bridge' and not traefik_enabled %}
     ports:
       {% if swarm_enabled %}
@@ -28,79 +30,140 @@ services:
       {% endif %}
     {% endif %}
     volumes:
+      {% if volume_mode == 'mount' %}
+      - {{ volume_mount_path }}/data:/prometheus:rw
+      {% else %}
+      - {{ service_name }}-data:/prometheus
+      {% endif %}
+      {% if not swarm_enabled %}
+      {% if volume_mode == 'mount' %}
+      - {{ volume_mount_path }}/config/prometheus.yaml:/etc/prometheus/prometheus.yaml:ro
+      {% else %}
       - ./config/prometheus.yaml:/etc/prometheus/prometheus.yaml:ro
-      - prometheus-data:/prometheus
-    {% if network_mode == 'bridge' %}
-    networks:
-      {% if network_mode == 'bridge' and not traefik_enabled %}
-      - {{ network_name }}
       {% endif %}
+      {% endif %}
+    {% if network_mode == 'host' %}
+    network_mode: host
+    {% else %}
+    networks:
       {% if traefik_enabled %}
-      - {{ traefik_network }}
+      {{ traefik_network }}:
+      {% endif %}
+      {% if network_mode == 'macvlan' %}
+      {{ network_name }}:
+        ipv4_address: {{ network_macvlan_ipv4_address }}
+      {% elif network_mode == 'bridge' %}
+      {{ network_name }}:
       {% endif %}
     {% endif %}
     {% if swarm_enabled %}
+    configs:
+      - source: {{ service_name }}_config
+        target: /etc/prometheus/prometheus.yaml
     deploy:
-      mode: replicated
-      replicas: 1
+      mode: {{ swarm_placement_mode }}
+      {% if swarm_placement_mode == 'replicated' %}
+      replicas: {{ swarm_replicas }}
+      {% endif %}
+      {% if swarm_placement_host %}
       placement:
         constraints:
           - node.hostname == {{ swarm_placement_host }}
+      {% endif %}
+      restart_policy:
+        condition: on-failure
       {% if traefik_enabled %}
       labels:
         - traefik.enable=true
-        - traefik.http.services.{{ service_name }}-web.loadbalancer.server.port=9090
+        - traefik.docker.network={{ traefik_network }}
+        - traefik.http.services.{{ service_name }}-web.loadBalancer.server.port=9090
         - traefik.http.routers.{{ service_name }}-http.service={{ service_name }}-web
         - traefik.http.routers.{{ service_name }}-http.rule=Host(`{{ traefik_host }}`)
         - traefik.http.routers.{{ service_name }}-http.entrypoints={{ traefik_entrypoint }}
+        {% if authentik_enabled %}
+        - traefik.http.routers.{{ service_name }}-http.middlewares={{ authentik_traefik_middleware }}
+        {% endif %}
         {% if traefik_tls_enabled %}
+        - traefik.http.routers.{{ service_name }}-https.service={{ service_name }}-web
         - traefik.http.routers.{{ service_name }}-https.rule=Host(`{{ traefik_host }}`)
         - traefik.http.routers.{{ service_name }}-https.entrypoints={{ traefik_tls_entrypoint }}
         - traefik.http.routers.{{ service_name }}-https.tls=true
         - traefik.http.routers.{{ service_name }}-https.tls.certresolver={{ traefik_tls_certresolver }}
+        {% if authentik_enabled %}
+        - traefik.http.routers.{{ service_name }}-https.middlewares={{ authentik_traefik_middleware }}
+        {% endif %}
         {% endif %}
       {% endif %}
     {% else %}
     {% if traefik_enabled %}
     labels:
       - traefik.enable=true
-      - traefik.http.services.{{ service_name }}-web.loadbalancer.server.port=9090
+      - traefik.docker.network={{ traefik_network }}
+      - traefik.http.services.{{ service_name }}-web.loadBalancer.server.port=9090
       - traefik.http.routers.{{ service_name }}-http.service={{ service_name }}-web
       - traefik.http.routers.{{ service_name }}-http.rule=Host(`{{ traefik_host }}`)
       - traefik.http.routers.{{ service_name }}-http.entrypoints={{ traefik_entrypoint }}
+      {% if authentik_enabled %}
+      - traefik.http.routers.{{ service_name }}-http.middlewares={{ authentik_traefik_middleware }}
+      {% endif %}
       {% if traefik_tls_enabled %}
+      - traefik.http.routers.{{ service_name }}-https.service={{ service_name }}-web
       - traefik.http.routers.{{ service_name }}-https.rule=Host(`{{ traefik_host }}`)
       - traefik.http.routers.{{ service_name }}-https.entrypoints={{ traefik_tls_entrypoint }}
       - traefik.http.routers.{{ service_name }}-https.tls=true
       - traefik.http.routers.{{ service_name }}-https.tls.certresolver={{ traefik_tls_certresolver }}
+      {% if authentik_enabled %}
+      - traefik.http.routers.{{ service_name }}-https.middlewares={{ authentik_traefik_middleware }}
+      {% endif %}
       {% endif %}
     {% endif %}
     restart: {{ restart_policy }}
     {% endif %}
 
+{% if volume_mode == 'local' %}
 volumes:
-  prometheus-data:
+  {{ service_name }}-data:
     driver: local
+{% elif volume_mode == 'nfs' %}
+volumes:
+  {{ service_name }}-data:
+    driver: local
+    driver_opts:
+      type: nfs
+      o: addr={{ volume_nfs_server }},{{ volume_nfs_options }}
+      device: ":{{ volume_nfs_path }}"
+{% endif %}
 
-{% if network_mode == 'bridge' %}
+{% if network_mode != 'host' %}
 networks:
-  {% if network_mode == 'bridge' and not traefik_enabled %}
   {{ network_name }}:
     {% if network_external %}
     external: true
     {% else %}
-    {% if swarm_enabled %}
+    {% if network_mode == 'macvlan' %}
+    driver: macvlan
+    driver_opts:
+      parent: {{ network_macvlan_parent_interface }}
+    ipam:
+      config:
+        - subnet: {{ network_macvlan_subnet }}
+          gateway: {{ network_macvlan_gateway }}
+    name: {{ network_name }}
+    {% elif swarm_enabled %}
     driver: overlay
     attachable: true
     {% else %}
     driver: bridge
     {% endif %}
     {% endif %}
-  {% endif %}
   {% if traefik_enabled %}
   {{ traefik_network }}:
-    {% if traefik_network_external %}
     external: true
-    {% endif %}
   {% endif %}
 {% endif %}
+
+{% if swarm_enabled %}
+configs:
+  {{ service_name }}_config:
+    file: ./config/prometheus.yaml
+{% endif %}

+ 2 - 0
library/compose/prometheus/template.yaml

@@ -23,6 +23,7 @@ metadata:
   tags:
     - traefik
     - swarm
+    - authentik
   next_steps: |
     {% if swarm_enabled -%}
     1. Deploy to Docker Swarm:
@@ -66,6 +67,7 @@ spec:
         type: str
         description: "External URL for generating links (optional)"
         default: ""
+        optional: true
         extra: "Use if behind reverse proxy, e.g., https://prometheus.example.com"
   ports:
     vars:

+ 2 - 2
library/compose/traefik/config/files/middlewares.yaml.j2

@@ -22,8 +22,8 @@ http:
 {% endif -%}
 {% if authentik_enabled -%}
     # Authentik Forward Auth Middleware
-    # Use in service labels: traefik.http.routers.myservice.middlewares={{ traefik_authentik_middleware_name }}@file
-    {{ traefik_authentik_middleware_name }}:
+    # Use in service labels: traefik.http.routers.myservice.middlewares={{ authentik_traefik_middleware }}
+    {{ authentik_traefik_middleware.split('@')[0] }}:
       forwardAuth:
         address: {{ authentik_outpost_url }}/outpost.goauthentik.io/auth/traefik
         trustForwardHeader: true

+ 3 - 10
library/compose/traefik/template.yaml

@@ -161,18 +161,11 @@ spec:
     title: "Authentik Middleware"
     description: "Enable Authentik SSO integration for Traefik"
     vars:
-      authentik_enabled:
-        type: "bool"
-        description: "Enable Authentik SSO integration"
-        default: false
       authentik_outpost_url:
         type: "url"
         description: "Authentik outpost URL (e.g., http://authentik-outpost:9000)"
         default: "http://authentik-outpost:9000"
         needs: "authentik_enabled"
-      traefik_authentik_middleware_name:
-        type: "str"
-        description: "Name of the Authentik middleware"
-        default: "authentik"
-        needs: "authentik_enabled"
-        extra: "Reference in router labels as '{name}@file'"
+      authentik_traefik_middleware:
+        default: "authentik-middleware@file"
+        extra: "Use this value in service router labels to enable authentication"

+ 1 - 1
library/helm/authentik/values.yaml.j2

@@ -30,7 +30,7 @@ server:
     ingressClassName: traefik
 {% if traefik_tls_enabled and traefik_tls_certmanager %}
     annotations:
-      cert-manager.io/cluster-issuer: {{ traefik_tls_certmanager_issuer }}
+      cert-manager.io/cluster-issuer: {{ certmanager_issuer }}
 {% endif %}
     hosts:
       - {{ traefik_host }}

+ 15 - 0
library/kubernetes/certmanager-certificate/certificate.yaml.j2

@@ -0,0 +1,15 @@
+---
+apiVersion: cert-manager.io/v1
+kind: Certificate
+metadata:
+  name: {{ resource_name }}
+  namespace: {{ namespace }}
+spec:
+  secretName: {{ secret_name }}
+  issuerRef:
+    name: {{ certmanager_issuer }}
+    kind: {{ certmanager_issuer_kind }}
+  dnsNames:
+{%- for dns_name in dns_names.split(',') %}
+    - {{ dns_name.strip() }}
+{%- endfor %}

+ 0 - 15
library/kubernetes/certmanager-certificate/certmanager-certificate.yaml

@@ -1,15 +0,0 @@
----
-# --> (Example) Create a Certificate for your hostname...
-# apiVersion: cert-manager.io/v1
-# kind: Certificate
-# metadata:
-#   name: your-certificate  # <-- Replace with your certificate name
-#   namespace: your-namespace  # <-- Replace with your namespace
-# spec:
-#   secretName: your-secret  # <-- Replace with your secret name
-#   issuerRef:
-#     name: clusterissuer  # <-- Replace with your issuer name
-#     kind: ClusterIssuer
-#   dnsNames:
-#     - your-hostname  # <-- Replace with your hostname
-# <--

+ 37 - 0
library/kubernetes/certmanager-certificate/template.yaml

@@ -0,0 +1,37 @@
+---
+kind: kubernetes
+schema: "1.0"
+metadata:
+  name: Cert-Manager Certificate
+  description: >
+    Cert-manager Certificate resource for requesting TLS certificates from an Issuer or ClusterIssuer.
+
+
+    The certificate will be stored in a Kubernetes secret.
+
+
+    Requires cert-manager to be installed in the cluster.
+
+
+    Project: https://cert-manager.io
+
+    Documentation: https://cert-manager.io/docs/usage/certificate/
+  version: 1.16.2
+  author: Christian Lempa
+  date: '2025-01-11'
+spec:
+  general:
+    vars:
+      resource_name:
+        default: tls-certificate
+      secret_name:
+        type: str
+        description: Name of secret to store the certificate
+  dns:
+    title: DNS Settings
+    vars:
+      dns_names:
+        type: str
+        description: DNS names for certificate (comma-separated, e.g., example.com,*.example.com)
+  certmanager:
+    toggle: []

+ 0 - 17
library/kubernetes/certmanager-clusterissuer/certmanager-clusterissuer.yaml

@@ -1,17 +0,0 @@
----
-apiVersion: cert-manager.io/v1
-kind: ClusterIssuer
-metadata:
-  name: cloudflare-clusterissuer
-spec:
-  acme:
-    email: your-email@address  # <-- Replace with your email address
-    server: https://acme-v02.api.letsencrypt.org/directory
-    privateKeySecretRef:
-      name: cloudflare-clusterissuer-account-key
-    solvers:
-      - dns01:
-          cloudflare:
-            apiTokenSecretRef:
-              name: cloudflare-api-token-secret
-              key: api-token

+ 17 - 0
library/kubernetes/certmanager-clusterissuer/clusterissuer.yaml.j2

@@ -0,0 +1,17 @@
+---
+apiVersion: cert-manager.io/v1
+kind: ClusterIssuer
+metadata:
+  name: {{ resource_name }}
+spec:
+  acme:
+    email: {{ acme_email }}
+    server: {{ acme_server }}
+    privateKeySecretRef:
+      name: {{ privatekey_secret_name }}
+    solvers:
+      - dns01:
+          cloudflare:
+            apiTokenSecretRef:
+              name: {{ api_token_secret_name }}
+              key: {{ api_token_secret_key }}

+ 42 - 0
library/kubernetes/certmanager-clusterissuer/template.yaml

@@ -0,0 +1,42 @@
+---
+kind: kubernetes
+schema: "1.0"
+metadata:
+  name: Cert-Manager ClusterIssuer (Cloudflare)
+  description: >
+    Cert-manager ClusterIssuer for automatic TLS certificate management with Let's Encrypt and Cloudflare DNS-01 challenge.
+
+
+    Requires cert-manager to be installed in the cluster.
+
+
+    Project: https://cert-manager.io
+
+    Documentation: https://cert-manager.io/docs/configuration/acme/dns01/cloudflare/
+  version: 1.16.2
+  author: Christian Lempa
+  date: '2025-01-11'
+spec:
+  general:
+    vars:
+      resource_name:
+        default: cloudflare-clusterissuer
+      acme_email:
+        type: email
+        description: Email address for ACME account registration
+      acme_server:
+        type: url
+        description: ACME server URL
+        default: https://acme-v02.api.letsencrypt.org/directory
+      privatekey_secret_name:
+        type: str
+        description: Name of secret to store ACME account private key
+        default: cloudflare-clusterissuer-account-key
+      api_token_secret_name:
+        type: str
+        description: Name of secret containing Cloudflare API token
+        default: cloudflare-api-token-secret
+      api_token_secret_key:
+        type: str
+        description: Key name in secret containing Cloudflare API token
+        default: api-token

+ 18 - 0
library/kubernetes/certmanager-issuer/issuer.yaml.j2

@@ -0,0 +1,18 @@
+---
+apiVersion: cert-manager.io/v1
+kind: Issuer
+metadata:
+  name: {{ resource_name }}
+  namespace: {{ namespace }}
+spec:
+  acme:
+    email: {{ acme_email }}
+    server: {{ acme_server }}
+    privateKeySecretRef:
+      name: {{ privatekey_secret_name }}
+    solvers:
+      - dns01:
+          cloudflare:
+            apiTokenSecretRef:
+              name: {{ api_token_secret_name }}
+              key: {{ api_token_secret_key }}

+ 45 - 0
library/kubernetes/certmanager-issuer/template.yaml

@@ -0,0 +1,45 @@
+---
+kind: kubernetes
+schema: "1.0"
+metadata:
+  name: Cert-Manager Issuer (Cloudflare)
+  description: >
+    Cert-manager Issuer for automatic TLS certificate management with Let's Encrypt and Cloudflare DNS-01 challenge.
+
+
+    Issuer is namespace-scoped (unlike ClusterIssuer which is cluster-wide).
+
+
+    Requires cert-manager to be installed in the cluster.
+
+
+    Project: https://cert-manager.io
+
+    Documentation: https://cert-manager.io/docs/configuration/acme/dns01/cloudflare/
+  version: 1.16.2
+  author: Christian Lempa
+  date: '2025-01-11'
+spec:
+  general:
+    vars:
+      resource_name:
+        default: cloudflare-issuer
+      acme_email:
+        type: email
+        description: Email address for ACME account registration
+      acme_server:
+        type: url
+        description: ACME server URL
+        default: https://acme-v02.api.letsencrypt.org/directory
+      privatekey_secret_name:
+        type: str
+        description: Name of secret to store ACME account private key
+        default: cloudflare-issuer-account-key
+      api_token_secret_name:
+        type: str
+        description: Name of secret containing Cloudflare API token
+        default: cloudflare-api-token-secret
+      api_token_secret_key:
+        type: str
+        description: Key name in secret containing Cloudflare API token
+        default: api-token

+ 12 - 0
library/kubernetes/core-configmap/configmap.yaml.j2

@@ -0,0 +1,12 @@
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ resource_name }}
+  namespace: {{ namespace }}
+data:
+  # Add your configuration key-value pairs here
+  # Example:
+  # app.properties: |
+  #   color.good=green
+  #   color.bad=red

+ 21 - 0
library/kubernetes/core-configmap/template.yaml

@@ -0,0 +1,21 @@
+---
+kind: kubernetes
+schema: "1.0"
+metadata:
+  name: Kubernetes ConfigMap
+  description: >
+    Kubernetes ConfigMap resource for storing non-sensitive configuration data as key-value pairs.
+
+
+    ConfigMaps allow you to decouple configuration from container images.
+
+
+    Documentation: https://kubernetes.io/docs/concepts/configuration/configmap/
+  version: 1.31.0
+  author: Christian Lempa
+  date: '2025-01-11'
+spec:
+  general:
+    vars:
+      resource_name:
+        default: app-config

+ 19 - 0
library/kubernetes/core-ingress/ingress.yaml.j2

@@ -0,0 +1,19 @@
+---
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: {{ resource_name }}
+  namespace: {{ namespace }}
+spec:
+  ingressClassName: {{ ingress_class }}
+  rules:
+    - host: {{ ingress_host }}
+      http:
+        paths:
+          - path: /
+            pathType: Prefix
+            backend:
+              service:
+                name: {{ service_name }}
+                port:
+                  number: {{ service_port }}

+ 39 - 0
library/kubernetes/core-ingress/template.yaml

@@ -0,0 +1,39 @@
+---
+kind: kubernetes
+schema: "1.0"
+metadata:
+  name: Kubernetes Ingress
+  description: >
+    Kubernetes Ingress resource for HTTP/HTTPS routing to services.
+
+
+    Requires an Ingress controller (e.g., nginx-ingress, Traefik) to be installed.
+
+
+    Documentation: https://kubernetes.io/docs/concepts/services-networking/ingress/
+  version: 1.31.0
+  author: Christian Lempa
+  date: '2025-01-11'
+spec:
+  general:
+    vars:
+      resource_name:
+        default: app-ingress
+      ingress_class:
+        type: str
+        description: Ingress class name
+        default: nginx
+      ingress_host:
+        type: hostname
+        description: Hostname for the ingress
+      service_name:
+        type: str
+        description: Backend service name
+      service_port:
+        type: int
+        description: Backend service port
+        default: 80
+  traefik:
+    vars:
+      traefik_enabled:
+        default: false

+ 11 - 0
library/kubernetes/core-ingressclass/ingressclass.yaml.j2

@@ -0,0 +1,11 @@
+---
+apiVersion: networking.k8s.io/v1
+kind: IngressClass
+metadata:
+  name: {{ resource_name }}
+{% if is_default %}
+  annotations:
+    ingressclass.kubernetes.io/is-default-class: "true"
+{% endif %}
+spec:
+  controller: {{ controller }}

+ 29 - 0
library/kubernetes/core-ingressclass/template.yaml

@@ -0,0 +1,29 @@
+---
+kind: kubernetes
+schema: "1.0"
+metadata:
+  name: Kubernetes IngressClass
+  description: >
+    Kubernetes IngressClass for specifying which Ingress controller should handle Ingress resources.
+
+
+    IngressClass is cluster-scoped and defines ingress controller implementations.
+
+
+    Documentation: https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-class
+  version: 1.31.0
+  author: Christian Lempa
+  date: '2025-01-11'
+spec:
+  general:
+    vars:
+      resource_name:
+        default: nginx
+      controller:
+        type: str
+        description: Ingress controller identifier (e.g., k8s.io/ingress-nginx)
+        default: k8s.io/ingress-nginx
+      is_default:
+        type: bool
+        description: Set as default IngressClass
+        default: false

+ 16 - 0
library/kubernetes/core-persistentvolume/pv.yaml.j2

@@ -0,0 +1,16 @@
+---
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+  name: {{ resource_name }}
+spec:
+  capacity:
+    storage: {{ storage_size }}
+{% if storage_class %}
+  storageClassName: {{ storage_class }}
+{% endif %}
+  accessModes:
+    - {{ access_mode }}
+  persistentVolumeReclaimPolicy: {{ reclaim_policy }}
+  hostPath:
+    path: {{ host_path }}

+ 50 - 0
library/kubernetes/core-persistentvolume/template.yaml

@@ -0,0 +1,50 @@
+---
+kind: kubernetes
+schema: "1.0"
+metadata:
+  name: Kubernetes PersistentVolume
+  description: >
+    Kubernetes PersistentVolume for cluster-wide storage resources.
+
+
+    PVs are cluster-scoped and typically provisioned by administrators.
+
+
+    Documentation: https://kubernetes.io/docs/concepts/storage/persistent-volumes/
+  version: 1.31.0
+  author: Christian Lempa
+  date: '2025-01-11'
+spec:
+  general:
+    vars:
+      resource_name:
+        default: pv-hostpath
+      storage_class:
+        type: str
+        description: Storage class name
+        default: ""
+      storage_size:
+        type: str
+        description: Storage capacity (e.g., 10Gi, 1Ti)
+        default: 10Gi
+      access_mode:
+        type: enum
+        description: Access mode
+        options:
+          - ReadWriteOnce
+          - ReadOnlyMany
+          - ReadWriteMany
+          - ReadWriteOncePod
+        default: ReadWriteOnce
+      reclaim_policy:
+        type: enum
+        description: Reclaim policy (Recycle is deprecated; prefer Retain or Delete)
+        options:
+          - Retain
+          - Recycle
+          - Delete
+        default: Retain
+      host_path:
+        type: str
+        description: Host path for local storage (e.g., /mnt/data); hostPath volumes are node-local and not shared across nodes
+        default: /mnt/data

+ 15 - 0
library/kubernetes/core-persistentvolumeclaim/pvc.yaml.j2

@@ -0,0 +1,15 @@
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: {{ resource_name }}
+  namespace: {{ namespace }}
+spec:
+{% if storage_class %}
+  storageClassName: {{ storage_class }}
+{% endif %}
+  accessModes:
+    - {{ access_mode }}
+  resources:
+    requests:
+      storage: {{ storage_size }}

+ 38 - 0
library/kubernetes/core-persistentvolumeclaim/template.yaml

@@ -0,0 +1,38 @@
+---
+kind: kubernetes
+schema: "1.0"
+metadata:
+  name: Kubernetes PersistentVolumeClaim
+  description: >
+    Kubernetes PersistentVolumeClaim for requesting persistent storage.
+
+
+    PVCs are used by Pods to claim durable storage.
+
+
+    Documentation: https://kubernetes.io/docs/concepts/storage/persistent-volumes/
+  version: 1.31.0
+  author: Christian Lempa
+  date: '2025-01-11'
+spec:
+  general:
+    vars:
+      resource_name:
+        default: app-pvc
+      storage_class:
+        type: str
+        description: Storage class name (leave empty for default)
+        default: ""
+      storage_size:
+        type: str
+        description: Storage size (e.g., 10Gi, 1Ti)
+        default: 10Gi
+      access_mode:
+        type: enum
+        description: Access mode
+        options:
+          - ReadWriteOnce
+          - ReadOnlyMany
+          - ReadWriteMany
+          - ReadWriteOncePod
+        default: ReadWriteOnce

+ 0 - 9
library/kubernetes/core-secret/core-secret.yaml

@@ -1,9 +0,0 @@
----
-apiVersion: v1
-kind: Secret
-metadata:
-  name: cloudflare-api-token-secret
-  namespace: cert-manager
-type: Opaque
-data:
-  api-token: your-api-token  # <-- Replace with your Cloudflare API token

+ 31 - 0
library/kubernetes/core-secret/template.yaml

@@ -0,0 +1,31 @@
+---
+kind: kubernetes
+schema: "1.0"
+metadata:
+  name: Kubernetes Secret (Opaque)
+  description: >
+    Basic Kubernetes Secret with Opaque type for storing sensitive data like API tokens, passwords, or keys.
+
+
+    Values under "data" must be base64 encoded; use "stringData" to provide plain-text values.
+
+
+    Documentation: https://kubernetes.io/docs/concepts/configuration/secret/
+  version: 1.31.0
+  author: Christian Lempa
+  date: '2025-01-11'
+spec:
+  general:
+    vars:
+      resource_name:
+        default: api-token-secret
+      secret_type:
+        type: str
+        description: Secret type
+        default: Opaque
+      api_token:
+        type: str
+        description: API token value (plain text; encoded automatically only when the manifest places it under stringData)
+        default: ""
+        sensitive: true
+        autogenerated: true

+ 15 - 0
library/kubernetes/core-service/service.yaml.j2

@@ -0,0 +1,15 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ resource_name }}
+  namespace: {{ namespace }}
+spec:
+  type: {{ service_type }}
+  selector:
+    app.kubernetes.io/name: {{ app_selector }}
+  ports:
+    - name: {{ protocol | lower }}
+      protocol: {{ protocol }}
+      port: {{ service_port }}
+      targetPort: {{ target_port }}

+ 49 - 0
library/kubernetes/core-service/template.yaml

@@ -0,0 +1,49 @@
+---
+kind: kubernetes
+schema: "1.0"
+metadata:
+  name: Kubernetes Service
+  description: >
+    Kubernetes Service resource for exposing applications running on a set of Pods.
+
+
+    Services provide stable network endpoints and load balancing.
+
+
+    Documentation: https://kubernetes.io/docs/concepts/services-networking/service/
+  version: 1.31.0
+  author: Christian Lempa
+  date: '2025-01-11'
+spec:
+  general:
+    vars:
+      resource_name:
+        default: app-service
+      service_type:
+        type: enum
+        description: Service type
+        options:
+          - ClusterIP
+          - NodePort
+          - LoadBalancer
+          - ExternalName
+        default: ClusterIP
+      service_port:
+        type: int
+        description: Service port
+        default: 80
+      target_port:
+        type: int
+        description: Target port on pods
+        default: 8080
+      protocol:
+        type: enum
+        description: Protocol
+        options:
+          - TCP
+          - UDP
+          - SCTP
+        default: TCP
+      app_selector:
+        type: str
+        description: App label selector (e.g., app.kubernetes.io/name value)

+ 6 - 0
library/kubernetes/core-serviceaccount/serviceaccount.yaml.j2

@@ -0,0 +1,6 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: {{ resource_name }}
+  namespace: {{ namespace }}

+ 21 - 0
library/kubernetes/core-serviceaccount/template.yaml

@@ -0,0 +1,21 @@
+---
+kind: kubernetes
+schema: "1.0"
+metadata:
+  name: Kubernetes ServiceAccount
+  description: >
+    Kubernetes ServiceAccount for providing an identity for processes that run in Pods.
+
+
+    ServiceAccounts are used to control permissions and access to the Kubernetes API.
+
+
+    Documentation: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
+  version: 1.31.0
+  author: Christian Lempa
+  date: '2025-01-11'
+spec:
+  general:
+    vars:
+      resource_name:
+        default: app-serviceaccount

+ 8 - 0
library/kubernetes/core-storageclass/storageclass.yaml.j2

@@ -0,0 +1,8 @@
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: {{ resource_name }}
+provisioner: {{ provisioner }}
+volumeBindingMode: {{ volume_binding_mode }}
+reclaimPolicy: {{ reclaim_policy }}

+ 39 - 0
library/kubernetes/core-storageclass/template.yaml

@@ -0,0 +1,39 @@
+---
+kind: kubernetes
+schema: "1.0"
+metadata:
+  name: Kubernetes StorageClass
+  description: >
+    Kubernetes StorageClass for defining different types of storage that can be dynamically provisioned.
+
+
+    StorageClass allows administrators to describe different storage "classes" available.
+
+
+    Documentation: https://kubernetes.io/docs/concepts/storage/storage-classes/
+  version: 1.31.0
+  author: Christian Lempa
+  date: '2025-01-11'
+spec:
+  general:
+    vars:
+      resource_name:
+        default: standard
+      provisioner:
+        type: str
+        description: Storage provisioner (e.g., kubernetes.io/no-provisioner, driver.longhorn.io)
+        default: kubernetes.io/no-provisioner
+      volume_binding_mode:
+        type: enum
+        description: Volume binding mode
+        options:
+          - Immediate
+          - WaitForFirstConsumer
+        default: WaitForFirstConsumer
+      reclaim_policy:
+        type: enum
+        description: Reclaim policy for volumes
+        options:
+          - Retain
+          - Delete
+        default: Delete

برخی فایل ها در این مقایسه diff نمایش داده نمی شوند زیرا تعداد فایل ها بسیار زیاد است