فهرست منبع

prepare migration for other modules

xcad 3 ماه پیش
والد
کامیت
57819b2d10
89 فایل‌های تغییر یافته به همراه 3263 افزوده شده و 89 حذف شده
  1. 35 0
      --output-dir/values.yaml
  2. 1 0
      .gitignore
  3. 1 0
      CHANGELOG.md
  4. 30 89
      cli/core/module/base_commands.py
  5. 2 0
      cli/core/prompt.py
  6. 161 0
      cli/modules/helm/spec_v1_0.py
  7. 19 0
      library/ansible/checkmk/activate-changes.yaml
  8. 17 0
      library/ansible/checkmk/install-agent.yaml
  9. 25 0
      library/ansible/checkmk/lookup-rule.yaml
  10. 22 0
      library/ansible/checkmk/manage-hosts.yaml
  11. 71 0
      library/ansible/checkmk/manage-rules.yaml
  12. 3 0
      library/ansible/checkmk/secrets.yaml
  13. 76 0
      library/ansible/discord/notify-discord.yaml
  14. 52 0
      library/ansible/docker/docker-certs-enable.yaml
  15. 158 0
      library/ansible/docker/docker-certs.yaml
  16. 35 0
      library/ansible/docker/inst-docker-ubuntu.yaml
  17. 14 0
      library/ansible/docker/maint-docker-clean.yaml
  18. 46 0
      library/ansible/kubernetes/README.md
  19. 2 0
      library/ansible/kubernetes/ansible.cfg
  20. 318 0
      library/ansible/kubernetes/inst-k8s.yaml
  21. 1 0
      library/ansible/kubernetes/k8s_worker_node_connection.j2
  22. 21 0
      library/ansible/portainer/deploy-portainer.yaml
  23. 18 0
      library/ansible/traefik/deploy-traefik.yaml
  24. 19 0
      library/ansible/ubuntu/config-add-sshkey.yaml
  25. 11 0
      library/ansible/ubuntu/inst-qemu-agent.yaml
  26. 19 0
      library/ansible/ubuntu/inst-vm-core.yaml
  27. 12 0
      library/ansible/ubuntu/inst-zsh.yaml
  28. 25 0
      library/ansible/ubuntu/maint-diskspace.yaml
  29. 16 0
      library/ansible/ubuntu/maint-reboot-required.yaml
  30. 9 0
      library/ansible/ubuntu/maint-reboot.yaml
  31. 14 0
      library/ansible/ubuntu/upd-apt.yaml
  32. 16 0
      library/ansible/wireguard/inst-wireguard.yaml
  33. 9 0
      library/helm/authentik/secrets.yaml.j2
  34. 73 0
      library/helm/authentik/template.yaml
  35. 47 0
      library/helm/authentik/values.yaml.j2
  36. 45 0
      library/helm/certmanager/template.yaml
  37. 21 0
      library/helm/certmanager/values.yaml.j2
  38. 49 0
      library/helm/longhorn/template.yaml
  39. 51 0
      library/helm/longhorn/values.yaml.j2
  40. 40 0
      library/helm/portainer/template.yaml
  41. 27 0
      library/helm/portainer/values.yaml.j2
  42. 76 0
      library/helm/traefik/template.yaml
  43. 41 0
      library/helm/traefik/values.yaml.j2
  44. 15 0
      library/kubernetes/certmanager-certificate/certmanager-certificate.yaml
  45. 17 0
      library/kubernetes/certmanager-clusterissuer/certmanager-clusterissuer.yaml
  46. 9 0
      library/kubernetes/core-secret/core-secret.yaml
  47. 12 0
      library/kubernetes/twingate-connector/twingate_connector.yaml
  48. 10 0
      library/kubernetes/twingate-operator/helm/values.yaml
  49. 1 0
      library/packer/proxmox-ubuntu/files/99-pve.cfg
  50. 1 0
      library/packer/proxmox-ubuntu/http/meta-data
  51. 33 0
      library/packer/proxmox-ubuntu/http/user-data
  52. 159 0
      library/packer/proxmox-ubuntu/ubuntu-server-noble.pkr.hcl
  53. 9 0
      library/terraform/civo/credentials.tf
  54. 5 0
      library/terraform/civo/firewall.tf
  55. 23 0
      library/terraform/civo/kubernetes.tf
  56. 3 0
      library/terraform/civo/network.tf
  57. 20 0
      library/terraform/civo/provider.tf
  58. 139 0
      library/terraform/civo/query.tf
  59. 14 0
      library/terraform/civo/server.tf
  60. 14 0
      library/terraform/civo/ssh_key.tf
  61. 13 0
      library/terraform/cloudflare/credentials.tf
  62. 12 0
      library/terraform/cloudflare/dns.tf
  63. 19 0
      library/terraform/cloudflare/provider.tf
  64. 31 0
      library/terraform/helm/certmanager.tf
  65. 46 0
      library/terraform/helm/traefik.tf
  66. 7 0
      library/terraform/kubectl/manifest.tf
  67. 24 0
      library/terraform/kubectl/provider.tf
  68. 41 0
      library/terraform/kubernetes/deployment.tf
  69. 39 0
      library/terraform/kubernetes/ingress.tf
  70. 23 0
      library/terraform/kubernetes/provider.tf
  71. 19 0
      library/terraform/kubernetes/service.tf
  72. 40 0
      library/terraform/proxmox/provider.tf
  73. 102 0
      library/terraform/proxmox/vm_qemu.tf
  74. 15 0
      library/terraform/templates/cloud-deployment-example/civo.tf
  75. 9 0
      library/terraform/templates/cloud-deployment-example/cloudflare.tf
  76. 40 0
      library/terraform/templates/cloud-deployment-example/main.tf
  77. 78 0
      library/terraform/templates/kubernetes-automation-example/certmanager.tf
  78. 77 0
      library/terraform/templates/kubernetes-automation-example/civo.tf
  79. 20 0
      library/terraform/templates/kubernetes-automation-example/cloudflare.tf
  80. 153 0
      library/terraform/templates/kubernetes-automation-example/nginx1.tf
  81. 76 0
      library/terraform/templates/kubernetes-automation-example/provider.tf
  82. 47 0
      library/terraform/templates/kubernetes-automation-example/traefik.tf
  83. 24 0
      library/terraform/templates/simple-docker-example/main.tf
  84. 20 0
      library/terraform/twingate/provider.tf
  85. 7 0
      library/terraform/twingate/twingate_group.tf
  86. 7 0
      library/terraform/twingate/twingate_remote_network.tf
  87. 28 0
      library/terraform/twingate/twingate_resource.tf
  88. 3 0
      library/terraform/twingate/twingate_security_policy.tf
  89. 11 0
      library/terraform/twingate/twingate_user.tf

+ 35 - 0
--output-dir/values.yaml

@@ -0,0 +1,35 @@
+---
+global:
+  image:
+    repository: "ghcr.io/goauthentik/server"
+    tag: "2025.6.3"
+    pullPolicy: IfNotPresent
+authentik:
+  secret_key: ojUjEz2wv3O44cLvEdJSAbhJCo0NMDeg
+  postgresql:
+    host: postgres.local
+    name: authentik
+    user: authentik
+    password: lyaPwh0qC87rIbzBv8aifnCzXqklblUt
+    port: 5432
+  error_reporting:
+    enabled: false
+  log_level: error
+server:
+  service:
+    type: ClusterIP
+  ingress:
+    enabled: true
+    ingressClassName: traefik
+    annotations:
+      cert-manager.io/cluster-issuer: cloudflare-issuer
+    hosts:
+      - authentik.example.com
+    tls:
+      - secretName: traefik-tls
+        hosts:
+          - authentik.example.com
+postgresql:
+  enabled: false
+redis:
+  enabled: true

+ 1 - 0
.gitignore

@@ -17,6 +17,7 @@
 **/*.pyd
 **/.venv
 **/venv/
+**/.ruff_cache/
 
 # Packaging
 *.egg-info/

+ 1 - 0
CHANGELOG.md

@@ -23,6 +23,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 - Refactored code quality (#1364) for all core modules from single files to package structure with specific submodules
 - Improved debug logging to capture module discovery and registration during initialization
 - Enhanced debug logging for better troubleshooting
+- Simplified dry-run output to show only essential information (files, sizes, status)
 
 ### Fixed
 - CLI --var flag now properly converts boolean and numeric strings to appropriate Python types (#1522)

+ 30 - 89
cli/core/module/base_commands.py

@@ -233,38 +233,23 @@ def get_generation_confirmation(ctx: ConfirmationContext) -> bool:
     if not ctx.interactive:
         return True
 
-    # Use templates.render_file_generation_confirmation directly for now
-    ctx.display.templates.render_file_generation_confirmation(
-        ctx.output_dir, ctx.rendered_files, ctx.existing_files if ctx.existing_files else None
-    )
+    # Skip file confirmation display in dry-run mode
+    if not ctx.dry_run:
+        # Use templates.render_file_generation_confirmation directly for now
+        ctx.display.templates.render_file_generation_confirmation(
+            ctx.output_dir, ctx.rendered_files, ctx.existing_files if ctx.existing_files else None
+        )
 
-    # Final confirmation (only if we didn't already ask about overwriting)
-    if not ctx.dir_not_empty and not ctx.dry_run:
-        input_mgr = InputManager()
-        if not input_mgr.confirm("Generate these files?", default=True):
-            ctx.display.info("Generation cancelled")
-            return False
+        # Final confirmation (only if we didn't already ask about overwriting)
+        if not ctx.dir_not_empty:
+            input_mgr = InputManager()
+            if not input_mgr.confirm("Generate these files?", default=True):
+                ctx.display.info("Generation cancelled")
+                return False
 
     return True
 
 
-def _check_directory_permissions(output_dir: Path, display: DisplayManager) -> None:
-    """Check directory existence and write permissions."""
-    if output_dir.exists():
-        display.success(f"Output directory exists: [cyan]{output_dir}[/cyan]")
-        if os.access(output_dir, os.W_OK):
-            display.success("Write permission verified")
-        else:
-            display.warning("Write permission may be denied")
-    else:
-        display.info(f"  [dim]→[/dim] Would create output directory: [cyan]{output_dir}[/cyan]")
-        parent = output_dir.parent
-        if parent.exists() and os.access(parent, os.W_OK):
-            display.success("Parent directory writable")
-        else:
-            display.warning("Parent directory may not be writable")
-
-
 def _collect_subdirectories(rendered_files: dict[str, str]) -> set[Path]:
     """Collect unique subdirectories from file paths."""
     subdirs = set()
@@ -317,73 +302,29 @@ def execute_dry_run(
     show_files: bool,
     display: DisplayManager,
 ) -> None:
-    """Execute dry run mode with comprehensive simulation."""
-    display.info("")
-    display.info("[bold cyan]Dry Run Mode - Simulating File Generation[/bold cyan]")
-    display.info("")
-
-    # Simulate directory creation
-    display.heading("Directory Operations")
-    _check_directory_permissions(output_dir, display)
-
-    # Collect and display subdirectories
-    subdirs = _collect_subdirectories(rendered_files)
-    if subdirs:
-        display.info(f"  [dim]→[/dim] Would create {len(subdirs)} subdirectory(ies)")
-        for subdir in sorted(subdirs):
-            display.info(f"    [dim]📁[/dim] {subdir}/")
-
-    display.info("")
-
-    # Display file operations in a table
-    display.heading("File Operations")
+    """Execute dry run mode - preview files without writing."""
     file_operations, total_size, new_files, overwrite_files = _analyze_file_operations(output_dir, rendered_files)
-    # Use data_table for file operations
-    display.data_table(
-        columns=[
-            {"name": "File", "no_wrap": False},
-            {"name": "Size", "justify": "right", "style": "dim"},
-            {"name": "Status", "style": "yellow"},
-        ],
-        rows=file_operations,
-        row_formatter=lambda row: (
-            str(row[0]),
-            display.format_file_size(row[1]),
-            row[2],
-        ),
-    )
-    display.info("")
-
-    # Summary statistics
     size_str = _format_size(total_size)
-    summary_rows = [
-        ("Total files:", str(len(rendered_files))),
-        ("New files:", str(new_files)),
-        ("Files to overwrite:", str(overwrite_files)),
-        ("Total size:", size_str),
-    ]
-    display.table(
-        headers=None,
-        rows=summary_rows,
-        title="Summary",
-        show_header=False,
-        borderless=True,
-    )
-    display.info("")
 
     # Show file contents if requested
     if show_files:
-        display.info("[bold cyan]Generated File Contents:[/bold cyan]")
-        display.info("")
+        display.text("")
+        display.heading("File Contents")
         for file_path, content in sorted(rendered_files.items()):
-            display.info(f"[cyan]File:[/cyan] {file_path}")
-            display.info(f"{'─' * 80}")
-            display.info(content)
-            display.info("")  # Add blank line after content
-        display.info("")
-
-    display.success("Dry run complete - no files were written")
-    display.info(f"[dim]Files would have been generated in '{output_dir}'[/dim]")
+            display.text(f"\n[cyan]{file_path}[/cyan]")
+            display.text(f"{'─' * 80}")
+            display.text(content)
+        display.text("")
+
+    # Show summary message
+    display.text("")
+    if overwrite_files > 0:
+        display.warning(
+            f"Dry run: {len(rendered_files)} files ({size_str}) would be written to '{output_dir}' ({overwrite_files} files would be overwritten)"
+        )
+    else:
+        display.success(f"Dry run: {len(rendered_files)} files ({size_str}) would be written to '{output_dir}'")
+
     logger.info(f"Dry run completed for template '{id}' - {len(rendered_files)} files, {total_size} bytes")
 
 
@@ -481,7 +422,7 @@ def generate_template(module_instance, config: GenerationConfig) -> None:
         module_instance.display.templates.render_file_tree(template)
         # Display variables table
         module_instance.display.variables.render_variables_table(template)
-        module_instance.display.info("")
+        module_instance.display.text("")
 
     try:
         rendered_files, variable_values = _render_template(template, config.id, display, config.interactive)

+ 2 - 0
cli/core/prompt.py

@@ -30,8 +30,10 @@ class PromptHandler:
             Dict of variable names to collected values
         """
         if not Confirm.ask("Customize any settings?", default=False):
+            self.console.print("")  # Add blank line after prompt
             logger.info("User opted to keep all default values")
             return {}
+        self.console.print("")  # Add blank line after prompt
 
         collected: dict[str, Any] = {}
 

+ 161 - 0
cli/modules/helm/spec_v1_0.py

@@ -11,6 +11,167 @@ spec = OrderedDict(
                     "description": "Helm release name",
                     "type": "str",
                 },
+                "namespace": {
+                    "description": "Kubernetes namespace for the Helm release",
+                    "type": "str",
+                    "default": "default",
+                },
+            },
+        },
+        "networking": {
+            "title": "Networking",
+            "vars": {
+                "network_mode": {
+                    "description": "Kubernetes service type",
+                    "type": "enum",
+                    "options": ["ClusterIP", "NodePort", "LoadBalancer"],
+                    "default": "ClusterIP",
+                },
+            },
+        },
+        "traefik": {
+            "title": "Traefik Ingress",
+            "toggle": "traefik_enabled",
+            "needs": "network_mode=ClusterIP",
+            "description": "Traefik routes external traffic to your service.",
+            "vars": {
+                "traefik_enabled": {
+                    "description": "Enable Traefik Ingress/IngressRoute",
+                    "type": "bool",
+                    "default": False,
+                },
+                "traefik_host": {
+                    "description": "Hostname for Traefik ingress",
+                    "type": "hostname",
+                },
+            },
+        },
+        "traefik_tls": {
+            "title": "Traefik TLS/SSL",
+            "toggle": "traefik_tls_enabled",
+            "needs": "traefik_enabled=true;network_mode=ClusterIP",
+            "description": "Enable HTTPS/TLS for Traefik with certificate management.",
+            "vars": {
+                "traefik_tls_enabled": {
+                    "description": "Enable HTTPS/TLS",
+                    "type": "bool",
+                    "default": True,
+                },
+                "traefik_tls_secret": {
+                    "description": "TLS secret name",
+                    "type": "str",
+                    "default": "traefik-tls",
+                },
+                "traefik_tls_certmanager": {
+                    "description": "Use cert-manager for automatic certificate provisioning",
+                    "type": "bool",
+                    "default": False,
+                },
+                "traefik_tls_certmanager_issuer": {
+                    "description": "Cert-manager ClusterIssuer name",
+                    "type": "str",
+                    "default": "letsencrypt-prod",
+                    "needs": "traefik_tls_certmanager=true",
+                },
+            },
+        },
+        "volumes": {
+            "title": "Volumes",
+            "vars": {
+                "volumes_mode": {
+                    "description": "Persistent volume mode",
+                    "type": "enum",
+                    "options": ["default", "existing-pvc"],
+                    "default": "default",
+                },
+                "volumes_pvc_name": {
+                    "description": "Name of existing PVC",
+                    "type": "str",
+                    "needs": "volumes_mode=existing-pvc",
+                },
+            },
+        },
+        "database": {
+            "title": "Database",
+            "toggle": "database_enabled",
+            "vars": {
+                "database_enabled": {
+                    "description": "Enable database configuration",
+                    "type": "bool",
+                    "default": False,
+                },
+                "database_type": {
+                    "description": "Database type",
+                    "type": "enum",
+                    "options": ["postgres", "mysql", "mariadb"],
+                    "default": "postgres",
+                },
+                "database_host": {
+                    "description": "Database host",
+                    "type": "hostname",
+                },
+                "database_port": {
+                    "description": "Database port",
+                    "type": "int",
+                },
+                "database_name": {
+                    "description": "Database name",
+                    "type": "str",
+                },
+                "database_user": {
+                    "description": "Database user",
+                    "type": "str",
+                },
+                "database_password": {
+                    "description": "Database password",
+                    "type": "str",
+                    "sensitive": True,
+                    "autogenerated": True,
+                },
+            },
+        },
+        "email": {
+            "title": "Email Server",
+            "toggle": "email_enabled",
+            "description": "Configure email server for notifications and user management.",
+            "vars": {
+                "email_enabled": {
+                    "description": "Enable email server configuration",
+                    "type": "bool",
+                    "default": False,
+                },
+                "email_host": {
+                    "description": "SMTP server hostname",
+                    "type": "hostname",
+                },
+                "email_port": {
+                    "description": "SMTP server port",
+                    "type": "int",
+                    "default": 587,
+                },
+                "email_username": {
+                    "description": "SMTP username",
+                    "type": "str",
+                },
+                "email_password": {
+                    "description": "SMTP password",
+                    "type": "str",
+                    "sensitive": True,
+                },
+                "email_from": {
+                    "description": "From email address",
+                    "type": "email",
+                },
+                "email_use_tls": {
+                    "description": "Use TLS encryption",
+                    "type": "bool",
+                    "default": True,
+                },
+                "email_use_ssl": {
+                    "description": "Use SSL encryption",
+                    "type": "bool",
+                    "default": False,
+                },
             },
         },
     }

+ 19 - 0
library/ansible/checkmk/activate-changes.yaml

@@ -0,0 +1,19 @@
+---
+- name: "Activate Checkmk changes"
+  hosts: localhost
+  gather_facts: false
+  vars_files:
+    - secrets.yaml
+  vars:
+    server_url: "checkmk.home.arpa"
+    site: "cmk"
+
+  tasks:
+    - name: "Start activation on a specific site"
+      checkmk.general.activation:
+        server_url: "{{ server_url }}"
+        site: "{{ site }}"
+        automation_user: "{{ automation_user }}"
+        automation_secret: "{{ automation_secret }}"
+        sites:
+          - "{{ site }}"

+ 17 - 0
library/ansible/checkmk/install-agent.yaml

@@ -0,0 +1,17 @@
+---
+- name: "Install Checkmk agent on all hosts"
+  hosts: "{{ my_hosts | d([]) }}"
+  roles:
+    - checkmk.general.agent
+  vars:
+    checkmk_agent_version: "2.4.0p4"
+    checkmk_agent_server: "checkmk.home.arpa"
+    checkmk_agent_server_protocol: https
+    checkmk_agent_site: "cmk"
+    checkmk_agent_auto_activate: true
+    checkmk_agent_tls: "true"  # NOTE: Register Agent to enable TLS
+    checkmk_agent_user: "{{ automation_user }}"
+    checkmk_agent_pass: "{{ automation_secret }}"
+    checkmk_agent_host_name: "{{ ansible_hostname }}"  # NOTE: Required to replace FQDN with hostname only
+  vars_files:
+    - secrets.yaml

+ 25 - 0
library/ansible/checkmk/lookup-rule.yaml

@@ -0,0 +1,25 @@
+---
+- name: "Manage Checkmk rules"
+  hosts: localhost
+  gather_facts: false
+  vars_files:
+    - secrets.yaml
+  vars:
+    server_url: "checkmk.home.arpa"
+    site: "cmk"
+
+  tasks:
+    - name: Get a rule with a particular rule id
+      ansible.builtin.debug:
+        msg: "Rule: {{ extensions | to_nice_yaml }}"
+      vars:
+        extensions: "{{
+          lookup('checkmk.general.rule',
+            rule_id='checkmk-rule-id',
+            server_url=server_url,
+            site=site,
+            automation_user=automation_user,
+            automation_secret=automation_secret,
+            validate_certs=False
+            )
+          }}"

+ 22 - 0
library/ansible/checkmk/manage-hosts.yaml

@@ -0,0 +1,22 @@
+---
+- name: "Manage Checkmk hosts"
+  hosts: localhost
+  gather_facts: false
+  vars_files:
+    - secrets.yaml
+  vars:
+    server_url: "checkmk.home.arpa"
+    site: "cmk"
+
+  tasks:
+    - name: "Create host"
+      checkmk.general.host:
+        server_url: "{{ server_url }}"
+        site: "{{ site }}"
+        automation_user: "{{ automation_user }}"
+        automation_secret: "{{ automation_secret }}"
+        name: "your-host-name"
+        attributes:
+          ipaddress: "host-ip-address"
+        folder: "/"
+        state: "present"

+ 71 - 0
library/ansible/checkmk/manage-rules.yaml

@@ -0,0 +1,71 @@
+---
+- name: "Manage Checkmk rules"
+  hosts: localhost
+  gather_facts: false
+  vars_files:
+    - secrets.yaml
+  vars:
+    server_url: "checkmk.home.arpa"
+    site: "cmk"
+
+  tasks:
+    - name: Create DNS Check Rule
+      checkmk.general.rule:
+        server_url: "{{ server_url }}"
+        site: "{{ site }}"
+        automation_user: "{{ automation_user }}"
+        automation_secret: "{{ automation_secret }}"
+        ruleset: "active_checks:dns"
+        rule:
+          properties: {
+            "comment": "Ansible managed",
+            "description": "DNS DNS Monitoring",
+            "disabled": false,
+          }
+          conditions: {
+            "host_label_groups": [],
+            "host_name": {
+              "match_on": [
+                "your-dns-container-host"
+              ],
+              "operator": "one_of"
+            },
+            "host_tags": [],
+            "service_label_groups": []
+          }
+          "value_raw": {
+            "hostname": "hostname-to-query",
+            "server": "dns-server-ip",
+            "expected_addresses_list": [
+              "expected-ip-address"
+            ]
+          }
+          location:
+            folder: "/"
+            position: "top"
+        state: "present"
+
+    - name: Create NVME Temperature override rule
+      checkmk.general.rule:
+        server_url: "{{ server_url }}"
+        site: "{{ site }}"
+        automation_user: "{{ automation_user }}"
+        automation_secret: "{{ automation_secret }}"
+        ruleset: "checkgroup_parameters:temperature"
+        rule:
+          conditions:
+            host_label_groups: []
+            host_tags: []
+            service_description:
+              match_on:
+                - "DRIVE MODEL NAME*"
+              operator: "one_of"
+            service_label_groups: []
+          location:
+            folder: "/"
+            position: "top"
+          properties:
+            description: "NVME Temperature override"
+            disabled: false
+          value_raw: "{'levels': (60.0, 80.0)}"
+        state: present

+ 3 - 0
library/ansible/checkmk/secrets.yaml

@@ -0,0 +1,3 @@
+---
+automation_user: "your-checkmk-user"
+automation_secret: "your-checkmk-password"

+ 76 - 0
library/ansible/discord/notify-discord.yaml

@@ -0,0 +1,76 @@
+---
+# This Ansible playbook demonstrates how to send Discord notifications
+# using the `community.general.discord` module.
+# https://docs.ansible.com/ansible/latest/collections/community/general/discord_module.html
+#
+# If you need guidance how to create your own Discord server, see
+# https://support.discord.com/hc/en-us/articles/204849977-How-do-I-create-a-server
+#
+# In order to generate a webhook, please see
+# https://support.discord.com/hc/en-us/articles/360045093012-Server-Integrations-Page
+
+- name: Notify discord
+
+  hosts: "{{ my_hosts | d([]) }}"
+
+  vars:
+    # The name that will be shown as sender of the notification. Note
+    # that some usernames are blocked by Discord, for example it must
+    # not contain the word `discord`.
+    notify_discord_username: Ansible
+
+    # Your Discord webhook URL should have the following format. Please
+    # extract the following segments of the URL path and set them as values of
+    # the following variables:
+    #
+    # https://discord.com/api/webhooks/nnnnnnnnnn/xxxxxxxxxxxxxxxxxxxxxxxxxxx
+    #                                  |        | |                         |
+    #   notify_discord_webhook_id <----'--------' |                         |
+    #                                             |                         |
+    #   notify_discord_webhook_token <------------'-------------------------'
+    #
+    # Security advice: if you commit this data to a repository it is
+    # strongly recommended to encrypt `notify_discord_webhook_token` using
+    # Ansible Vault.
+    notify_discord_webhook_id: ''
+    notify_discord_webhook_token: ''
+
+    # Do not modify following regular expressions unless you know what
+    # you're doing. Those are to ensure that whatever you've set as
+    # `notify_discord_webhook_id` and `notify_discord_webhook_token`
+    # complies with the Discord API Specification (as of 2024-02-25).
+    #
+    # https://github.com/discord/discord-api-spec/blob/fe9917381e47285b56d98cb72ae3cfe7db9ea19c/specs/openapi.json#L7524-L7531
+    # https://github.com/discord/discord-api-spec/blob/fe9917381e47285b56d98cb72ae3cfe7db9ea19c/specs/openapi.json#L24817-L24821
+    notify_discord_webhook_id_regex: '^0|[1-9][0-9]*$'
+    # https://github.com/discord/discord-api-spec/blob/fe9917381e47285b56d98cb72ae3cfe7db9ea19c/specs/openapi.json#L7532-L7541
+    notify_discord_webhook_token_regex: '^[a-zA-Z0-9_-]+$'
+
+    # The content of the notification
+    notify_discord_webhook_content: |-
+      **Message from `{{ inventory_hostname }}` by *Ansible* ** :tada:
+      Just a test, adjust it to your liking.
+
+      You can use any Markdown formatting here [supported by Discord](
+      https://support.discord.com/hc/en-us/articles/210298617-Markdown-Text-101-Chat-Formatting-Bold-Italic-Underline).
+
+    # Delegate the sending of the Discord notification to the following host
+    # which must be able to access the public internet on destination
+    # port 443/tcp. When `localhost` is specified, this is sent from
+    # the Ansible Controller, but you can pick any host listed in the
+    # Ansible inventory.
+    notify_discord_send_from_host: localhost
+
+  tasks:
+    - name: Send Discord message
+      community.general.discord:
+        username: "{{ notify_discord_username }}"
+        webhook_id: "{{ notify_discord_webhook_id }}"
+        webhook_token: "{{ notify_discord_webhook_token }}"
+        content: "{{ notify_discord_webhook_content }}"
+      delegate_to: "{{ notify_discord_send_from_host }}"
+      when:
+        - notify_discord_webhook_id is match(notify_discord_webhook_id_regex)
+        - notify_discord_webhook_token is match(notify_discord_webhook_token_regex)
+        - notify_discord_webhook_content | length > 0
+        - notify_discord_send_from_host is in (['localhost'] + groups['all'])

+ 52 - 0
library/ansible/docker/docker-certs-enable.yaml

@@ -0,0 +1,52 @@
+---
+- name: "Docker Certs enable"
+  hosts: "{{ my_hosts | d([]) }}"
+  become: true
+  vars:
+    certs_path: "/root/docker-certs"
+
+  tasks:
+    - name: Check if docker certs are existing
+      ansible.builtin.stat:
+        path: "{{ certs_path }}"
+      register: certs_dir
+
+    - name: Fail if docker certs are not existing
+      ansible.builtin.fail:
+        msg: "Docker certificates are not existing in /root/docker-certs."
+      when: not certs_dir.stat.exists
+
+    - name: Get machine's primary internal ip address from eth0 interface
+      ansible.builtin.setup:
+      register: ip_address
+
+    - name: Set machine's primary internal ip address
+      ansible.builtin.set_fact:
+        ip_address: "{{ ip_address.ansible_facts.ansible_default_ipv4.address }}"
+
+    - name: Check if ip_address is a valid ip address
+      ansible.builtin.assert:
+        that:
+          - ip_address is match("^(?:[0-9]{1,3}\.){3}[0-9]{1,3}$")
+        fail_msg: "ip_address is not a valid ip address."
+        success_msg: "ip_address is a valid ip address."
+
+    - name: Change docker daemon to use certs
+      ansible.builtin.lineinfile:
+        path: /lib/systemd/system/docker.service
+        line: >
+          ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
+          -H tcp://{{ ip_address }}:2376 --tlsverify --tlscacert={{ certs_path }}/ca.pem
+          --tlscert={{ certs_path }}/server-cert.pem --tlskey={{ certs_path }}/server-key.pem
+        regexp: '^ExecStart='
+        state: present
+
+    - name: Reload systemd daemon
+      ansible.builtin.systemd:
+        daemon_reload: true
+
+    - name: Restart docker daemon
+      ansible.builtin.systemd:
+        name: docker
+        state: restarted
+        enabled: true

+ 158 - 0
library/ansible/docker/docker-certs.yaml

@@ -0,0 +1,158 @@
+---
+- name: "Docker Certs"
+  hosts: "{{ my_hosts | d([]) }}"
+  become: true
+  vars:
+    certs_path: "/root/docker-certs"
+    cert_validity_days: 3650
+    cn_domain: "your-domain.tld"
+
+  tasks:
+    - name: Check if docker certs are existing
+      ansible.builtin.stat:
+        path: "{{ certs_path }}"
+      register: certs_dir
+
+    - name: Create docker certs directory (if needed)
+      ansible.builtin.file:
+        path: "{{ certs_path }}"
+        state: directory
+        mode: '0700'
+      when: not certs_dir.stat.exists
+
+    - name: Check if docker certs directory is empty
+      ansible.builtin.command: ls -A "{{ certs_path }}"
+      register: certs_list
+      when: certs_dir.stat.exists
+      changed_when: false
+      ignore_errors: true
+
+    - name: Fail if docker certs already exist
+      ansible.builtin.fail:
+        msg: "Docker certificates already exist in {{ certs_path }}."
+      when: certs_list.stdout | default('') != ''
+
+    - name: Get machine's primary internal ip address from eth0 interface
+      ansible.builtin.setup:
+      register: ip_address
+
+    - name: Set machine's primary internal ip address
+      ansible.builtin.set_fact:
+        ip_address: "{{ ip_address.ansible_facts.ansible_default_ipv4.address }}"
+
+    - name: Check if ip_address is a valid ip address
+      ansible.builtin.assert:
+        that:
+          - ip_address is match("^(?:[0-9]{1,3}\.){3}[0-9]{1,3}$")
+        fail_msg: "ip_address is not a valid ip address."
+        success_msg: "ip_address is a valid ip address."
+
+    - name: Generate CA private key
+      ansible.builtin.command:
+        cmd: >
+          openssl genrsa -out "{{ certs_path }}/ca-key.pem" 4096
+      args:
+        creates: "{{ certs_path }}/ca-key.pem"
+
+    - name: Generate CA certificate
+      ansible.builtin.command:
+        cmd: >
+          openssl req -sha256 -new -x509
+            -subj "/CN={{ cn_domain }}"
+            -days "{{ cert_validity_days }}"
+            -key "{{ certs_path }}/ca-key.pem"
+            -out "{{ certs_path }}/ca.pem"
+      args:
+        creates: "{{ certs_path }}/ca.pem"
+
+    - name: Generate server private key
+      ansible.builtin.command:
+        cmd: >
+          openssl genrsa -out "{{ certs_path }}/server-key.pem" 4096
+        creates: "{{ certs_path }}/server-key.pem"
+
+    - name: Generate server certificate signing request
+      ansible.builtin.command:
+        cmd: >
+          openssl req -sha256 -new
+            -subj "/CN={{ inventory_hostname }}"
+            -key "{{ certs_path }}/server-key.pem"
+            -out "{{ certs_path }}/server.csr"
+        creates: "{{ certs_path }}/server.csr"
+
+    - name: Generate server certificate extension file
+      ansible.builtin.shell: |
+        echo "subjectAltName = DNS:{{ inventory_hostname }},IP:{{ ip_address }},IP:127.0.0.1" >> "{{ certs_path }}/extfile.cnf"
+        echo "extendedKeyUsage = serverAuth" >> "{{ certs_path }}/extfile.cnf"
+      args:
+        creates: "{{ certs_path }}/extfile.cnf"
+
+    - name: Generate server certificate
+      ansible.builtin.command:
+        cmd: >
+          openssl x509 -req -days "{{ cert_validity_days }}" -sha256
+            -in "{{ certs_path }}/server.csr"
+            -CA "{{ certs_path }}/ca.pem"
+            -CAkey "{{ certs_path }}/ca-key.pem"
+            -CAcreateserial -out "{{ certs_path }}/server-cert.pem"
+            -extfile "{{ certs_path }}/extfile.cnf"
+        creates: "{{ certs_path }}/server-cert.pem"
+
+    - name: Generate client private key
+      ansible.builtin.command:
+        cmd: >
+          openssl genrsa -out "{{ certs_path }}/key.pem" 4096
+        creates: "{{ certs_path }}/key.pem"
+
+    - name: Generate client certificate signing request
+      ansible.builtin.command:
+        cmd: >
+          openssl req -sha256 -new
+            -subj "/CN=client"
+            -key "{{ certs_path }}/key.pem"
+            -out "{{ certs_path }}/client.csr"
+        creates: "{{ certs_path }}/client.csr"
+
+    - name: Generate client certificate extension file
+      ansible.builtin.shell: |
+        echo "extendedKeyUsage = clientAuth" >> "{{ certs_path }}/client-extfile.cnf"
+      args:
+        creates: "{{ certs_path }}/client-extfile.cnf"
+
+    - name: Generate client certificate
+      ansible.builtin.command:
+        cmd: >
+          openssl x509 -req -days "{{ cert_validity_days }}"
+            -sha256 -in "{{ certs_path }}/client.csr"
+            -CA "{{ certs_path }}/ca.pem"
+            -CAkey "{{ certs_path }}/ca-key.pem"
+            -CAcreateserial -out "{{ certs_path }}/cert.pem"
+            -extfile "{{ certs_path }}/client-extfile.cnf"
+        creates: "{{ certs_path }}/cert.pem"
+
+    - name: Remove server certificate signing request
+      ansible.builtin.file:
+        path: "{{ certs_path }}/server.csr"
+        state: absent
+
+    - name: Remove client certificate signing request
+      ansible.builtin.file:
+        path: "{{ certs_path }}/client.csr"
+        state: absent
+
+    - name: Remove server certificate extension file
+      ansible.builtin.file:
+        path: "{{ certs_path }}/extfile.cnf"
+        state: absent
+
+    - name: Remove client certificate extension file
+      ansible.builtin.file:
+        path: "{{ certs_path }}/client-extfile.cnf"
+        state: absent
+
+    - name: Set permissions for docker certs
+      ansible.builtin.file:
+        path: "{{ certs_path }}"
+        mode: '0700'
+        recurse: true
+        follow: true

+ 35 - 0
library/ansible/docker/inst-docker-ubuntu.yaml

@@ -0,0 +1,35 @@
+---
+- name: Install docker
+  hosts: "{{ my_hosts | d([]) }}"
+  become: true
+
+  tasks:
+    - name: Install docker dependencies
+      ansible.builtin.apt:
+        name:
+          - apt-transport-https
+          - ca-certificates
+          - curl
+          - gnupg-agent
+          - software-properties-common
+        update_cache: true
+
+    - name: Add docker gpg key
+      ansible.builtin.apt_key:
+        url: https://download.docker.com/linux/ubuntu/gpg
+        state: present
+        keyring: /etc/apt/keyrings/docker.gpg
+
+    - name: Add docker repository
+      ansible.builtin.apt_repository:
+        filename: docker
+        repo: deb [arch=amd64 signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu {{ ansible_lsb.codename | lower }} stable
+        state: present
+
+    - name: Install docker engine
+      ansible.builtin.apt:
+        name:
+          - docker-ce
+          - docker-buildx-plugin
+          - docker-compose-plugin
+        update_cache: true

+ 14 - 0
library/ansible/docker/maint-docker-clean.yaml

@@ -0,0 +1,14 @@
+---
+- name: Clean docker
+  hosts: "{{ my_hosts | d([]) }}"
+
+  tasks:
+    - name: Prune non-dangling images
+      community.docker.docker_prune:
+        containers: false
+        images: true
+        images_filters:
+          dangling: false
+        networks: false
+        volumes: false
+        builder_cache: false

+ 46 - 0
library/ansible/kubernetes/README.md

@@ -0,0 +1,46 @@
+# Kubernetes (K8s) Installation Script
+
+- Introduction
+- Prerequisites
+- Execution Instructions
+
+## Introduction
+
+> The objective of this playbook is to automate the installation and setup of a Kubernetes instance. The playbook consists of 3 main plays: one for both controllers and nodes, one for controllers only, and one for nodes only. It will ask for user confirmation before moving on to each stage. By the end of the playbook, two files containing the **connection string** will be created: one on the controller node named **worker_conn_string**, and one locally inside the playbook directory named **Remote_Files/worker_conn_string**. (Note: if you want to join controllers or nodes manually later, use the **--control-plane** flag for controllers.)
+
+### References
+
+**Documentation** - [https://kubernetes.io/docs/setup/](https://kubernetes.io/docs/setup/)
+
+## Prerequisites
+
+- At least 2 VMs (1 for the Control Node and 1 for the Worker Node).
+- Static IPs should be set along with unique host names.
+- Inventory should be in this format
+
+```ini
+    [controllers]
+    host_name ansible_ssh_host=<IP> ansible_user='<USERNAME>' ansible_become_pass='<PASSWORD>'
+
+    [nodes]
+
+    [instance:children]
+    controllers
+    nodes
+```
+
+(If you want to change this, don't forget to change the `inst-k8s` as well)
+
+## Execution Instructions
+
+```bash
+ansible-playbook -i <INVENTORY> <PLAYBOOK>
+```
+
+### Optional Flags
+
+| Flag  | Use Case |
+|-------|-----------|
+| --ask-vault-pass | If the vault is encrypted |
+| --start-at-task | If you want to start from a specific task|
+| --tags | If you want to only run a specific group of tasks|

+ 2 - 0
library/ansible/kubernetes/ansible.cfg

@@ -0,0 +1,2 @@
+[defaults]
+timeout = 25

+ 318 - 0
library/ansible/kubernetes/inst-k8s.yaml

@@ -0,0 +1,318 @@
+---
+- name: Setup Prerequisites To Install Kubernetes
+  hosts: instance
+  become: true
+  vars:
+    kube_prereq_packages: [curl, ca-certificates, apt-transport-https]
+    kube_packages: [kubeadm, kubectl, kubelet]
+
+  tasks:
+    - name: Test Reachability
+      ansible.builtin.ping:
+
+    - name: Update Cache
+      ansible.builtin.apt:
+        update_cache: true
+        autoclean: true
+
+    - name: 1. Upgrade All the Packages to the latest
+      ansible.builtin.apt:
+        upgrade: "full"
+
+    - name: 2. Install Qemu-Guest-Agent
+      ansible.builtin.apt:
+        name:
+          - qemu-guest-agent
+        state: present
+
+    - name: 3. Setup a Container Runtime
+      ansible.builtin.apt:
+        name:
+          - containerd
+        state: present
+
+    - name: 4. Start Containerd If Stopped
+      ansible.builtin.service:
+        name: containerd
+        state: started
+
+    - name: 5. Create Containerd Directory
+      ansible.builtin.file:
+        path: /etc/containerd
+        state: directory
+        mode: '0755'
+
+    - name: 6. Check config.toml Exists
+      ansible.builtin.stat:
+        path: /etc/containerd/config.toml
+      register: pre_file_exist_result
+
+    - name: 6.1 Delete config.toml Exists
+      ansible.builtin.file:
+        path: /etc/containerd/config.toml
+        state: absent
+      when: pre_file_exist_result.stat.exists
+
+    - name: 7. Place Default Containerd Config Inside It
+      ansible.builtin.shell: |
+        set -o pipefail
+        containerd config default | sudo tee /etc/containerd/config.toml
+      register: output
+      changed_when: output.rc != 0
+      args:
+        executable: /bin/bash
+      tags:
+        - containerd_config
+
+    - name: 7.1 Check If New config.toml Exists Now
+      ansible.builtin.stat:
+        path: /etc/containerd/config.toml
+      register: post_file_exist_result
+      tags:
+        - containerd_config
+
+    - name: 7.2 Exit The Play If config.toml Does Not Exist
+      ansible.builtin.meta: end_play
+      when: not post_file_exist_result.stat.exists
+      tags:
+        - containerd_config
+
+    - name: 8.1 Disable Swap
+      ansible.builtin.command: sudo swapoff -a
+      register: output
+      changed_when: output.rc != 0
+      tags:
+        - disable_swap
+
+    - name: 8.2 Disable Swap permanently
+      ansible.builtin.replace:
+        path: /etc/fstab
+        regexp: '^([^#].*?\sswap\s+sw\s+.*)$'
+        replace: '# \1'
+      tags:
+        - disable_swap
+
+    - name: 9. Edit config.toml
+      ansible.builtin.replace:
+        path: /etc/containerd/config.toml
+        after: \[plugins\."io\.containerd\.grpc\.v1\.cri"\.containerd\.runtimes\.runc\.options\]
+        regexp: SystemdCgroup = false
+        replace: SystemdCgroup = true
+
+    - name: 10. Enable IPv4 Forwarding
+      ansible.builtin.replace:
+        path: /etc/sysctl.conf
+        regexp: ^#net\.ipv4\.ip_forward=1$
+        replace: net.ipv4.ip_forward=1
+
+    - name: 11.1 Delete k8s Config If Exists
+      ansible.builtin.file:
+        path: /etc/modules-load.d/k8s.conf
+        state: absent
+      tags:
+        - kube_config
+
+    - name: 11.2 Add k8s.config and Edit It
+      ansible.builtin.lineinfile:
+        path: /etc/modules-load.d/k8s.conf
+        line: br_netfilter
+        create: true
+        mode: '0755'
+      tags:
+        - kube_config
+
+    - name: 12.1 Reboot
+      ansible.builtin.reboot:
+      register: system_reboot
+
+    - name: 12.2 Verify Reboot Success
+      ansible.builtin.ping:
+      when: system_reboot.rebooted
+
+    - name: 13.1 Update Cache
+      ansible.builtin.apt:
+        update_cache: true
+        autoclean: true
+      tags:
+        - install_pre_kube_packages
+
+    - name: 13.2 Remove apt lock file
+      ansible.builtin.file:
+        state: absent
+        path: "/var/lib/dpkg/lock"
+      tags:
+        - install_pre_kube_packages
+
+    - name: 13.3 Install Prerequisite Packages
+      ansible.builtin.apt:
+        name: '{{ kube_prereq_packages }}'
+      tags:
+        - install_pre_kube_packages
+
+    - name: 13.4 Remove GPG Keys If They Exist
+      ansible.builtin.file:
+        path: "{{ item }}"
+        state: absent
+      with_items:
+        - /usr/share/keyrings/kubernetes-apt-keyring.gpg
+        - /usr/share/keyrings/kubernetes-apt-keyring.gpg_armored
+        - /etc/apt/keyrings/kubernetes-apt-keyring.gpg
+      tags:
+        - install_pre_kube_packages
+
+    - name: 13.5 Download Kubernetes APT Key
+      ansible.builtin.get_url:
+        url: https://pkgs.k8s.io/core:/stable:/v1.28/deb/Release.key
+        dest: /usr/share/keyrings/kubernetes-apt-keyring.gpg_armored
+        mode: '0755'
+      tags:
+        - install_pre_kube_packages
+
+    - name: 13.6 De-Armor Kubernetes APT Key
+      ansible.builtin.shell: gpg --dearmor < /usr/share/keyrings/kubernetes-apt-keyring.gpg_armored > /etc/apt/keyrings/kubernetes-apt-keyring.gpg
+      no_log: true
+      args:
+        creates: /etc/apt/keyrings/kubernetes-apt-keyring.gpg
+      tags:
+        - install_pre_kube_packages
+
+    - name: 13.7 Add Kubernetes APT Key
+      ansible.builtin.shell: |
+        set -o pipefail
+        echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.28/deb/ /' \
+        | sudo tee /etc/apt/sources.list.d/kubernetes.list
+      register: apt_output
+      changed_when: apt_output.rc != 0
+      args:
+        executable: /bin/bash
+      tags:
+        - install_pre_kube_packages
+
+    - name: 14.1 Update Cache
+      ansible.builtin.apt:
+        update_cache: true
+        autoclean: true
+      tags:
+        - install_kube_packages
+
+    - name: 14.2 Remove apt lock file
+      ansible.builtin.file:
+        state: absent
+        path: "/var/lib/dpkg/lock"
+      tags:
+        - install_kube_packages
+
+    - name: 14.3 Install Required Packages
+      ansible.builtin.apt:
+        name: '{{ kube_packages }}'
+      tags:
+        - install_kube_packages
+
+    - name: 14.4 Hold Packages
+      ansible.builtin.dpkg_selections:
+        name: '{{ item }}'
+        selection: hold
+      with_items: '{{ kube_packages }}'
+      tags:
+        - install_kube_packages
+
+    - name: Prompt To Continue On To Configuring Control Nodes
+      ansible.builtin.pause:
+        prompt: Press RETURN when you want to continue configuring the Control nodes!
+
+- name: Setup Controller Nodes
+  gather_facts: true
+  hosts: controllers
+  become: true
+
+  tasks:
+    - name: 1. Initialize Cluster
+      ansible.builtin.shell: |
+        set -o pipefail
+        sudo kubeadm init --control-plane-endpoint={{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }} --pod-network-cidr=10.244.0.0/16
+      register: init_cluster_output
+      changed_when: init_cluster_output.rc != 0
+      args:
+        executable: /bin/bash
+
+    - name: 2.1 Create .kube Directory
+      ansible.builtin.file:
+        path: .kube
+        state: directory
+        mode: '0755'
+      tags:
+        - kube_admin_config
+
+    - name: 2.2 Copy Kubernetes Admin Config
+      ansible.builtin.copy:
+        remote_src: true
+        src: /etc/kubernetes/admin.conf
+        dest: .kube/config
+        mode: '0755'
+      tags:
+        - kube_admin_config
+
+    - name: 2.3 Change Config File Permission
+      ansible.builtin.command: chown {{ ansible_env.USER }}:{{ ansible_env.USER }} ".kube/config"
+      changed_when: false
+      when: not ansible_env.HOME is undefined
+      tags:
+        - kube_admin_config
+
+    - name: 3. Install An Overlay Network
+      ansible.builtin.shell: |
+        set -o pipefail
+        kubectl apply -f https://raw.githubusercontent.com/flannel-io/flannel/master/Documentation/kube-flannel.yml
+      register: init_cluster_output
+      become: false
+      changed_when: init_cluster_output.rc != 0
+      args:
+        executable: /bin/bash
+
+    - name: 4.1 Execute Join String Generation Command
+      ansible.builtin.command: kubeadm token create --print-join-command
+      become: false
+      register: join_output
+      changed_when: false
+      tags:
+        - join_string
+
+    - name: 4.2 Display Join String
+      ansible.builtin.debug:
+        msg: 'Join Command : {{ join_output.stdout }}'
+      tags:
+        - join_string
+
+    - name: Copy Connection String To A Remote File
+      ansible.builtin.template:
+        src: k8s_worker_node_connection.j2
+        dest: worker_conn_string
+        mode: '0755'
+
+    - name: Check Connection String File Exists
+      ansible.builtin.stat:
+        path: worker_conn_string
+      register: conn_file_path_remote
+
+    - name: Fetch The Remote File
+      ansible.builtin.fetch:
+        src: worker_conn_string
+        dest: Remote_Files/worker_conn_string
+        flat: true
+      when: conn_file_path_remote.stat.exists
+
+    - name: Prompt To Continue On To Configuring Worker Nodes
+      ansible.builtin.pause:
+        prompt: Press RETURN when you want to continue configuring the Worker nodes!
+
+- name: Join Worker Nodes
+  gather_facts: true
+  hosts: nodes
+  become: true
+  vars:
+    node_conn_string: "{{ lookup('ansible.builtin.file', 'Remote_Files/worker_conn_string') }}"
+
+  tasks:
+    - name: 1. Add Worker Nodes To The Controller
+      ansible.builtin.command: '{{ node_conn_string }}'
+      changed_when: false
+      throttle: 1

+ 1 - 0
library/ansible/kubernetes/k8s_worker_node_connection.j2

@@ -0,0 +1 @@
+{{ join_output.stdout }}

+ 21 - 0
library/ansible/portainer/deploy-portainer.yaml

@@ -0,0 +1,21 @@
+---
+- name: Deploy portainer-ce latest
+  hosts: "{{ my_hosts | d([]) }}"
+  become: true
+  become_user: "{{ lookup('env','USER') }}"
+
+  tasks:
+    - name: Create new volume
+      community.docker.docker_volume:
+        name: portainer-data
+
+    - name: Deploy portainer
+      community.docker.docker_container:
+        name: portainer
+        image: "docker.io/portainer/portainer-ce"
+        ports:
+          - "9443:9443"
+        volumes:
+          - /run/docker.sock:/var/run/docker.sock
+          - portainer-data:/data
+        restart_policy: unless-stopped

+ 18 - 0
library/ansible/traefik/deploy-traefik.yaml

@@ -0,0 +1,18 @@
+---
+- name: Deploy traefik v2.5
+  hosts: "{{ my_hosts | d([]) }}"
+
+  tasks:
+    - name: Deploy traefik
+      community.docker.docker_container:
+        name: traefik
+        image: "traefik:v2.5"
+        ports:
+          - "80:80"
+          - "443:443"
+        volumes:
+          - /run/docker.sock:/run/docker.sock
+          - /etc/traefik:/etc/traefik
+        restart_policy: unless-stopped
+      become: true
+      become_user: "{{ lookup('env', 'USER') }}"

+ 19 - 0
library/ansible/ubuntu/config-add-sshkey.yaml

@@ -0,0 +1,19 @@
+---
+- name: Add ssh key
+  hosts: "{{ my_hosts | d([]) }}"
+  become: true
+
+  tasks:
+    - name: Install public keys
+      ansible.posix.authorized_key:
+        user: "{{ lookup('env', 'USER') }}"
+        state: present
+        key: "{{ lookup('file', '~/.ssh/id_rsa.pub') }}"
+
+    - name: Change sudoers file
+      ansible.builtin.lineinfile:
+        path: /etc/sudoers
+        state: present
+        regexp: '^%sudo'
+        line: '%sudo ALL=(ALL) NOPASSWD: ALL'
+        validate: /usr/sbin/visudo -cf %s

+ 11 - 0
library/ansible/ubuntu/inst-qemu-agent.yaml

@@ -0,0 +1,11 @@
+---
+- name: Install qemu-guest-agent package
+  hosts: all
+  become: true
+  become_method: ansible.builtin.sudo
+
+  tasks:
+    - name: Install qemu-guest-agent
+      ansible.builtin.apt:
+        name: qemu-guest-agent
+        state: present

+ 19 - 0
library/ansible/ubuntu/inst-vm-core.yaml

@@ -0,0 +1,19 @@
+---
+- name: Install core packages for virtual machines
+  hosts: "{{ my_hosts | d([]) }}"
+  become: true
+
+  tasks:
+    - name: Install packages
+      ansible.builtin.apt:
+        name:
+          - prometheus-node-exporter
+          - nfs-common
+          - qemu-guest-agent
+        update_cache: true
+
+    - name: Start guest qemu-guest-agent
+      ansible.builtin.service:
+        name: qemu-guest-agent
+        state: started
+        enabled: true

+ 12 - 0
library/ansible/ubuntu/inst-zsh.yaml

@@ -0,0 +1,12 @@
+---
+- name: Install zsh
+  hosts: "{{ my_hosts | d([]) }}"
+  become: true
+
+  tasks:
+    - name: Install zsh
+      ansible.builtin.apt:
+        name: zsh
+        state: present
+        update_cache: true
+      become: true

+ 25 - 0
library/ansible/ubuntu/maint-diskspace.yaml

@@ -0,0 +1,25 @@
+---
+- name: Check disk space
+  hosts: "{{ my_hosts | d([]) }}"
+
+  tasks:
+    - name: Check disk space available
+      ansible.builtin.shell:
+        cmd: |
+          set -euo pipefail
+          df -Ph / | awk 'NR==2 {print $5}'
+        executable: /bin/bash
+      changed_when: false
+      check_mode: false
+      register: disk_usage
+
+#   - name: Send discord message when disk space is over 80%
+#     uri:
+#       url: "your-webhook"
+#       method: POST
+#       body_format: json
+#       body: '{"content": "Disk space on {{ inventory_hostname }} is above 80%!"}'
+#       headers:
+#         Content-Type: application/json
+#       status_code: 204
+#     when: disk_usage.stdout[:-1]|int > 80

+ 16 - 0
library/ansible/ubuntu/maint-reboot-required.yaml

@@ -0,0 +1,16 @@
+---
+- name: Check if system reboot is required
+  hosts: "{{ my_hosts | d([]) }}"
+  become: true
+
+  tasks:
+    - name: Check if system reboot is required
+      become: true
+      ansible.builtin.stat:
+        path: /run/reboot-required
+      register: reboot_required
+
+    - name: Report if reboot is required
+      ansible.builtin.debug:
+        msg: "Reboot is required"
+      when: reboot_required.stat.exists

+ 9 - 0
library/ansible/ubuntu/maint-reboot.yaml

@@ -0,0 +1,9 @@
+---
+- name: Reboot machine
+  hosts: "{{ my_hosts | d([]) }}"
+  become: true
+
+  tasks:
+    - name: Reboot machine
+      ansible.builtin.reboot:
+        reboot_timeout: 3600

+ 14 - 0
library/ansible/ubuntu/upd-apt.yaml

@@ -0,0 +1,14 @@
+---
+- name: Update and upgrade apt packages
+  hosts: all
+
+  tasks:
+    - name: Update packages with apt
+      when: ansible_pkg_mgr == 'apt'
+      ansible.builtin.apt:
+        update_cache: true
+
+    - name: Upgrade packages with apt
+      when: ansible_pkg_mgr == 'apt'
+      ansible.builtin.apt:
+        upgrade: dist

+ 16 - 0
library/ansible/wireguard/inst-wireguard.yaml

@@ -0,0 +1,16 @@
+---
+- name: Install wireguard
+  hosts: "{{ my_hosts | d([]) }}"
+  become: true
+
+  tasks:
+    - name: Install wireguard
+      ansible.builtin.apt:
+        name: wireguard
+        update_cache: true
+
+    - name: Generate private and public keypair
+      ansible.builtin.shell: |
+        wg genkey | tee privatekey | wg pubkey > publickey
+        chmod 0400 privatekey
+        chmod 0400 publickey

+ 9 - 0
library/helm/authentik/secrets.yaml.j2

@@ -0,0 +1,9 @@
+---
+authentik:
+  secret_key: {{ authentik_secret_key }}
+  postgresql:
+    password: {{ database_password }}
+{% if email_enabled %}
+  email:
+    password: {{ email_password }}
+{% endif %}

+ 73 - 0
library/helm/authentik/template.yaml

@@ -0,0 +1,73 @@
+---
+kind: helm
+schema: "1.0"
+metadata:
+  name: Authentik
+  description: >
+    Helm values template for Authentik, an open-source Identity Provider focused
+    on flexibility and versatility with support for various protocols (OAuth2, SAML, LDAP).
+
+
+    Chart Repository: https://charts.goauthentik.io
+
+    Chart Name: authentik
+
+    Chart Version: Compatible with Authentik 2025.6.3
+
+
+    Project: https://goauthentik.io/
+
+    Documentation: https://docs.goauthentik.io/
+  version: 2025.6.3
+  author: Christian Lempa
+  date: '2025-01-11'
+spec:
+  general:
+    vars:
+      release_name:
+        default: authentik
+  networking:
+    vars:
+      network_mode:
+        default: ClusterIP
+  authentik:
+    title: Authentik Configuration
+    description: Configure Authentik application settings
+    required: true
+    vars:
+      authentik_secret_key:
+        type: str
+        description: Secret Key
+        extra: Used for cookie signing and unique user IDs
+        sensitive: true
+        autogenerated: true
+      authentik_log_level:
+        type: enum
+        description: Authentik log level
+        options:
+          - trace
+          - debug
+          - info
+          - warning
+          - error
+        default: error
+      authentik_error_reporting:
+        type: bool
+        description: Enable error reporting to Authentik developers
+        default: false
+  database:
+    vars:
+      database_enabled:
+        default: true
+      database_type:
+        default: postgres
+      database_name:
+        default: authentik
+      database_user:
+        default: authentik
+      database_port:
+        default: 5432
+  traefik:
+    vars:
+      traefik_host:
+        default: authentik.home.arpa

+ 47 - 0
library/helm/authentik/values.yaml.j2

@@ -0,0 +1,47 @@
+---
+global:
+  image:
+    repository: "ghcr.io/goauthentik/server"
+    tag: "2025.6.3"
+    pullPolicy: IfNotPresent
+authentik:
+  postgresql:
+    host: {{ database_host }}
+    name: {{ database_name }}
+    user: {{ database_user }}
+    port: {{ database_port }}
+{% if email_enabled %}
+  email:
+    host: {{ email_host }}
+    port: {{ email_port }}
+    username: {{ email_username }}
+    use_tls: {{ email_use_tls | lower }}
+    from: {{ email_from }}
+{% endif %}
+  error_reporting:
+    enabled: {{ authentik_error_reporting | lower }}
+  log_level: {{ authentik_log_level }}
+server:
+  service:
+    type: {{ network_mode }}
+{% if traefik_enabled %}
+  ingress:
+    enabled: true
+    ingressClassName: traefik
+{% if traefik_tls_enabled and traefik_tls_certmanager %}
+    annotations:
+      cert-manager.io/cluster-issuer: {{ traefik_tls_certmanager_issuer }}
+{% endif %}
+    hosts:
+      - {{ traefik_host }}
+{% if traefik_tls_enabled %}
+    tls:
+      - secretName: {{ traefik_tls_secret }}
+        hosts:
+          - {{ traefik_host }}
+{% endif %}
+{% endif %}
+postgresql:
+  enabled: false
+redis:
+  enabled: true

+ 45 - 0
library/helm/certmanager/template.yaml

@@ -0,0 +1,45 @@
+---
+kind: helm
+schema: "1.0"
+metadata:
+  name: Cert-Manager
+  description: >
+    Helm values template for cert-manager, a Kubernetes add-on to automate the management
+    and issuance of TLS certificates from various sources.
+
+
+    Chart Repository: https://charts.jetstack.io
+
+    Chart Name: cert-manager
+
+    Chart Version: Compatible with cert-manager v1.18.2
+
+
+    Project: https://cert-manager.io/
+
+    Documentation: https://cert-manager.io/docs/
+  version: 1.18.2
+  author: Christian Lempa
+  date: '2025-01-11'
+spec:
+  general:
+    vars:
+      release_name:
+        default: cert-manager
+      namespace:
+        default: cert-manager
+  dns:
+    title: DNS Configuration
+    vars:
+      dns_recursive_nameservers_only:
+        type: bool
+        description: Use only recursive nameservers for DNS01 challenges
+        default: true
+      dns_nameserver_1:
+        type: str
+        description: Primary DNS nameserver for DNS01 challenges
+        default: 1.1.1.1:53
+      dns_nameserver_2:
+        type: str
+        description: Secondary DNS nameserver for DNS01 challenges
+        default: 1.0.0.1:53

+ 21 - 0
library/helm/certmanager/values.yaml.j2

@@ -0,0 +1,21 @@
+---
+image:
+  repository: quay.io/jetstack/cert-manager-controller
+  tag: v1.18.2
+webhook:
+  image:
+    repository: quay.io/jetstack/cert-manager-webhook
+    tag: v1.18.2
+cainjector:
+  image:
+    repository: quay.io/jetstack/cert-manager-cainjector
+    tag: v1.18.2
+
+crds:
+  enabled: true
+{% if dns_recursive_nameservers_only %}
+
+extraArgs:
+  - --dns01-recursive-nameservers-only
+  - --dns01-recursive-nameservers={{ dns_nameserver_1 }},{{ dns_nameserver_2 }}
+{% endif %}

+ 49 - 0
library/helm/longhorn/template.yaml

@@ -0,0 +1,49 @@
+---
+kind: helm
+schema: "1.0"
+metadata:
+  name: Longhorn
+  description: >
+    Helm values template for Longhorn, a distributed block storage system for Kubernetes
+    that provides persistent storage with built-in backup and disaster recovery.
+
+
+    Chart Repository: https://charts.longhorn.io
+
+    Chart Name: longhorn
+
+    Chart Version: Compatible with Longhorn v1.9.1
+
+
+    Project: https://longhorn.io/
+
+    Documentation: https://longhorn.io/docs/
+  version: 1.9.1
+  author: Christian Lempa
+  date: '2025-01-11'
+spec:
+  general:
+    vars:
+      release_name:
+        default: longhorn
+      namespace:
+        default: longhorn-system
+  ui:
+    title: Longhorn UI
+    vars:
+      ui_replicas:
+        type: int
+        description: Number of Longhorn UI replicas
+        default: 1
+  backup:
+    title: Backup Configuration
+    toggle: backup_enabled
+    vars:
+      backup_enabled:
+        type: bool
+        description: Enable backup target configuration
+        default: false
+      backup_target:
+        type: str
+        description: Backup target URL (e.g., s3://bucket or nfs://server/path)
+        prompt: Backup target URL

+ 51 - 0
library/helm/longhorn/values.yaml.j2

@@ -0,0 +1,51 @@
+---
+image:
+  longhorn:
+    engine:
+      repository: "longhornio/longhorn-engine"
+      tag: "v1.9.1"
+    manager:
+      repository: "longhornio/longhorn-manager"
+      tag: "v1.9.1"
+    ui:
+      repository: "longhornio/longhorn-ui"
+      tag: "v1.9.1"
+    instanceManager:
+      repository: "longhornio/longhorn-instance-manager"
+      tag: "v1.9.1"
+    shareManager:
+      repository: "longhornio/longhorn-share-manager"
+      tag: "v1.9.1"
+    backingImageManager:
+      repository: "longhornio/backing-image-manager"
+      tag: "v1.9.1"
+    supportBundleKit:
+      repository: "longhornio/support-bundle-kit"
+      tag: "v0.0.60"
+  csi:
+    attacher:
+      repository: "longhornio/csi-attacher"
+      tag: "v4.9.0"
+    provisioner:
+      repository: "longhornio/csi-provisioner"
+      tag: "v5.3.0"
+    nodeDriverRegistrar:
+      repository: "longhornio/csi-node-driver-registrar"
+      tag: "v2.14.0"
+    resizer:
+      repository: "longhornio/csi-resizer"
+      tag: "v1.14.0"
+    snapshotter:
+      repository: "longhornio/csi-snapshotter"
+      tag: "v8.3.0"
+    livenessProbe:
+      repository: "longhornio/livenessprobe"
+      tag: "v2.16.0"
+
+longhornUI:
+  replicas: {{ ui_replicas }}
+{% if backup_enabled %}
+
+defaultSettings:
+  backupTarget: {{ backup_target }}
+{% endif %}

+ 40 - 0
library/helm/portainer/template.yaml

@@ -0,0 +1,40 @@
+---
+kind: helm
+schema: "1.0"
+metadata:
+  name: Portainer CE
+  description: >
+    Helm values template for Portainer Community Edition, a container management platform
+    that simplifies Docker and Kubernetes management.
+
+
+    Chart Repository: https://portainer.github.io/k8s/
+
+    Chart Name: portainer
+
+    Chart Version: Compatible with Portainer CE 2.34.0
+
+
+    Project: https://www.portainer.io/
+
+    Documentation: https://docs.portainer.io/
+  version: 2.34.0
+  author: Christian Lempa
+  date: '2025-01-11'
+spec:
+  general:
+    vars:
+      release_name:
+        default: portainer
+  networking:
+    vars:
+      network_mode:
+        default: ClusterIP
+  traefik:
+    vars:
+      traefik_host:
+        default: portainer.home.arpa
+  volumes:
+    vars:
+      volumes_pvc_name:
+        default: portainer

+ 27 - 0
library/helm/portainer/values.yaml.j2

@@ -0,0 +1,27 @@
+---
+image:
+  repository: portainer/portainer-ce
+  tag: 2.34.0
+  pullPolicy: IfNotPresent
+
+service:
+  type: {{ network_mode }}
+{% if traefik_enabled %}
+
+ingress:
+  enabled: true
+  hosts:
+    - host: {{ traefik_host }}
+      paths:
+        - path: /
+          port: "9000"
+  tls:
+    - secretName: {{ traefik_tls_secret }}
+      hosts:
+        - {{ traefik_host }}
+{% endif %}
+{% if volumes_mode == 'existing-pvc' %}
+
+persistence:
+  existingClaim: {{ volumes_pvc_name }}
+{% endif %}

+ 76 - 0
library/helm/traefik/template.yaml

@@ -0,0 +1,76 @@
+---
+kind: helm
+schema: "1.0"
+metadata:
+  name: Traefik Ingress Controller
+  description: >
+    Helm values template for Traefik v3, a modern HTTP reverse proxy and load balancer
+    designed for microservices.
+
+
+    Chart Repository: https://traefik.github.io/charts
+
+    Chart Name: traefik
+
+    Chart Version: Compatible with Traefik v3.5.3
+
+
+    Project: https://traefik.io/
+
+    Documentation: https://doc.traefik.io/traefik/
+  version: 3.5.3
+  author: Christian Lempa
+  date: '2025-01-11'
+spec:
+  general:
+    vars:
+      release_name:
+        default: traefik
+  traefik_config:
+    title: Traefik Settings
+    vars:
+      dashboard_enabled:
+        type: bool
+        description: Enable Traefik dashboard
+        default: false
+        extra: "WARNING: Don't use in production!"
+      accesslog_enabled:
+        type: bool
+        description: Enable Traefik access log
+        default: false
+      prometheus_enabled:
+        type: bool
+        description: Enable Prometheus metrics
+        default: false
+  http_redirect:
+    title: HTTP to HTTPS Redirect
+    toggle: http_redirect_enabled
+    vars:
+      http_redirect_enabled:
+        type: bool
+        default: true
+        description: Automatically redirect HTTP traffic to HTTPS
+      http_redirect_permanent:
+        type: bool
+        default: true
+        description: Use permanent redirect (301) instead of temporary (302)
+  dashboard:
+    title: Dashboard IngressRoute
+    toggle: dashboard_ingressroute_enabled
+    vars:
+      dashboard_ingressroute_enabled:
+        type: bool
+        default: false
+        description: Create IngressRoute for Traefik dashboard
+      dashboard_host:
+        type: hostname
+        description: FQDN for the Traefik dashboard
+        prompt: Dashboard hostname (e.g., traefik.example.com)
+      dashboard_middleware:
+        type: str
+        description: Authentication middleware name for dashboard protection
+        default: traefik-dashboard-auth
+      dashboard_tls_secret:
+        type: str
+        description: TLS secret name for dashboard
+        default: traefik-dashboard-tls

+ 41 - 0
library/helm/traefik/values.yaml.j2

@@ -0,0 +1,41 @@
+---
+image:
+  repository: traefik
+  tag: v3.5.3
+  pullPolicy: IfNotPresent
+{% if http_redirect_enabled %}
+
+ports:
+  web:
+    redirections:
+      entryPoint:
+        to: websecure
+        scheme: https
+        permanent: {{ http_redirect_permanent | lower }}
+{% endif %}
+{% if dashboard_enabled or accesslog_enabled or prometheus_enabled %}
+
+additionalArguments:
+{% if dashboard_enabled %}
+  - "--api.dashboard=true"
+{% endif %}
+{% if accesslog_enabled %}
+  - "--accesslog=true"
+{% endif %}
+{% if prometheus_enabled %}
+  - "--metrics.prometheus=true"
+{% endif %}
+{% endif %}
+{% if dashboard_ingressroute_enabled %}
+
+ingressRoute:
+  dashboard:
+    enabled: true
+    entryPoints:
+      - websecure
+    matchRule: Host(`{{ dashboard_host }}`)
+    middlewares:
+      - name: {{ dashboard_middleware }}
+    tls:
+      secretName: {{ dashboard_tls_secret }}
+{% endif %}

+ 15 - 0
library/kubernetes/certmanager-certificate/certmanager-certificate.yaml

@@ -0,0 +1,15 @@
+---
+# --> (Example) Create a Certificate for your hostname...
+# apiVersion: cert-manager.io/v1
+# kind: Certificate
+# metadata:
+#   name: your-certificate  # <-- Replace with your certificate name
+#   namespace: your-namespace  # <-- Replace with your namespace
+# spec:
+#   secretName: your-secret  # <-- Replace with your secret name
+#   issuerRef:
+#     name: clusterissuer  # <-- Replace with your issuer name
+#     kind: ClusterIssuer
+#   dnsNames:
+#     - your-hostname  # <-- Replace with your hostname
+# <--

+ 17 - 0
library/kubernetes/certmanager-clusterissuer/certmanager-clusterissuer.yaml

@@ -0,0 +1,17 @@
+---
+apiVersion: cert-manager.io/v1
+kind: ClusterIssuer
+metadata:
+  name: cloudflare-clusterissuer
+spec:
+  acme:
+    email: your-email@address  # <-- Replace with your email address
+    server: https://acme-v02.api.letsencrypt.org/directory
+    privateKeySecretRef:
+      name: cloudflare-clusterissuer-account-key
+    solvers:
+      - dns01:
+          cloudflare:
+            apiTokenSecretRef:
+              name: cloudflare-api-token-secret
+              key: api-token

+ 9 - 0
library/kubernetes/core-secret/core-secret.yaml

@@ -0,0 +1,9 @@
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: cloudflare-api-token-secret
+  namespace: cert-manager
+type: Opaque
+data:
+  api-token: your-api-token  # <-- Replace with your base64-encoded Cloudflare API token (values under `data:` must be base64; use `stringData:` for plain text)

+ 12 - 0
library/kubernetes/twingate-connector/twingate_connector.yaml

@@ -0,0 +1,12 @@
+---
+apiVersion: twingate.com/v1beta
+kind: TwingateConnector  # NOTE This requires the Twingate Kubernetes Operator to be installed in your Kubernetes cluster.
+metadata:
+  name: twingate-connector
+  namespace: twingate
+spec:
+  image:
+    repository: "twingate/connector"
+    tag: "1.74.0"
+  name: twingate-connector
+  hasStatusNotificationsEnabled: false

+ 10 - 0
library/kubernetes/twingate-operator/helm/values.yaml

@@ -0,0 +1,10 @@
+---
+image:
+  repository: twingate/kubernetes-operator
+  pullPolicy: IfNotPresent
+  tag: "0.25.2"
+twingateOperator:
+  network: ""  # FIXME Add your Twingate network name here
+  remoteNetworkId: ""  # FIXME Add your Twingate remote network ID here
+  logFormat: "plain"
+  logVerbosity: "quiet"

+ 1 - 0
library/packer/proxmox-ubuntu/files/99-pve.cfg

@@ -0,0 +1 @@
+datasource_list: [ConfigDrive, NoCloud]

+ 1 - 0
library/packer/proxmox-ubuntu/http/meta-data

@@ -0,0 +1 @@
+

+ 33 - 0
library/packer/proxmox-ubuntu/http/user-data

@@ -0,0 +1,33 @@
+#cloud-config
+autoinstall:
+  version: 1
+  locale: en_US
+  keyboard:
+    layout: de
+  ssh:
+    install-server: true
+    allow-pw: true
+    disable_root: true
+    ssh_quiet_keygen: true
+    allow_public_ssh_keys: true
+  packages:
+    - qemu-guest-agent
+    - sudo
+  storage:
+    layout:
+      name: direct
+    swap:
+      size: 0
+  user-data:
+    package_upgrade: false
+    timezone: Europe/Berlin
+    users:
+      - name: your-user-name
+        groups: [adm, sudo]
+        lock-passwd: false
+        sudo: ALL=(ALL) NOPASSWD:ALL
+        shell: /bin/bash
+        # passwd: your-password
+        # - or -
+        # ssh_authorized_keys:
+        #   - your-ssh-key

+ 159 - 0
library/packer/proxmox-ubuntu/ubuntu-server-noble.pkr.hcl

@@ -0,0 +1,159 @@
+# Ubuntu Server Noble (24.04.x)
+# ---
+# Packer Template to create an Ubuntu Server (Noble 24.04.x) on Proxmox
+
+# Variable Definitions
+variable "proxmox_api_url" {
+    type = string
+}
+
+variable "proxmox_api_token_id" {
+    type = string
+}
+
+variable "proxmox_api_token_secret" {
+    type      = string
+    sensitive = true
+}
+
+locals {
+    disk_storage = "local-lvm"
+}
+
+# Resource Definition for the VM Template
+source "proxmox-iso" "ubuntu-server-noble" {
+
+    # Proxmox Connection Settings
+    proxmox_url = "${var.proxmox_api_url}"
+    username    = "${var.proxmox_api_token_id}"
+    token       = "${var.proxmox_api_token_secret}"
+    # (Optional) Skip TLS Verification
+    # insecure_skip_tls_verify = true
+
+    # VM General Settings
+    node                 = "your-proxmox-node"
+    vm_id                = "100"
+    vm_name              = "ubuntu-server-noble"
+    template_description = "Ubuntu Server Noble Image"
+
+    # VM OS Settings
+    # (Option 1) Local ISO File
+    # boot_iso {
+    #     type         = "scsi"
+    #     iso_file     = "local:iso/ubuntu-24.04-live-server-amd64.iso"
+    #     unmount      = true
+    #     iso_checksum = "e240e4b801f7bb68c20d1356b60968ad0c33a41d00d828e74ceb3364a0317be9"
+    # }
+    # (Option 2) Download ISO
+    # boot_iso {
+    #     type             = "scsi"
+    #     iso_url          = "https://releases.ubuntu.com/24.04/ubuntu-24.04-live-server-amd64.iso"
+    #     unmount          = true
+    #     iso_storage_pool = "local"
+    #     iso_checksum     = "file:https://releases.ubuntu.com/noble/SHA256SUMS"
+    # }
+
+    # VM System Settings
+    qemu_agent = true
+
+    # VM Hard Disk Settings
+    scsi_controller = "virtio-scsi-pci"
+
+    disks {
+        disk_size         = "25G"
+        format            = "qcow2"
+        storage_pool      = local.disk_storage
+        type              = "virtio"
+    }
+
+    # VM CPU Settings
+    cores = "1"
+
+    # VM Memory Settings
+    memory = "2048"
+
+    # VM Network Settings
+    network_adapters {
+        model    = "virtio"
+        bridge   = "vmbr0"
+        firewall = false
+    }
+
+    # VM Cloud-Init Settings
+    cloud_init              = true
+    cloud_init_storage_pool = local.disk_storage
+
+    # PACKER Boot Commands
+    boot         = "c"
+    boot_wait    = "10s"
+    communicator = "ssh"
+    boot_command = [
+        "<esc><wait>",
+        "e<wait>",
+        "<down><down><down><end>",
+        "<bs><bs><bs><bs><wait>",
+        "autoinstall ds=nocloud-net\\;s=http://{{ .HTTPIP }}:{{ .HTTPPort }}/ ---<wait>",
+        "<f10><wait>"
+    ]
+    # Useful for debugging
+    # Sometimes lag will require this
+    # boot_key_interval = "500ms"
+
+
+    # PACKER Autoinstall Settings
+    http_directory          = "http"
+
+    # (Optional) Bind IP Address and Port
+    # http_bind_address       = "0.0.0.0"
+    # http_port_min           = 8802
+    # http_port_max           = 8802
+
+    ssh_username            = "your-user-name"
+
+    # (Option 1) Add your Password here
+    # ssh_password        = "your-password"
+    # - or -
+    # (Option 2) Add your Private SSH KEY file here
+    # ssh_private_key_file    = "~/.ssh/id_rsa"
+
+    # Raise the timeout, when installation takes longer
+    ssh_timeout             = "30m"
+    ssh_pty                 = true
+}
+
+# Build Definition to create the VM Template
+build {
+
+    name    = "ubuntu-server-noble"
+    sources = ["source.proxmox-iso.ubuntu-server-noble"]
+
+    # Provisioning the VM Template for Cloud-Init Integration in Proxmox #1
+    provisioner "shell" {
+        inline = [
+            "while [ ! -f /var/lib/cloud/instance/boot-finished ]; do echo 'Waiting for cloud-init...'; sleep 1; done",
+            "sudo rm /etc/ssh/ssh_host_*",
+            "sudo truncate -s 0 /etc/machine-id",
+            "sudo apt -y autoremove --purge",
+            "sudo apt -y clean",
+            "sudo apt -y autoclean",
+            "sudo cloud-init clean",
+            "sudo rm -f /etc/cloud/cloud.cfg.d/subiquity-disable-cloudinit-networking.cfg",
+            "sudo rm -f /etc/netplan/00-installer-config.yaml",
+            "sudo sync"
+        ]
+    }
+
+    # Provisioning the VM Template for Cloud-Init Integration in Proxmox #2
+    provisioner "file" {
+        source      = "files/99-pve.cfg"
+        destination = "/tmp/99-pve.cfg"
+    }
+
+    # Provisioning the VM Template for Cloud-Init Integration in Proxmox #3
+    provisioner "shell" {
+        inline = [ "sudo cp /tmp/99-pve.cfg /etc/cloud/cloud.cfg.d/99-pve.cfg" ]
+    }
+
+    # Add additional provisioning scripts here
+    # ...
+}

+ 9 - 0
library/terraform/civo/credentials.tf

@@ -0,0 +1,9 @@
+# CIVO Credentials
+# ---
+# Credential Variables needed for CIVO
+
+# Civo Config
+variable "civo_token" {
+    description = "Civo API Token"
+    type = string
+}

+ 5 - 0
library/terraform/civo/firewall.tf

@@ -0,0 +1,5 @@
+resource "civo_firewall" "your_firewall" {
+  name       = "your-firewall-name"
+  network_id = civo_network.your_network.id
+  create_default_rules = true
+}

+ 23 - 0
library/terraform/civo/kubernetes.tf

@@ -0,0 +1,23 @@
+# CIVO Kubernetes
+# ---
+# Templates to create a Kubernetes Cluster on CIVO
+
+# Create a new Kubernetes Cluster
+resource "civo_kubernetes_cluster" "your-kubernetes-cluster" {
+  name = "your-kubernetes-cluster"
+  applications = ""
+  firewall_id = civo_firewall.your_firewall.id
+  network_id = civo_network.your_network.id
+  pools {
+    size = element(data.civo_size.k8s_std_small.sizes, 0).name
+    node_count = 3
+  }
+  # (Optional) add dependencies on other resources
+  depends_on = [ civo_firewall.your_firewall, civo_network.your_network ]
+}
+
+# (Optional) Time Sleep elements for other Objects that need to wait a few seconds after the Cluster deployment
+# resource "time_sleep" "wait_for_kubernetes" {
+#   depends_on = [civo_kubernetes_cluster.your-kubernetes-cluster]
+#   create_duration = "20s"
+# }

+ 3 - 0
library/terraform/civo/network.tf

@@ -0,0 +1,3 @@
+resource "civo_network" "your_network" {
+  label = "your-network-label"
+}

+ 20 - 0
library/terraform/civo/provider.tf

@@ -0,0 +1,20 @@
+# CIVO Provider
+# ---
+# Initial Provider Configuration for CIVO
+
+terraform {
+  required_version = ">= 1.5.0"
+
+  required_providers {
+    civo = {
+      source = "civo/civo"
+      version = "~> 1.1.0"
+    }
+  }
+}
+
+provider "civo" {
+    token = var.civo_token
+    # (optional): Specify your region
+    # region = "FRA1"
+}

+ 139 - 0
library/terraform/civo/query.tf

@@ -0,0 +1,139 @@
+# CIVO Queries
+# ---
+# Query commonly used cloud resources from CIVO API
+
+# CIVO Instance Sizes
+data "civo_size" "instance_xsmall" {
+    filter {
+        key = "name"
+        values = ["g3.xsmall"]
+        match_by = "re"
+    }
+}
+
+data "civo_size" "instance_small" {
+    filter {
+        key = "name"
+        values = ["g3.small"]
+        match_by = "re"
+    }
+}
+
+data "civo_size" "instance_medium" {
+    filter {
+        key = "name"
+        values = ["g3.medium"]
+        match_by = "re"
+    }
+}
+
+data "civo_size" "instance_large" {
+    filter {
+        key = "name"
+        values = ["g3.large"]
+        match_by = "re"
+    }
+}
+
+data "civo_size" "instance_xlarge" {
+    filter {
+        key = "name"
+        values = ["g3.xlarge"]
+        match_by = "re"
+    }
+}
+
+data "civo_size" "instance_2xlarge" {
+    filter {
+        key = "name"
+        values = ["g3.2xlarge"]
+        match_by = "re"
+    }
+}
+
+
+# CIVO Kubernetes Standard Sizes
+data "civo_size" "k8s_std_xsmall" {
+    filter {
+        key = "name"
+        values = ["g3.k3s.xsmall"]
+        match_by = "re"
+    }
+}
+
+data "civo_size" "k8s_std_small" {
+    filter {
+        key = "name"
+        values = ["g3.k3s.small"]
+        match_by = "re"
+    }
+}
+
+data "civo_size" "k8s_std_medium" {
+    filter {
+        key = "name"
+        values = ["g3.k3s.medium"]
+        match_by = "re"
+    }
+}
+
+data "civo_size" "k8s_std_large" {
+    filter {
+        key = "name"
+        values = ["g3.k3s.large"]
+        match_by = "re"
+    }
+}
+
+data "civo_size" "k8s_std_xlarge" {
+    filter {
+        key = "name"
+        values = ["g3.k3s.xlarge"]
+        match_by = "re"
+    }
+}
+
+data "civo_size" "k8s_std_2xlarge" {
+    filter {
+        key = "name"
+        values = ["g3.k3s.2xlarge"]
+        match_by = "re"
+    }
+}
+
+
+# CIVO Instance Diskimages
+data "civo_disk_image" "debian" {
+   filter {
+        key = "name"
+        values = ["debian-10"]
+   }
+}
+
+data "civo_disk_image" "debian_9" {
+   filter {
+        key = "name"
+        values = ["debian-9"]
+   }
+}
+
+data "civo_disk_image" "ubuntu" {
+   filter {
+        key = "name"
+        values = ["ubuntu-focal"]
+   }
+}
+
+data "civo_disk_image" "ubuntu_bionic" {
+   filter {
+        key = "name"
+        values = ["ubuntu-bionic"]
+   }
+}
+
+data "civo_disk_image" "centos" {
+   filter {
+        key = "name"
+        values = ["centos-7"]
+   }
+}

+ 14 - 0
library/terraform/civo/server.tf

@@ -0,0 +1,14 @@
+# CIVO Servers
+# ---
+# Templates to create a Linux Server on CIVO
+
+# CIVO Instance Server
+resource "civo_instance" "your-server" {
+    hostname = "your-fqdn-server-name"
+    size = data.civo_size.instance_xsmall.sizes.0.name
+    disk_image = data.civo_disk_image.debian.diskimages.0.id
+    # initial_user = "your-initial-user"
+    # sshkey_id = data.civo_ssh_key.your-ssh-key.id
+    # reverse_dns = "your-server.your-domain"
+}
+

+ 14 - 0
library/terraform/civo/ssh_key.tf

@@ -0,0 +1,14 @@
+# CIVO SSH Keys
+# ---
+# Query or Create SSH Keys to authenticate to Servers on CIVO
+
+# Query existing CIVO SSH Key
+data "civo_ssh_key" "your-ssh-key" {
+  name = "your-ssh-key-name"
+}
+
+# Create new SSH Key
+resource "civo_ssh_key" "your-ssh-key"{
+    name = "your-ssh-key-name"
+    public_key = file("~/.ssh/id_rsa.pub")
+}

+ 13 - 0
library/terraform/cloudflare/credentials.tf

@@ -0,0 +1,13 @@
+# Cloudflare Credentials
+# ---
+# Credential Variables needed for Cloudflare
+
+# Cloudflare Config
+variable "cloudflare_email" {
+    description = "The email address for your Cloudflare account"
+    type = string
+}
+variable "cloudflare_api_key" {
+    description = "The API key for your Cloudflare account"
+    type = string
+}

+ 12 - 0
library/terraform/cloudflare/dns.tf

@@ -0,0 +1,12 @@
+# Cloudflare DNS
+# ---
+# Templates to manage DNS Records on Cloudflare
+
+# A Record
+resource "cloudflare_record" "your-dns-record-name" {
+    zone_id = "your-zone-id"
+    name = "your-public-dns-value"
+    value =  "your-public-ip-address"
+    type = "A"
+    proxied = false  # set to true, to hide public IP
+}

+ 19 - 0
library/terraform/cloudflare/provider.tf

@@ -0,0 +1,19 @@
+# Cloudflare Provider
+# ---
+# Initial Provider Configuration for Cloudflare
+
+terraform {
+  required_version = ">= 0.13.0"
+
+  required_providers {
+    cloudflare = {
+      source = "cloudflare/cloudflare"
+      version = "~> 4.0"
+    }
+  }
+}
+
+provider "cloudflare" {
+    email = var.cloudflare_email
+    api_key =  var.cloudflare_api_key
+}

+ 31 - 0
library/terraform/helm/certmanager.tf

@@ -0,0 +1,31 @@
+resource "kubernetes_namespace" "certmanager" {
+
+    metadata {
+        name = "certmanager"
+    }
+}
+
+resource "helm_release" "certmanager" {
+
+    depends_on = [kubernetes_namespace.certmanager]
+
+    name = "certmanager"
+    namespace = "certmanager"
+
+    repository = "https://charts.jetstack.io"
+    chart      = "cert-manager"
+
+    # Install Kubernetes CRDs
+    set {
+        name  = "installCRDs"
+        value = "true"
+    }
+}
+
+# (Optional) Create a Time-Sleep for Certificates and Issuer Manifests to deploy later
+# resource "time_sleep" "wait_for_certmanager" {
+#
+#     depends_on = [helm_release.certmanager]
+#
+#     create_duration = "10s"
+# }

+ 46 - 0
library/terraform/helm/traefik.tf

@@ -0,0 +1,46 @@
+resource "kubernetes_namespace" "traefik" {
+
+    metadata {
+        name = "traefik"
+    }
+
+}
+
+resource "helm_release" "traefik" {
+
+    depends_on = [kubernetes_namespace.traefik]
+
+    name = "traefik"
+    namespace = "traefik"
+
+    repository = "https://helm.traefik.io/traefik"
+    chart      = "traefik"
+
+    # Set Traefik as the Default Ingress Controller
+    set {
+        name  = "ingressClass.enabled"
+        value = "true"
+    }
+    set {
+        name  = "ingressClass.isDefaultClass"
+        value = "true"
+    }
+
+    # Default Redirect
+    set {
+        name  = "ports.web.redirectTo"
+        value = "websecure"
+    }
+
+    # Enable TLS on Websecure
+    set {
+        name  = "ports.websecure.tls.enabled"
+        value = "true"
+    }
+
+    # TLS Options (that's not working for some reason...)
+    set {
+        name  = "tlsOptions.default.minVersion"
+        value = "VersionTLS12"
+    }
+}

+ 7 - 0
library/terraform/kubectl/manifest.tf

@@ -0,0 +1,7 @@
+resource "kubectl_manifest" "your-manifest-file" {
+
+    yaml_body = <<YAML
+# Put your Manifest-file Content right here...
+# ...
+    YAML
+}

+ 24 - 0
library/terraform/kubectl/provider.tf

@@ -0,0 +1,24 @@
+# Kubectl Provider
+# ---
+# Initial Provider Configuration for Kubectl
+
+terraform {
+
+    required_version = ">= 0.13.0"
+
+    required_providers {
+        kubectl = {
+            source = "gavinbunney/kubectl"
+            version = "1.19.0"
+        }
+    }
+}
+
+# Dynamic Configuration from CIVO Kubernetes deployment
+# provider "kubectl" {
+#     host = "${yamldecode(civo_kubernetes_cluster.your-kubernetes-cluster.kubeconfig).clusters.0.cluster.server}"
+#     client_certificate = "${base64decode(yamldecode(civo_kubernetes_cluster.your-kubernetes-cluster.kubeconfig).users.0.user.client-certificate-data)}"
+#     client_key = "${base64decode(yamldecode(civo_kubernetes_cluster.your-kubernetes-cluster.kubeconfig).users.0.user.client-key-data)}"
+#     cluster_ca_certificate = "${base64decode(yamldecode(civo_kubernetes_cluster.your-kubernetes-cluster.kubeconfig).clusters.0.cluster.certificate-authority-data)}"
+#     load_config_file = false
+# }

+ 41 - 0
library/terraform/kubernetes/deployment.tf

@@ -0,0 +1,41 @@
+resource "kubernetes_deployment" "your-deployment" {
+
+    depends_on = [kubernetes_namespace.your-namespace]
+
+    metadata {
+        name = "your-deployment"
+        namespace = "your-namespace"
+        labels = {
+            app = "your-app-selector"
+        }
+    }
+
+    spec {
+        replicas = 1
+
+        selector {
+            match_labels = {
+                app = "your-app-selector"
+            }
+        }
+
+        template {
+            metadata {
+                labels = {
+                    app = "your-app-selector"
+                }
+            }
+
+            spec {
+                container {
+                    image = "your-image:latest"
+                    name  = "your-container"
+
+                    port {
+                        container_port = 80
+                    }
+                }
+            }
+        }
+    }
+}

+ 39 - 0
library/terraform/kubernetes/ingress.tf

@@ -0,0 +1,39 @@
+resource "kubernetes_ingress_v1" "your-ingress" {
+
+    depends_on = [kubernetes_namespace.your-namespace]
+
+    metadata {
+        name = "your-ingress"
+        namespace = "your-namespace"
+    }
+
+    spec {
+        rule {
+
+            host = "your-domain"
+
+            http {
+
+                path {
+                    path = "/"
+
+                    backend {
+                        service {
+                            name = "your-service"
+                            port {
+                                number = 80
+                            }
+                        }
+                    }
+
+                }
+            }
+        }
+
+        # (Optional) Add an SSL Certificate
+        # tls {
+        #     secret_name = "ssl-certificate-object"
+        #     hosts = ["your-domain"]
+        # }
+    }
+}

+ 23 - 0
library/terraform/kubernetes/provider.tf

@@ -0,0 +1,23 @@
+# Kubernetes Provider
+# ---
+# Initial Provider Configuration for Kubernetes
+
+terraform {
+
+    required_version = ">= 0.13.0"
+
+    required_providers {
+        kubernetes = {
+            source = "hashicorp/kubernetes"
+            version = "2.37.1"
+        }
+    }
+}
+
+# Dynamic Configuration from CIVO Kubernetes deployment
+# provider "kubernetes" {
+#     host = "${yamldecode(civo_kubernetes_cluster.your-kubernetes-cluster.kubeconfig).clusters.0.cluster.server}"
+#     client_certificate = "${base64decode(yamldecode(civo_kubernetes_cluster.your-kubernetes-cluster.kubeconfig).users.0.user.client-certificate-data)}"
+#     client_key = "${base64decode(yamldecode(civo_kubernetes_cluster.your-kubernetes-cluster.kubeconfig).users.0.user.client-key-data)}"
+#     cluster_ca_certificate = "${base64decode(yamldecode(civo_kubernetes_cluster.your-kubernetes-cluster.kubeconfig).clusters.0.cluster.certificate-authority-data)}"
+# }

+ 19 - 0
library/terraform/kubernetes/service.tf

@@ -0,0 +1,19 @@
+resource "kubernetes_service" "your-service" {
+
+    depends_on = [kubernetes_namespace.your-namespace]
+
+    metadata {
+        name = "your-service"
+        namespace = "your-namespace"
+    }
+    spec {
+        selector = {
+            app = "your-app-selector"
+        }
+        port {
+            port = 80
+        }
+
+        type = "ClusterIP"
+    }
+}

+ 40 - 0
library/terraform/proxmox/provider.tf

@@ -0,0 +1,40 @@
+terraform {
+  required_version = ">= 0.13.0"
+
+  required_providers {
+    proxmox = {
+      # LINK https://github.com/Telmate/terraform-provider-proxmox
+      source = "telmate/proxmox"
+      version = "3.0.1-rc9"
+    }
+  }
+}
+
+variable "PROXMOX_URL" {
+  type = string
+}
+
+variable "PROXMOX_USER" {
+  type      = string
+  sensitive = true
+}
+
+variable "PROXMOX_TOKEN" {
+  type      = string
+  sensitive = true
+}
+
+variable "PUBLIC_SSH_KEY" {
+  # NOTE This is the public SSH key, you want to upload to VMs and LXC containers.
+  type      = string
+  sensitive = true
+}
+
+provider "proxmox" {
+  pm_api_url = var.PROXMOX_URL
+  pm_api_token_id = var.PROXMOX_USER
+  pm_api_token_secret = var.PROXMOX_TOKEN
+  
+  # NOTE Optional, but recommended to set to true if you are using self-signed certificates.
+  pm_tls_insecure = false
+}

+ 102 - 0
library/terraform/proxmox/vm_qemu.tf

@@ -0,0 +1,102 @@
+resource "proxmox_vm_qemu" "your-vm" {
+
+  # SECTION General Settings
+
+  name = "vm-name"
+  desc = "description"
+  agent = 1  # <-- (Optional) Enable QEMU Guest Agent
+
+  # FIXME Before deployment, set the correct target node name
+  target_node = "your-proxmox-node"
+
+  # FIXME Before deployment, set the desired VM ID (must be unique on the target node)
+  vmid = "100"
+
+  # !SECTION
+  
+  # SECTION Template Settings
+
+  # FIXME Before deployment, set the correct template or VM name in the clone field
+  #       or set full_clone to false, and remove "clone" to manage existing (imported) VMs
+  clone = "your-clone-name"
+  full_clone = true
+
+  # !SECTION
+
+  # SECTION Boot Process
+
+  onboot = true 
+
+  # NOTE Change startup, shutdown and auto reboot behavior
+  startup = ""
+  automatic_reboot = false
+
+  # !SECTION
+
+  # SECTION Hardware Settings
+
+  qemu_os = "other"
+  bios = "seabios"
+  cores = 2
+  sockets = 1
+  cpu_type = "host"
+  memory = 2048
+
+  # NOTE Minimum memory of the balloon device, set to 0 to disable ballooning
+  balloon = 2048
+  
+  # !SECTION
+
+  # SECTION Network Settings
+
+  network {
+    id     = 0  # NOTE Required since 3.x.x
+    bridge = "vmbr1"
+    model  = "virtio"
+  }
+
+  # !SECTION
+
+  # SECTION Disk Settings
+  
+  # NOTE Change the SCSI controller type, since Proxmox 7.3, virtio-scsi-single is the default one         
+  scsihw = "virtio-scsi-single"
+  
+  # NOTE New disk layout (changed in 3.x.x)
+  disks {
+    ide {
+      ide0 {
+        cloudinit {
+          storage = "local-lvm"
+        }
+      }
+    }
+    virtio {
+      virtio0 {
+        disk {
+          storage = "local-lvm"
+
+          # NOTE Since 3.x.x size change disk size will trigger a disk resize
+          size = "20G"
+
+          # NOTE Enable IOThread for better disk performance in virtio-scsi-single
+          #      and enable disk replication
+          iothread = true
+          replicate = false
+        }
+      }
+    }
+  }
+
+  # !SECTION
+
+  # SECTION Cloud Init Settings
+
+  # FIXME Before deployment, adjust according to your network configuration
+  ipconfig0 = "ip=0.0.0.0/0,gw=0.0.0.0"
+  nameserver = "0.0.0.0"
+  ciuser = "your-username"
+  sshkeys = var.PUBLIC_SSH_KEY
+
+  # !SECTION
+}

+ 15 - 0
library/terraform/templates/cloud-deployment-example/civo.tf

@@ -0,0 +1,15 @@
+data "civo_ssh_key" "sshkey" {
+  name = "your-ssh-key-name"
+}
+
+resource "civo_instance" "server" {
+    hostname = "servername"
+    size = "g3.small"
+    disk_image = "ubuntu-focal"
+    # (optional):
+    # ---
+    # tags = ["python", "nginx"]
+    # notes = "this is a note for the server"
+    # initial_user = "user"
+    # sshkey_id = data.civo_ssh_key.sshkey.id
+}

+ 9 - 0
library/terraform/templates/cloud-deployment-example/cloudflare.tf

@@ -0,0 +1,9 @@
+variable "zone_id" {}
+
+resource "cloudflare_record" "server" {
+  zone_id = var.zone_id
+  name    = "your-dns-name"
+  value   = civo_instance.server.public_ip
+  type    = "A"
+  proxied = false
+}

+ 40 - 0
library/terraform/templates/cloud-deployment-example/main.tf

@@ -0,0 +1,40 @@
+# General Terraform Settings
+# ---
+
+terraform {
+  required_providers {
+    cloudflare = {
+        source = "cloudflare/cloudflare"
+        version = "~> 4.0"
+    }
+    civo = {
+        source = "civo/civo"
+    }
+  }
+}
+
+# Declare Variables
+# ---
+# TODO: Create a yourfile.auto.tfvars file in the project directory and add your variables in it.
+#   Example:
+#   cloudflare_email = "youremail@yourmail.com"
+#   cloudflare_api_key = "your-api-key"
+#   civo_token = "your-token"
+
+variable "cloudflare_email" {}
+variable "cloudflare_api_key" {}
+variable "civo_token" {}
+
+# Set Default Provider Settings
+# ---
+
+provider "cloudflare" {
+  email = var.cloudflare_email
+  api_key =  var.cloudflare_api_key
+}
+
+provider "civo" {
+  token = var.civo_token
+  # (optional) change the default region
+  # region = "FRA1"
+}

+ 78 - 0
library/terraform/templates/kubernetes-automation-example/certmanager.tf

@@ -0,0 +1,78 @@
+resource "kubernetes_namespace" "certmanager" {
+
+    depends_on = [
+        time_sleep.wait_for_kubernetes
+    ]
+
+    metadata {
+        name = "certmanager"
+    }
+}
+
+resource "helm_release" "certmanager" {
+
+    depends_on = [
+        kubernetes_namespace.certmanager
+    ]
+
+    name = "certmanager"
+    namespace = "certmanager"
+
+    repository = "https://charts.jetstack.io"
+    chart = "cert-manager"
+
+    # Install Kubernetes CRDs
+    set {
+        name  = "installCRDs"
+        value = "true"
+    }
+}
+
+resource "time_sleep" "wait_for_certmanager" {
+
+    depends_on = [
+        helm_release.certmanager
+    ]
+
+    create_duration = "10s"
+}
+
+# Create a ClusterIssuer
+
+resource "kubectl_manifest" "cloudflare_prod" {
+
+    depends_on = [
+        time_sleep.wait_for_certmanager
+    ]
+
+    # TODO: add your mail address according to your configuration and API authentication settings!
+    # ---
+    yaml_body = <<YAML
+apiVersion: cert-manager.io/v1
+kind: ClusterIssuer
+metadata:
+  name: cloudflare-prod
+spec:
+  acme:
+    email: your-mail-address
+    server: https://acme-v02.api.letsencrypt.org/directory
+    privateKeySecretRef:
+      name: cloudflare-prod-account-key
+    solvers:
+    - dns01:
+        cloudflare:
+          email: your-mail-address
+          apiKeySecretRef:
+            name: cloudflare-api-key-secret
+            key: api-key
+    YAML
+}
+
+resource "time_sleep" "wait_for_clusterissuer" {
+
+    depends_on = [
+        kubectl_manifest.cloudflare_prod
+    ]
+
+    create_duration = "30s"
+}

+ 77 - 0
library/terraform/templates/kubernetes-automation-example/civo.tf

@@ -0,0 +1,77 @@
+# Kubernetes Cluster
+
+# Resolve the instance size by name via regex match; element(...) below picks
+# the first hit.
+data "civo_size" "xsmall" {
+
+    # TODO: (optional): change the values according to your desired instance image sizing
+    # ---
+    filter {
+        key = "name"
+        values = ["g4s.kube.xsmall"]
+        match_by = "re"
+    }
+}
+
+# Two-node cluster with no default marketplace applications and a custom
+# firewall (default rules disabled; only the explicit rules below apply).
+# NOTE(review): the names use underscores — confirm Civo accepts them;
+# DNS-style names (hyphens) may be required.
+resource "civo_kubernetes_cluster" "k8s_demo_1" {
+    name = "k8s_demo_1"
+    applications = ""
+    num_target_nodes = 2
+    target_nodes_size = element(data.civo_size.xsmall.sizes, 0).name
+    firewall_id = civo_firewall.fw_demo_1.id
+}
+
+resource "civo_firewall" "fw_demo_1" {
+    name = "fw_demo_1"
+
+    # Start closed: no implicit default rules, only the explicit ones below.
+    create_default_rules = false
+}
+
+# Allow inbound HTTP from anywhere.
+resource "civo_firewall_rule" "kubernetes_http" {
+    firewall_id = civo_firewall.fw_demo_1.id
+    protocol = "tcp"
+    start_port = "80"
+    end_port = "80"
+    cidr = ["0.0.0.0/0"]
+    direction = "ingress"
+    action = "allow"
+    label = "kubernetes_http"
+}
+
+# Allow inbound HTTPS from anywhere.
+resource "civo_firewall_rule" "kubernetes_https" {
+    firewall_id = civo_firewall.fw_demo_1.id
+    protocol = "tcp"
+    start_port = "443"
+    end_port = "443"
+    cidr = ["0.0.0.0/0"]
+    direction = "ingress"
+    action = "allow"
+    label = "kubernetes_https"
+}
+
+# Allow inbound Kubernetes API access from anywhere (needed so Terraform's
+# kubernetes/helm/kubectl providers can reach the cluster).
+resource "civo_firewall_rule" "kubernetes_api" {
+    firewall_id = civo_firewall.fw_demo_1.id
+    protocol = "tcp"
+    start_port = "6443"
+    end_port = "6443"
+    cidr = ["0.0.0.0/0"]
+    direction = "ingress"
+    action = "allow"
+    label = "kubernetes_api"
+}
+
+# Grace period after cluster creation before namespaces/releases are applied.
+resource "time_sleep" "wait_for_kubernetes" {
+
+    depends_on = [
+        civo_kubernetes_cluster.k8s_demo_1
+    ]
+
+    create_duration = "20s"
+}
+
+# Look up the load balancer Civo creates for the Traefik service; its public
+# IP feeds the Cloudflare DNS record. The name follows the pattern
+# <cluster>-<namespace>-<release>.
+data "civo_loadbalancer" "traefik_lb" {
+
+    depends_on = [
+        helm_release.traefik
+    ]
+
+    name = "k8s_demo_1-traefik-traefik"
+}

+ 20 - 0
library/terraform/templates/kubernetes-automation-example/cloudflare.tf

@@ -0,0 +1,20 @@
+# Cloudflare DNS records and API Secret
+
+# Kubernetes Secret holding the Cloudflare Global API Key; referenced by the
+# cert-manager ClusterIssuer's dns01 solver (apiKeySecretRef).
+# The provider base64-encodes `data` values itself — pass plain text here.
+resource "kubernetes_secret" "cloudflare_api_key_secret" {
+
+    depends_on = [
+        kubernetes_namespace.certmanager
+    ]
+
+    metadata {
+        name = "cloudflare-api-key-secret"
+        namespace = "certmanager"
+    }
+
+    data = {
+        api-key = var.cloudflare_api_key
+    }
+
+    type = "Opaque"
+}
+

+ 153 - 0
library/terraform/templates/kubernetes-automation-example/nginx1.tf

@@ -0,0 +1,153 @@
+# NGINX 1 Test Deployment
+#
+# End-to-end demo: namespace, single-replica NGINX deployment, ClusterIP
+# service, TLS certificate (via the cloudflare-prod ClusterIssuer), Traefik
+# ingress, and the matching Cloudflare A record.
+#
+# TODO: Change your-domain according to your DNS record that you want to create
+# TODO: Change your-zone-id according to your DNS zone ID in Cloudflare
+# ---
+
+resource "kubernetes_namespace" "nginx1" {
+
+    depends_on = [
+        time_sleep.wait_for_kubernetes
+    ]
+
+    metadata {
+        name = "nginx1"
+    }
+}
+
+
+# Single NGINX pod exposing container port 80.
+# NOTE(review): `nginx:latest` is unpinned — consider a fixed tag for
+# reproducible deployments.
+resource "kubernetes_deployment" "nginx1" {
+
+    depends_on = [
+        kubernetes_namespace.nginx1
+    ]
+
+    metadata {
+        name = "nginx1"
+        namespace = "nginx1"
+        labels = {
+            app = "nginx1"
+        }
+    }
+
+    spec {
+        replicas = 1
+
+        selector {
+            match_labels = {
+                app = "nginx1"
+            }
+        }
+
+        template {
+            metadata {
+                labels = {
+                    app = "nginx1"
+                }
+            }
+
+            spec {
+                container {
+                    image = "nginx:latest"
+                    name  = "nginx"
+
+                    port {
+                        container_port = 80
+                    }
+                }
+            }
+        }
+    }
+}
+
+
+# Cluster-internal service; the ingress below routes external traffic to it.
+resource "kubernetes_service" "nginx1" {
+
+    depends_on = [
+        kubernetes_namespace.nginx1
+    ]
+
+    metadata {
+        name = "nginx1"
+        namespace = "nginx1"
+    }
+    spec {
+        selector = {
+            app = "nginx1"
+        }
+        port {
+            port = 80
+        }
+
+        type = "ClusterIP"
+    }
+}
+
+
+# TLS certificate issued by the cloudflare-prod ClusterIssuer; stored in the
+# `nginx1` Secret consumed by the ingress tls block below.
+resource "kubectl_manifest" "nginx1-certificate" {
+
+    depends_on = [kubernetes_namespace.nginx1, time_sleep.wait_for_clusterissuer]
+
+    yaml_body = <<YAML
+apiVersion: cert-manager.io/v1
+kind: Certificate
+metadata:
+  name: nginx1
+  namespace: nginx1
+spec:
+  secretName: nginx1
+  issuerRef:
+    name: cloudflare-prod
+    kind: ClusterIssuer
+  dnsNames:
+  - 'your-domain'
+    YAML
+}
+
+
+# HTTP route for your-domain -> nginx1:80, TLS-terminated with the
+# certificate above (handled by Traefik as the default ingress class).
+resource "kubernetes_ingress_v1" "nginx1" {
+
+    depends_on = [kubernetes_namespace.nginx1]
+
+    metadata {
+        name = "nginx1"
+        namespace = "nginx1"
+    }
+
+    spec {
+        rule {
+
+            host = "your-domain"
+
+            http {
+
+                path {
+                    path = "/"
+
+                    backend {
+                        service {
+                            name = "nginx1"
+                            port {
+                                number = 80
+                            }
+                        }
+                    }
+
+                }
+            }
+        }
+
+        tls {
+          secret_name = "nginx1"
+          hosts = ["your-domain"]
+        }
+    }
+}
+
+# A record pointing the domain at the Traefik load balancer's public IP.
+# `proxied = false` publishes the IP directly (no Cloudflare proxy).
+# NOTE(review): provider is pinned to ~> 4.0 where `value` is correct; the
+# attribute is renamed to `content` in cloudflare provider v5.
+resource "cloudflare_record" "clcreative-main-cluster" {
+    zone_id = "your-zone-id"
+    name = "your-domain"
+    value =  data.civo_loadbalancer.traefik_lb.public_ip
+    type = "A"
+    proxied = false
+}

+ 76 - 0
library/terraform/templates/kubernetes-automation-example/provider.tf

@@ -0,0 +1,76 @@
+# Provider requirements: Civo (cluster), Helm/Kubernetes/kubectl (in-cluster
+# resources), Cloudflare (DNS). All versions are pinned or range-constrained
+# for reproducible runs.
+terraform {
+
+    required_version = ">= 0.13.0"
+
+    required_providers {
+        civo = {
+            source = "civo/civo"
+            version = "~> 1.1.0"
+        }
+        helm = {
+            source = "hashicorp/helm"
+            version = "2.17.0"
+        }
+        kubernetes = {
+            source = "hashicorp/kubernetes"
+            version = "2.37.1"
+        }
+        kubectl = {
+            source = "gavinbunney/kubectl"
+            version = "1.19.0"
+        }
+        cloudflare = {
+            source = "cloudflare/cloudflare"
+            version = "~> 4.0"
+        }
+    }
+}
+
+# Provider credentials, supplied externally (TF_VAR_* / tfvars).
+# The token and API key are secrets, so they are marked `sensitive` to keep
+# them out of plan/apply console output.
+variable "civo_token" {
+    type = string
+    description = "Civo API token"
+    sensitive = true
+}
+
+variable "cloudflare_email" {
+    type = string
+    description = "Cloudflare account email address"
+}
+
+variable "cloudflare_api_key" {
+    type = string
+    description = "Cloudflare Global API Key"
+    sensitive = true
+}
+
+provider "civo" {
+    token = var.civo_token
+
+    # TODO: (optional) change region to your desired datacenter location
+    # ---
+    # region = "FRA1"
+}
+
+# The helm/kubernetes/kubectl providers all authenticate by parsing the
+# kubeconfig emitted by the Civo cluster resource (yamldecode + base64decode
+# of the embedded client cert/key and CA). `.clusters.0.` is legacy index
+# syntax for list element 0.
+# NOTE(review): configuring providers from a managed resource's attributes is
+# a known Terraform caveat — first apply and destroy ordering can misbehave;
+# confirm this works for your workflow.
+provider "helm" {
+    kubernetes {
+        host = "${yamldecode(civo_kubernetes_cluster.k8s_demo_1.kubeconfig).clusters.0.cluster.server}"
+        client_certificate = "${base64decode(yamldecode(civo_kubernetes_cluster.k8s_demo_1.kubeconfig).users.0.user.client-certificate-data)}"
+        client_key = "${base64decode(yamldecode(civo_kubernetes_cluster.k8s_demo_1.kubeconfig).users.0.user.client-key-data)}"
+        cluster_ca_certificate ="${base64decode(yamldecode(civo_kubernetes_cluster.k8s_demo_1.kubeconfig).clusters.0.cluster.certificate-authority-data)}"
+    }
+}
+
+provider "kubernetes" {
+    host = "${yamldecode(civo_kubernetes_cluster.k8s_demo_1.kubeconfig).clusters.0.cluster.server}"
+    client_certificate = "${base64decode(yamldecode(civo_kubernetes_cluster.k8s_demo_1.kubeconfig).users.0.user.client-certificate-data)}"
+    client_key = "${base64decode(yamldecode(civo_kubernetes_cluster.k8s_demo_1.kubeconfig).users.0.user.client-key-data)}"
+    cluster_ca_certificate = "${base64decode(yamldecode(civo_kubernetes_cluster.k8s_demo_1.kubeconfig).clusters.0.cluster.certificate-authority-data)}"
+}
+
+# load_config_file = false: never fall back to the local ~/.kube/config.
+provider "kubectl" {
+    host = "${yamldecode(civo_kubernetes_cluster.k8s_demo_1.kubeconfig).clusters.0.cluster.server}"
+    client_certificate = "${base64decode(yamldecode(civo_kubernetes_cluster.k8s_demo_1.kubeconfig).users.0.user.client-certificate-data)}"
+    client_key = "${base64decode(yamldecode(civo_kubernetes_cluster.k8s_demo_1.kubeconfig).users.0.user.client-key-data)}"
+    cluster_ca_certificate = "${base64decode(yamldecode(civo_kubernetes_cluster.k8s_demo_1.kubeconfig).clusters.0.cluster.certificate-authority-data)}"
+    load_config_file = false
+}
+
+provider "cloudflare" {
+    email = var.cloudflare_email
+    api_key = var.cloudflare_api_key
+}

+ 47 - 0
library/terraform/templates/kubernetes-automation-example/traefik.tf

@@ -0,0 +1,47 @@
+# Traefik Deployment
+
+resource "kubernetes_namespace" "traefik" {
+
+    depends_on = [
+        time_sleep.wait_for_kubernetes
+    ]
+
+    metadata {
+        name = "traefik"
+    }
+}
+
+# Install Traefik as the cluster's default ingress controller.
+# NOTE(review): helm.traefik.io is the legacy chart repository (charts moved
+# to https://traefik.github.io/charts), and the chart version is unpinned —
+# confirm the repo still serves the release you expect, or pin `version`.
+resource "helm_release" "traefik" {
+    depends_on = [
+        kubernetes_namespace.traefik
+    ]
+
+    name = "traefik"
+    namespace = "traefik"
+
+    repository = "https://helm.traefik.io/traefik"
+    chart = "traefik"
+
+    # Set Traefik as the Default Ingress Controller
+    set {
+        name  = "ingressClass.enabled"
+        value = "true"
+    }
+    set {
+        name  = "ingressClass.isDefaultClass"
+        value = "true"
+    }
+
+    # Default Redirect
+    # NOTE(review): the string form `ports.web.redirectTo=websecure` is only
+    # accepted by older chart versions; newer charts expect
+    # `ports.web.redirectTo.port` (or redirections) — verify against the
+    # chart version actually installed.
+    set {
+        name  = "ports.web.redirectTo"
+        value = "websecure"
+    }
+
+    # Enable TLS on Websecure
+    set {
+        name  = "ports.websecure.tls.enabled"
+        value = "true"
+    }
+
+}

+ 24 - 0
library/terraform/templates/simple-docker-example/main.tf

@@ -0,0 +1,24 @@
+terraform {
+  required_providers {
+    docker = {
+      source = "kreuzwerker/docker"
+      version = "~> 3.6.0"
+    }
+  }
+}
+
+# Talks to the local Docker daemon over its default socket.
+provider "docker" {}
+
+# Pull the nginx image; remove it from the host on `terraform destroy`.
+resource "docker_image" "nginx" {
+  name         = "nginx:latest"
+  keep_locally = false
+}
+
+# Run the container, publishing container port 80 on host port 8000.
+resource "docker_container" "nginx" {
+  # The `latest` attribute was removed in provider v3 (pinned ~> 3.6.0 above);
+  # `image_id` is its replacement and pins the container to the pulled image.
+  image = docker_image.nginx.image_id
+  name  = "tutorial"
+  ports {
+    internal = 80
+    external = 8000
+  }
+}

+ 20 - 0
library/terraform/twingate/provider.tf

@@ -0,0 +1,20 @@
+# Twingate provider setup: pinned provider version plus a sensitive API token
+# supplied externally (e.g. TF_VAR_TWINGATE_TOKEN).
+terraform {
+  required_version = ">= 0.13.0"
+  required_providers {
+    twingate = {
+      source = "Twingate/twingate"
+      version = "3.3.2"
+    }
+  }
+}
+
+variable "TWINGATE_TOKEN" {
+  type        = string
+  description = "Twingate API Token"
+  sensitive   = true
+}
+
+# `network` is the tenant slug from <network>.twingate.com.
+provider "twingate" {
+  api_token = var.TWINGATE_TOKEN
+  network   = ""  # FIXME Add your Twingate network name here
+}

+ 7 - 0
library/terraform/twingate/twingate_group.tf

@@ -0,0 +1,7 @@
+# Group granting access to resources (referenced by twingate_resource via
+# its access_group block); membership is the looked-up admin user.
+resource "twingate_group" "administrators" {
+  name = "Administrators"
+
+  user_ids = [
+    data.twingate_user.admin.id
+  ]
+}

+ 7 - 0
library/terraform/twingate/twingate_remote_network.tf

@@ -0,0 +1,7 @@
+# Look up an existing remote network by name (used as the home of
+# twingate_resource.new_resource).
+data "twingate_remote_network" "default_network" {
+  name = "default_network"
+}
+
+# Example of creating a brand-new remote network.
+resource "twingate_remote_network" "new_network" {
+  name = "new_network"
+}

+ 28 - 0
library/terraform/twingate/twingate_resource.tf

@@ -0,0 +1,28 @@
+# Example resource on the existing default network: all TCP/UDP ports plus
+# ICMP allowed, access granted to the Administrators group under the
+# default security policy.
+resource "twingate_resource" "new_resource" {
+  name                = "new_resource"
+  address             = "new_resource.home.arpa"
+  remote_network_id   = data.twingate_remote_network.default_network.id
+  security_policy_id  = data.twingate_security_policy.default_policy.id
+
+  protocols = {
+    allow_icmp = true
+    tcp = {
+      policy = "ALLOW_ALL"
+    }
+    udp = {
+      policy = "ALLOW_ALL"
+    }
+  }
+
+  # One access_group block per group id in the list; extend the list to
+  # grant more groups access.
+  dynamic "access_group" {
+    for_each = [
+      twingate_group.administrators.id
+    ]
+    content {
+      group_id = access_group.value
+      security_policy_id = data.twingate_security_policy.default_policy.id
+    }
+  }
+
+  is_active = true
+}

+ 3 - 0
library/terraform/twingate/twingate_security_policy.tf

@@ -0,0 +1,3 @@
+# Look up the built-in "Default Policy" so resources can reference its id.
+data "twingate_security_policy" "default_policy" {
+  name = "Default Policy"
+}

+ 11 - 0
library/terraform/twingate/twingate_user.tf

@@ -0,0 +1,11 @@
+# Look up an existing user by id (consumed by twingate_group.administrators).
+data "twingate_user" "admin" {
+  id = ""  # FIXME Replace with actual user ID
+}
+
+# Example of inviting a new user by email.
+resource "twingate_user" "new_user" {
+  email       = "new.user@example.com"
+  first_name  = "New"
+  last_name   = "User"
+  role        = "DEVOPS" # NOTE Defines the role, either ADMIN, DEVOPS, SUPPORT or MEMBER
+  send_invite = true
+}