Переглянути джерело

automation, and template fixes

xcad 6 місяців тому
батько
коміт
1403e02c78
57 змінених файлів з 891 додано та 1145 видалено
  1. 127 0
      .github/workflows/release.yaml
  2. 3 0
      .gitignore
  3. 2 10
      AGENTS.md
  4. 19 4
      cli/__main__.py
  5. 8 5
      cli/core/display.py
  6. 5 0
      cli/core/module.py
  7. 9 0
      cli/core/template.py
  8. 4 4
      cli/core/variables.py
  9. 0 13
      cli/modules/ci.py
  10. 12 2
      cli/modules/compose.py
  11. 0 13
      cli/modules/vagrant.py
  12. 22 0
      dns-test/compose.yaml
  13. 67 0
      dns-test/config/named.conf
  14. 32 0
      dns-test/config/named.conf.zones
  15. 13 0
      dns-test/config/tsig.key
  16. 0 23
      library/ci/github-actions-kubectl/kubernetes-deploy.yml
  17. 0 27
      library/ci/github-actions-scp/copy-config-files.yml
  18. 0 32
      library/ci/github-actions-ssh/restart-docker.yml
  19. 0 51
      library/ci/gitlab-ci-ansible/run.yml
  20. 0 39
      library/ci/gitlab-ci-ansible/test.yml
  21. 0 73
      library/ci/gitlab-ci-docker/config.yml
  22. 0 80
      library/ci/gitlab-ci-docker/deploy.yml
  23. 0 35
      library/ci/gitlab-ci-docker/test.yml
  24. 0 53
      library/ci/gitlab-ci-terraform/apply.yml
  25. 0 51
      library/ci/gitlab-ci-terraform/validate.yml
  26. 0 36
      library/ci/kestra-ansible/ansible-playbook-git.yaml
  27. 0 38
      library/ci/kestra-ansible/ansible-playbook-inline.yaml
  28. 0 31
      library/ci/kestra-docker/docker-build-git.yaml
  29. 0 33
      library/ci/kestra-docker/docker-build-inline.yaml
  30. 0 65
      library/ci/kestra-inputs/kestra-inputs.yaml
  31. 0 19
      library/ci/kestra-python/python_command.yaml
  32. 0 24
      library/ci/kestra-python/python_script.yaml
  33. 0 17
      library/ci/kestra-variables/kestra-variables.yaml
  34. 0 20
      library/ci/kestra-webhook/kestra-webhook.yaml
  35. 26 0
      library/compose/ansiblesemaphore/.env.database.j2
  36. 46 0
      library/compose/ansiblesemaphore/.env.semaphore.j2
  37. 89 35
      library/compose/ansiblesemaphore/compose.yaml.j2
  38. 64 6
      library/compose/ansiblesemaphore/template.yaml
  39. 33 8
      library/compose/bind9/compose.yaml.j2
  40. 0 1
      library/compose/bind9/config/example.named.conf
  41. 67 0
      library/compose/bind9/config/named.conf.j2
  42. 32 0
      library/compose/bind9/config/named.conf.zones.j2
  43. 13 0
      library/compose/bind9/config/tsig.key.j2
  44. 63 8
      library/compose/bind9/template.yaml
  45. 0 16
      library/compose/cadvisor/compose.yaml.j2
  46. 0 19
      library/compose/cadvisor/template.yaml
  47. 0 20
      library/vagrant/hyperv/ubuntu/docker/Vagrantfile
  48. 0 38
      library/vagrant/hyperv/ubuntu/docker/playbook.yaml
  49. 0 20
      library/vagrant/hyperv/ubuntu/microk8s-installed/Vagrantfile
  50. 0 16
      library/vagrant/hyperv/ubuntu/microk8s-installed/playbook.yaml
  51. 0 22
      library/vagrant/hyperv/ubuntu/plain-with-cockpit/Vagrantfile
  52. 0 10
      library/vagrant/hyperv/ubuntu/plain-with-cockpit/playbook.yaml
  53. 0 15
      library/vagrant/hyperv/ubuntu/plain/Vagrantfile
  54. 0 38
      library/vagrant/hyperv/ubuntu/ubuntu-cluster-plain/Vagrantfile
  55. 0 16
      library/vagrant/kvm/ubuntu/Vagrantfile
  56. 2 15
      renovate.json
  57. 133 44
      scripts/install.sh

+ 127 - 0
.github/workflows/release.yaml

@@ -0,0 +1,127 @@
+---
+name: Release
+
+on:
+  push:
+    tags:
+      - 'v*.*.*'  # Trigger on version tags like v1.0.0, v2.1.3, etc.
+
+permissions:
+  contents: write
+
+jobs:
+  release:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+
+      - name: Extract version from tag
+        id: version
+        run: |
+          # Remove 'v' prefix if present (e.g., v1.0.0 -> 1.0.0)
+          VERSION="${GITHUB_REF#refs/tags/}"
+          VERSION="${VERSION#v}"
+          echo "version=$VERSION" >> $GITHUB_OUTPUT
+          echo "tag=$GITHUB_REF_NAME" >> $GITHUB_OUTPUT
+          echo "Extracted version: $VERSION from tag $GITHUB_REF_NAME"
+
+      - name: Update version in pyproject.toml
+        run: |
+          VERSION="${{ steps.version.outputs.version }}"
+          sed -i "s/^version = .*/version = \"$VERSION\"/" pyproject.toml
+          echo "✓ Updated pyproject.toml with version $VERSION"
+
+      - name: Update version in cli/__main__.py
+        run: |
+          VERSION="${{ steps.version.outputs.version }}"
+          sed -i "s/^__version__ = .*/__version__ = \"$VERSION\"/" cli/__main__.py
+          echo "✓ Updated cli/__main__.py with version $VERSION"
+
+      - name: Verify changes
+        run: |
+          echo "=== pyproject.toml ==="
+          grep "^version" pyproject.toml
+          echo ""
+          echo "=== cli/__main__.py ==="
+          grep "^__version__" cli/__main__.py
+
+      - name: Commit and update tag
+        run: |
+          git config --local user.email "github-actions[bot]@users.noreply.github.com"
+          git config --local user.name "github-actions[bot]"
+          
+          # Add changes
+          git add pyproject.toml cli/__main__.py
+          
+          # Check if there are changes to commit
+          if git diff --staged --quiet; then
+            echo "No version changes needed"
+          else
+            # Commit the version updates
+            git commit -m "chore: bump version to ${{ steps.version.outputs.version }}"
+            
+            # Delete the tag locally and remotely
+            git tag -d ${{ steps.version.outputs.tag }}
+            git push origin :refs/tags/${{ steps.version.outputs.tag }}
+            
+            # Recreate the tag pointing to the new commit
+            git tag -a ${{ steps.version.outputs.tag }} -m "Release ${{ steps.version.outputs.tag }}"
+            
+            # Push the new tag
+            git push origin ${{ steps.version.outputs.tag }}
+            
+            echo "✓ Tag ${{ steps.version.outputs.tag }} updated to point to version bump commit"
+          fi
+
+      - name: Set up Python
+        uses: actions/setup-python@v5
+        with:
+          python-version: '3.9'
+
+      - name: Install build dependencies
+        run: |
+          python -m pip install --upgrade pip
+          pip install build twine
+
+      - name: Build package
+        run: python -m build
+
+      - name: Check distribution
+        run: |
+          echo "Built packages:"
+          ls -lh dist/
+          echo ""
+          echo "Checking package integrity:"
+          twine check dist/*
+
+      - name: Publish to PyPI
+        if: >
+          ${{ !contains(steps.version.outputs.version, 'alpha') &&
+          !contains(steps.version.outputs.version, 'beta') &&
+          !contains(steps.version.outputs.version, 'rc') }}
+        env:
+          TWINE_USERNAME: __token__
+          TWINE_PASSWORD: ${{ secrets.PYPI_API_TOKEN }}
+        run: |
+          echo "Publishing to PyPI..."
+          twine upload dist/*
+
+      - name: Create GitHub Release
+        uses: softprops/action-gh-release@v1
+        with:
+          tag_name: ${{ steps.version.outputs.tag }}
+          name: Release ${{ steps.version.outputs.tag }}
+          draft: false
+          prerelease: >
+            ${{ contains(steps.version.outputs.version, 'alpha') ||
+            contains(steps.version.outputs.version, 'beta') ||
+            contains(steps.version.outputs.version, 'rc') }}
+          generate_release_notes: true
+          files: |
+            dist/*.whl
+            dist/*.tar.gz
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

+ 3 - 0
.gitignore

@@ -20,3 +20,6 @@
 
 # Packaging
 *.egg-info/
+
+# Installation tracking
+.installed-version

+ 2 - 10
AGENTS.md

@@ -15,12 +15,11 @@ The CLI is a Python application built with Typer for the command-line interface
   - `cli/modules/` - Technology-specific modules (terraform, docker, compose, config, etc.)
 - `library/` - Template collections organized by module
   - `library/ansible/` - Ansible playbooks and configurations
-  - `library/ci/` - CI/CD automation templates (GitHub Actions, GitLab CI, Kestra)
-  - `library/config/` - Application-specific configuration templates
   - `library/compose/` - Docker Compose configurations
+  - `library/docker/` - Docker templates
   - `library/kubernetes/` - Kubernetes deployments
+  - `library/packer/` - Packer templates
   - `library/terraform/` - OpenTofu/Terraform templates and examples
-  - And more...
 
 ## Development Setup
 
@@ -324,10 +323,3 @@ Use separate `.env.{service}.j2` files for different services (e.g., `.env.authe
 - Provide comprehensive descriptions for user guidance
 - Group related variables into logical sections
 - Validate toggle variables are boolean type
-
-### Testing Templates
-- Test generation with default values
-- Test with toggle sections enabled and disabled
-- Test with edge cases (empty values, special characters)
-- Verify yamllint compliance for YAML files
-- Check that generated files are syntactically valid

+ 19 - 4
cli/__main__.py

@@ -17,7 +17,14 @@ import cli.modules
 from cli.core.registry import registry
 # Using standard Python exceptions instead of custom ones
 
-app = Typer(no_args_is_help=True)
+# Version is automatically updated by CI/CD on release
+__version__ = "0.0.1"
+
+app = Typer(
+  no_args_is_help=True,
+  help="CLI tool for managing infrastructure boilerplates",
+  add_completion=True,
+)
 console = Console()
 
 def setup_logging(log_level: str = "WARNING") -> None:
@@ -48,17 +55,25 @@ def setup_logging(log_level: str = "WARNING") -> None:
   except Exception as e:
     raise RuntimeError(f"Failed to configure logging: {e}")
 
-
-@app.callback()
+@app.callback(invoke_without_command=True)
 def main(
   ctx: Context,
+  version: Optional[bool] = Option(
+    None,
+    "--version",
+    "-v",
+    help="Show the application version and exit.",
+    is_flag=True,
+    callback=lambda v: console.print(f"boilerplates version {__version__}") or sys.exit(0) if v else None,
+    is_eager=True,
+  ),
   log_level: Optional[str] = Option(
     None,
     "--log-level",
     help="Set the logging level (DEBUG, INFO, WARNING, ERROR, CRITICAL). If omitted, logging is disabled."
   )
 ) -> None:
-  """Main CLI application for managing boilerplates."""
+  """CLI tool for managing infrastructure boilerplates."""
   # Disable logging by default; only enable when user provides --log-level
   if log_level:
     # Re-enable logging and configure

+ 8 - 5
cli/core/display.py

@@ -44,7 +44,7 @@ class DisplayManager:
             name = template.metadata.name or "Unnamed Template"
             tags_list = template.metadata.tags or []
             tags = ", ".join(tags_list) if tags_list else "-"
-            version = template.metadata.version or ""
+            version = str(template.metadata.version) if template.metadata.version else ""
             library = template.metadata.library or ""
 
             table.add_row(template.id, name, tags, version, library)
@@ -72,7 +72,7 @@ class DisplayManager:
     def _display_template_header(self, template: Template, template_id: str) -> None:
         """Display the header for a template."""
         template_name = template.metadata.name or "Unnamed Template"
-        version = template.metadata.version or "Not specified"
+        version = str(template.metadata.version) if template.metadata.version else "Not specified"
         description = template.metadata.description or "No description available"
 
         console.print(
@@ -153,10 +153,13 @@ class DisplayManager:
                 variables_table.add_row("", "", "", "", "", style="dim")
             first_section = False
 
-            # Use section's native is_enabled() method
-            is_dimmed = not section.is_enabled()
+            # Check if section is enabled AND dependencies are satisfied
+            is_enabled = section.is_enabled()
+            dependencies_satisfied = template.variables.is_section_satisfied(section.key)
+            is_dimmed = not (is_enabled and dependencies_satisfied)
 
-            disabled_text = " (disabled)" if is_dimmed else ""
+            # Only show (disabled) if section has no dependencies (dependencies make it obvious)
+            disabled_text = " (disabled)" if (is_dimmed and not section.needs) else ""
             required_text = " [yellow](required)[/yellow]" if section.required else ""
             # Add dependency information
             needs_text = ""

+ 5 - 0
cli/core/module.py

@@ -309,6 +309,11 @@ class Module(ABC):
       
       console.print(f"\n[green]✓ Template generated successfully in '{output_dir}'[/green]")
       logger.info(f"Template written to directory: {output_dir}")
+      
+      # Display next steps if provided in template metadata
+      if template.metadata.next_steps:
+        console.print("\n[bold cyan]Next Steps:[/bold cyan]")
+        console.print(template.metadata.next_steps)
 
     except Exception as e:
       logger.error(f"Error rendering template '{id}': {e}")

+ 9 - 0
cli/core/template.py

@@ -43,6 +43,7 @@ class TemplateMetadata:
   tags: List[str] = field(default_factory=list)
   # files: List[str] = field(default_factory=list) # No longer needed, as TemplateFile handles this
   library: str = "unknown"
+  next_steps: str = ""
 
   def __init__(self, template_data: dict, library_name: str | None = None) -> None:
     """Initialize TemplateMetadata from parsed YAML template data.
@@ -73,6 +74,14 @@ class TemplateMetadata:
     self.tags = metadata_section.get("tags", []) or []
     # self.files = metadata_section.get("files", []) or [] # No longer needed
     self.library = library_name or "unknown"
+    
+    # Extract next_steps (optional)
+    raw_next_steps = metadata_section.get("next_steps", "")
+    if isinstance(raw_next_steps, str):
+      next_steps = raw_next_steps.rstrip("\n")
+    else:
+      next_steps = str(raw_next_steps) if raw_next_steps else ""
+    self.next_steps = next_steps
 
   @staticmethod
   def _validate_metadata(template_data: dict) -> None:

+ 4 - 4
cli/core/variables.py

@@ -729,8 +729,8 @@ class VariableCollection:
     
     1. Dependencies come before dependents (topological sort)
     2. Required sections first (in their original order)
-    3. Enabled sections next (in their original order)
-    4. Disabled sections last (in their original order)
+    3. Enabled sections with satisfied dependencies next (in their original order)
+    4. Disabled sections or sections with unsatisfied dependencies last (in their original order)
     
     This maintains the original ordering within each group while organizing
     sections logically for display and user interaction, and ensures that
@@ -743,12 +743,12 @@ class VariableCollection:
     section_items = [(key, self._sections[key]) for key in sorted_keys]
     
     # Define sort key: (priority, original_index)
-    # Priority: 0 = required, 1 = enabled, 2 = disabled
+    # Priority: 0 = required, 1 = enabled with satisfied dependencies, 2 = disabled or unsatisfied dependencies
     def get_sort_key(item_with_index):
       index, (key, section) = item_with_index
       if section.required:
         priority = 0
-      elif section.is_enabled():
+      elif section.is_enabled() and self.is_section_satisfied(key):
         priority = 1
       else:
         priority = 2

+ 0 - 13
cli/modules/ci.py

@@ -1,13 +0,0 @@
-from __future__ import annotations
-
-from ..core.module import Module
-from ..core.registry import registry
-
-class CIModule(Module):
-  """Module for managing CI/CD automation templates."""
-  
-  name: str = "ci"
-  description: str = "Manage CI/CD automation templates (GitHub Actions, GitLab CI, Kestra)"
-
-# Register the module
-registry.register(CIModule)

+ 12 - 2
cli/modules/compose.py

@@ -37,6 +37,16 @@ spec = OrderedDict(
             "description": "Container internal hostname",
             "type": "str",
           },
+          "user_uid": {
+            "description": "User UID for container process",
+            "type": "int",
+            "default": 1000,
+          },
+          "user_gid": {
+            "description": "User GID for container process",
+            "type": "int",
+            "default": 1000,
+          },
         },
       },
       "network": {
@@ -139,7 +149,7 @@ spec = OrderedDict(
       "database": {
         "title": "Database",
         "toggle": "database_enabled",
-        "description": "Connect to external database (PostgreSQL, MySQL, MariaDB, etc.)",
+        "description": "Connect to external database (PostgreSQL or MySQL)",
         "vars": {
           "database_enabled": {
             "description": "Enable external database integration",
@@ -149,7 +159,7 @@ spec = OrderedDict(
           "database_type": {
             "description": "Database type",
             "type": "enum",
-            "options": ["postgres", "mysql", "mariadb"],
+            "options": ["postgres", "mysql"],
             "default": "postgres",
           },
           "database_external": {

+ 0 - 13
cli/modules/vagrant.py

@@ -1,13 +0,0 @@
-from __future__ import annotations
-
-from ..core.module import Module
-from ..core.registry import registry
-
-class VagrantModule(Module):
-  """Module for managing Vagrant configurations and files."""
-  
-  name: str = "vagrant"
-  description: str = "Manage Vagrant configurations and files"
-
-# Register the module
-registry.register(VagrantModule)

+ 22 - 0
dns-test/compose.yaml

@@ -0,0 +1,22 @@
+services:
+  bind9:
+    image: docker.io/ubuntu/bind9:9.20-24.10_edge
+    container_name: bind9
+    hostname: ns1
+    environment:
+      - TZ=America/New_York
+      - BIND9_USER=bind
+    ports:
+      - "53:53/tcp"
+      - "53:53/udp"
+    volumes:
+      - ./config:/etc/bind
+      - bind9_zones:/var/lib/bind
+      - bind9_cache:/var/cache/bind
+    restart: unless-stopped
+
+volumes:
+  bind9_zones:
+    driver: local
+  bind9_cache:
+    driver: local

+ 67 - 0
dns-test/config/named.conf

@@ -0,0 +1,67 @@
+// BIND9 Main Configuration File
+// Documentation: https://bind9.readthedocs.io/
+
+// Include TSIG keys for secure zone transfers
+include "/etc/bind/tsig.key";
+
+// ACL definitions for access control
+acl "trusted" {
+    127.0.0.1;
+    ::1;
+    10.0.0.0/8;
+    172.16.0.0/12;
+    192.168.0.0/16;
+};
+
+options {
+    directory "/var/cache/bind";
+
+    // DNS forwarders for recursive queries
+    forwarders {
+        1.1.1.1;
+        8.8.8.8;
+    };
+
+    // Allow recursion from trusted networks only
+    allow-recursion { trusted; };
+
+    // Allow queries from any (adjust as needed)
+    allow-query { any; };
+
+    // Disable zone transfers by default (enable per-zone with TSIG)
+    allow-transfer { none; };
+
+    // DNSSEC validation
+    dnssec-validation auto;
+
+    // Listen on all interfaces
+    listen-on { any; };
+    listen-on-v6 { any; };
+
+    // Disable query logging (enable for debugging)
+    // querylog yes;
+};
+
+// Local zones
+zone "localhost" {
+    type master;
+    file "/etc/bind/db.local";
+};
+
+zone "127.in-addr.arpa" {
+    type master;
+    file "/etc/bind/db.127";
+};
+
+zone "0.in-addr.arpa" {
+    type master;
+    file "/etc/bind/db.0";
+};
+
+zone "255.in-addr.arpa" {
+    type master;
+    file "/etc/bind/db.255";
+};
+
+// Include your custom zones
+include "/etc/bind/named.conf.zones";

+ 32 - 0
dns-test/config/named.conf.zones

@@ -0,0 +1,32 @@
+// Custom DNS Zones Configuration
+// Add your authoritative zones here
+
+// Example zone for home.arpa
+zone "home.arpa" {
+    type master;
+    file "/var/lib/bind/db.home.arpa";
+
+    // Allow zone transfers using TSIG key
+    allow-transfer { key "transfer-key"; };
+
+    // Enable zone updates with TSIG (for dynamic DNS)
+    // allow-update { key "transfer-key"; };
+
+    // Enable DNSSEC inline signing (optional)
+    // dnssec-policy default;
+    // inline-signing yes;
+};
+
+// Example reverse zone for 192.168.1.0/24
+// zone "1.168.192.in-addr.arpa" {
+//     type master;
+//     file "/var/lib/bind/db.192.168.1";
+//     allow-transfer { key "transfer-key"; };
+// };
+
+// Secondary/Slave zone example
+// zone "secondary.example.com" {
+//     type slave;
+//     masters { 192.168.1.100 key transfer-key; };
+//     file "/var/lib/bind/db.secondary.example.com";
+// };

+ 13 - 0
dns-test/config/tsig.key

@@ -0,0 +1,13 @@
+// TSIG Key for Secure Zone Transfers
+// Auto-generated base64-encoded secret for secure zone transfers and dynamic DNS updates
+// Algorithm: hmac-sha256
+
+key "transfer-key" {
+    algorithm hmac-sha256;
+    secret "6STNyEEL46IfvP/uZJyJ2e5I56uh0eK6CXMINOw4SHGXlIslx1BDkq1FurTwPtxf";
+};
+
+// To manually generate a new key:
+// docker exec bind9 tsig-keygen -a hmac-sha256 transfer-key
+//
+// Then update the secret value above with the generated secret

+ 0 - 23
library/ci/github-actions-kubectl/kubernetes-deploy.yml

@@ -1,23 +0,0 @@
----
-name: Kubernetes Deploy
-
-on:  # yamllint disable-line rule:truthy
-  push:
-    branches:
-      - main
-
-env:
-  KUBE_CONFIG: ${{ secrets.KUBE_CONFIG }}
-
-jobs:
-  deploy:
-    runs-on: your-runner
-
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v2
-
-      - name: Deploy Manifest
-        uses: actions-hub/kubectl@master
-        with:
-          args: apply -f your-manifest.yml

+ 0 - 27
library/ci/github-actions-scp/copy-config-files.yml

@@ -1,27 +0,0 @@
----
-name: copy config files to remote machine
-
-on:  # yamllint disable-line rule:truthy
-  push:
-    branches:
-      - main
-    paths:
-      - 'config/**'
-
-jobs:
-  deploy:
-    runs-on: your-runner
-
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v2
-
-      - name: Upload new Config Files
-        uses: appleboy/scp-action@master
-        with:
-          username: your-username
-          host: your-host
-          key: ${{ secrets.your-private-ssh-key }}
-          source: './config/*'
-          target: '/target/path/'
-          strip_components: 1   # remove the top level directory

+ 0 - 32
library/ci/github-actions-ssh/restart-docker.yml

@@ -1,32 +0,0 @@
----
-name: Update Docker Compose File
-
-on:  # yamllint disable-line rule:truthy
-  push:
-    branches:
-      - main
-    paths:
-      - 'docker-compose.yml'
-
-env:
-  YOUR-ENV-SECRET: ${{ secrets.YOUR-ENV-SECRET }}
-  YOUR-ENV-VAR: ${{ vars.YOUR-ENV-VAR }}
-jobs:
-  deploy:
-    runs-on: your-runner
-
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v2
-
-      - name: Restart Docker Container
-        uses: fifsky/ssh-action@master
-        with:
-          user: your-user
-          host: your-host
-          key: ${{ secrets.your-private-ssh-key }}
-          command: |
-            cd your-compose-project-directory
-            export YOUR-ENV-SECRET=${{ secrets.YOUR-ENV-SECRET }}
-            export YOUR-ENV-VAR=${{ vars.YOUR-ENV-VAR }}
-            docker-compose up -d --force-recreate

+ 0 - 51
library/ci/gitlab-ci-ansible/run.yml

@@ -1,51 +0,0 @@
----
-spec:
-  inputs:
-    as:
-      default: run-ansible
-    stage:
-      default: ansible
-
-    root_dir:
-      default: ${CI_PROJECT_DIR}/ansible
-      description: 'Root directory for the Ansible config and playbooks.'
-    project_file:
-      description: 'Ansible Playbook to run.'
-    inventory_file:
-      default: ${CI_PROJECT_DIR}/ansible/inventory
-      description: 'Ansible Inventory File'
-
-    remote_ssh:
-      description: 'Remote ssh'
-
----
-'$[[ inputs.as ]]':
-  stage: '$[[ inputs.stage ]]'
-  image:
-    name: alpine:latest
-    entrypoint: [""]
-  variables:
-    PROJECT_DIR: "$[[ inputs.root_dir ]]"
-    PROJECT_FILE: "$[[ inputs.project_file ]]"
-    INVENTORY_FILE: "$[[ inputs.inventory_file ]]"
-    SSH_KEY: "$[[ inputs.remote_ssh ]]"
-  before_script: |
-    echo "Before → Executing..."
-    echo "Before → Installing dependencies"
-    apk add --no-cache openssh-client ansible-core
-    echo "Before → Enter Ansible root directory"
-    cd ${PROJECT_DIR}
-    echo "Before → Adding ssh key"
-    echo "${SSH_KEY}" > id_rsa && chmod 600 id_rsa
-    eval $(ssh-agent -s)
-    ssh-add id_rsa
-    echo "Before → Setting additional environment variables"
-    export ANSIBLE_HOST_KEY_CHECKING=false
-  script: |
-    echo "Script → Executing..."
-    echo "Script → Run Ansible Playbooks"
-    ansible-playbook -i ${INVENTORY_FILE} ${PROJECT_FILE}
-  rules:
-    - if: '$CI_COMMIT_REF_NAME == "main"'
-      changes:
-        - '$[[ inputs.root_dir ]]/$[[ inputs.project_file ]]'

+ 0 - 39
library/ci/gitlab-ci-ansible/test.yml

@@ -1,39 +0,0 @@
----
-spec:
-  inputs:
-    as:
-      default: test-ansible
-    stage:
-      default: test
-
-    root_dir:
-      default: ${CI_PROJECT_DIR}/ansible
-      description: 'Root directory for the Ansible config and playbooks.'
-    project_file:
-      description: 'Ansible Playbook to run.'
-
----
-'$[[ inputs.as ]]':
-  stage: '$[[ inputs.stage ]]'
-  image:
-    name: alpine:latest
-    entrypoint: [""]
-  variables:
-    ANSIBLE_DIR: "$[[ inputs.root_dir ]]"
-    PROJECT_FILE: "$[[ inputs.project_file ]]"
-  before_script: |
-    echo "Before → Executing..."
-    echo "Before → Enter Ansible root directory"
-    cd ${ANSIBLE_DIR}
-  script: |
-    echo "Script → Executing..."
-    echo "Before → Installing dependencies"
-    apk add --no-cache ansible-core
-    echo "Script → Test Ansible Playbooks"
-    ansible-lint ${PROJECT_FILE}
-  rules:
-    - if: |
-        $CI_PIPELINE_SOURCE == "push" ||
-        $CI_PIPELINE_SOURCE == "merge_request_event"
-      changes:
-        - '$[[ inputs.root_dir ]]/**'

+ 0 - 73
library/ci/gitlab-ci-docker/config.yml

@@ -1,73 +0,0 @@
----
-spec:
-  inputs:
-    as:
-      default: config-docker
-    stage:
-      default: config
-
-    config_dir:
-      default: ${CI_PROJECT_DIR}
-      description: 'Config directory to copy.'
-    project_file:
-      default: 'compose.yaml'
-      description: 'Docker Compose file to use.'
-
-    remote_host:
-      description: 'Remote host'
-    remote_user:
-      description: 'Remote user'
-    remote_ssh:
-      description: 'Remote ssh'
-
-    remote_config:
-      default: ${CI_PROJECT_DIR}
-      description: 'Target directory on the remote server for the config.'
-    remote_dir:
-      default: ${CI_PROJECT_DIR}
-      description: 'Directory on the remote server for the Docker Compose project.'
-
-
-    restart:
-      default: 'false'
-      description: 'Restart the remote compose project after config update?'
-
----
-'$[[ inputs.as ]]':
-  stage: '$[[ inputs.stage ]]'
-  image: alpine:latest
-  variables:
-    CONFIG_DIR: "$[[ inputs.config_dir ]]"
-    PROJECT_FILE: "$[[ inputs.project_file ]]"
-    SSH_KEY: "$[[ inputs.remote_ssh ]]"
-    REMOTE_HOST: "$[[ inputs.remote_host ]]"
-    REMOTE_USER: "$[[ inputs.remote_user ]]"
-    REMOTE_CONFIG: "$[[ inputs.remote_config ]]"
-    REMOTE_PATH: "$[[ inputs.remote_dir ]]"
-    RESTART: "$[[ inputs.restart ]]"
-  before_script: |
-    echo "Before → Executing..."
-    echo "Before → Installing dependencies"
-    apk add --no-cache openssh-client
-    echo "Before → Adding ssh key"
-    echo "$SSH_KEY" > id_rsa && chmod 600 id_rsa
-    eval $(ssh-agent -s)
-    ssh-add id_rsa
-  script: |
-    echo "Script → Executing..."
-    echo "Script → Copying config file to remote host"
-    ssh -o StrictHostKeyChecking=no $REMOTE_USER@$REMOTE_HOST "mkdir -p $REMOTE_CONFIG"
-    scp -o StrictHostKeyChecking=no $CONFIG_DIR/* $REMOTE_USER@$REMOTE_HOST:$REMOTE_CONFIG
-    echo "Script → Executing remote commands"
-    ssh -o StrictHostKeyChecking=no $REMOTE_USER@$REMOTE_HOST<<EOF
-      if [ '$RESTART' = 'true' ]; then
-        echo "Script → Restarting services"
-        docker compose -f $REMOTE_PATH/$PROJECT_FILE down --remove-orphans
-        docker compose -f $REMOTE_PATH/$PROJECT_FILE up -d
-      fi
-    EOF
-    echo "Script ✓ Done"
-  rules:
-    - if: '$CI_COMMIT_REF_NAME == "main"'
-      changes:
-        - '$[[ inputs.config_dir ]]/**'

+ 0 - 80
library/ci/gitlab-ci-docker/deploy.yml

@@ -1,80 +0,0 @@
----
-spec:
-  inputs:
-    as:
-      default: deploy-docker
-    stage:
-      default: deploy
-
-    root_dir:
-      default: ${CI_PROJECT_DIR}
-      description: 'Root directory for the Docker Compose project.'
-    project_file:
-      default: 'compose.yaml'
-      description: 'Docker Compose file to use.'
-
-    remote_host:
-      description: 'Remote host'
-    remote_user:
-      description: 'Remote user'
-    remote_ssh:
-      description: 'Remote ssh'
-
-    remote_dir:
-      default: ${CI_PROJECT_DIR}
-      description: 'Directory on the remote server for the Docker Compose project.'
-
-    docker_login:
-      default: 'true'
-      description: 'Login to Docker on the remote server?'
-    docker_user:
-      default: ${DOCKER_USER}
-      description: 'Docker user on the remote server'
-    docker_password:
-      default: ${DOCKER_PASSWORD}
-      description: 'Docker group on the remote server'
-
----
-'$[[ inputs.as ]]':
-  stage: '$[[ inputs.stage ]]'
-  image: docker:latest
-  variables:
-    PROJECT_DIR: "$[[ inputs.root_dir ]]"
-    PROJECT_FILE: "$[[ inputs.project_file ]]"
-    SSH_KEY: "$[[ inputs.remote_ssh ]]"
-    REMOTE_HOST: "$[[ inputs.remote_host ]]"
-    REMOTE_USER: "$[[ inputs.remote_user ]]"
-    REMOTE_PATH: "$[[ inputs.remote_dir ]]"
-    DOCKER_LOGIN: "$[[ inputs.docker_login ]]"
-    DOCKER_USER: "$[[ inputs.docker_user ]]"
-    DOCKER_PASSWORD: "$[[ inputs.docker_password ]]"
-  before_script: |
-    echo "Before → Executing..."
-    cd $PROJECT_DIR
-    echo "Before → Installing dependencies"
-    apk add --no-cache openssh-client
-    echo "Before → Adding ssh key"
-    echo "$SSH_KEY" > id_rsa && chmod 600 id_rsa
-    eval $(ssh-agent -s)
-    ssh-add id_rsa
-  script: |
-    echo "Script → Executing..."
-    echo "Script → Copying docker compose file to remote host"
-    ssh -o StrictHostKeyChecking=no $REMOTE_USER@$REMOTE_HOST "mkdir -p $REMOTE_PATH"
-    scp -o StrictHostKeyChecking=no $PROJECT_FILE $REMOTE_USER@$REMOTE_HOST:$REMOTE_PATH
-    echo "Script → Executing remote commands"
-    ssh -o StrictHostKeyChecking=no $REMOTE_USER@$REMOTE_HOST<<EOF
-      if [ '$DOCKER_LOGIN' = 'true' ]; then
-        echo "Script → Logging into docker hub"
-        docker login -u $DOCKER_USER -p $DOCKER_PASSWORD
-      fi
-      echo "Script → Pulling and restarting services"
-      docker compose -f $REMOTE_PATH/$PROJECT_FILE pull -q
-      docker compose -f $REMOTE_PATH/$PROJECT_FILE down --remove-orphans
-      docker compose -f $REMOTE_PATH/$PROJECT_FILE up -d
-    EOF
-    echo "Script ✓ Done"
-  rules:
-    - if: '$CI_COMMIT_REF_NAME == "main"'
-      changes:
-        - '$[[ inputs.root_dir ]]/$[[ inputs.project_file ]]'

+ 0 - 35
library/ci/gitlab-ci-docker/test.yml

@@ -1,35 +0,0 @@
----
-spec:
-  inputs:
-    as:
-      default: test-docker
-    stage:
-      default: test
-
-    root_dir:
-      default: ${CI_PROJECT_DIR}
-      description: 'Root directory for the Docker Compose project.'
-    project_file:
-      default: 'compose.yaml'
-      description: 'Docker Compose file to use.'
-
----
-'$[[ inputs.as ]]':
-  stage: '$[[ inputs.stage ]]'
-  image: docker:latest
-  variables:
-    PROJECT_DIR: "$[[ inputs.root_dir ]]"
-    PROJECT_FILE: "$[[ inputs.project_file ]]"
-  before_script:
-    - cd $PROJECT_DIR
-  script:
-    - docker compose -f $PROJECT_FILE config --quiet
-  rules:
-    - if: '$CI_COMMIT_REF_NAME == "main"'
-      changes:
-        - '$[[ inputs.root_dir ]]/$[[ inputs.project_file ]]'
-    - if: |
-        $CI_PIPELINE_SOURCE == "push" ||
-        $CI_PIPELINE_SOURCE == "merge_request_event"
-      changes:
-        - '$[[ inputs.root_dir ]]/$[[ inputs.project_file ]]'

+ 0 - 53
library/ci/gitlab-ci-terraform/apply.yml

@@ -1,53 +0,0 @@
----
-spec:
-  inputs:
-    as:
-      default: apply-terraform
-    stage:
-      default: terraform
-
-    root_dir:
-      default: ${CI_PROJECT_DIR}/terraform
-      description: 'Root directory for the OpenTofu project.'
-    state_name:
-      default: default
-      description: 'Remote OpenTofu state name.'
-
----
-variables:
-  TF_ROOT: "$[[ inputs.root_dir ]]"
-  TF_ADDRESS: ${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/terraform/state/$[[ inputs.state_name ]]
-  TF_USERNAME: gitlab-ci-token
-  TF_PASSWORD: ${CI_JOB_TOKEN}
-
-'$[[ inputs.as ]]':
-  stage: '$[[ inputs.stage ]]'
-  image:
-    name: ghcr.io/opentofu/opentofu:latest
-    entrypoint: [""]
-  before_script: |
-    echo "Before → Executing..."
-    echo "Before → Enter TF root directory"
-    cd ${TF_ROOT}
-  script: |
-    echo "Script → Executing..."
-    echo "Script → Initialize Terraform backend"
-    tofu init \
-     -backend-config=address=${TF_ADDRESS} \
-     -backend-config=lock_address=${TF_ADDRESS}/lock \
-     -backend-config=unlock_address=${TF_ADDRESS}/lock \
-     -backend-config=username=${TF_USERNAME} \
-     -backend-config=password=${TF_PASSWORD} \
-     -backend-config=lock_method=POST \
-     -backend-config=unlock_method=DELETE \
-     -backend-config=retry_wait_min=5
-    echo "Script → Validate Terraform"
-    tofu validate
-    echo "Script → Plan Terraform"
-    tofu plan -lock=false -out=tfplan
-    echo "Script → Apply Terraform"
-    tofu apply -lock=false -auto-approve tfplan
-  rules:
-    - if: $CI_COMMIT_BRANCH == "main"
-      changes:
-        - '$[[ inputs.root_dir ]]/**'

+ 0 - 51
library/ci/gitlab-ci-terraform/validate.yml

@@ -1,51 +0,0 @@
----
-spec:
-  inputs:
-    as:
-      default: validate-terraform
-    stage:
-      default: test
-
-    root_dir:
-      default: ${CI_PROJECT_DIR}/terraform
-      description: 'Root directory for the OpenTofu project.'
-    state_name:
-      default: default
-      description: 'Remote OpenTofu state name.'
-
----
-variables:
-  TF_ROOT: "$[[ inputs.root_dir ]]"
-  TF_ADDRESS: ${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/terraform/state/$[[ inputs.state_name ]]
-  TF_USERNAME: gitlab-ci-token
-  TF_PASSWORD: ${CI_JOB_TOKEN}
-
-'$[[ inputs.as ]]':
-  stage: '$[[ inputs.stage ]]'
-  image:
-    name: ghcr.io/opentofu/opentofu:latest
-    entrypoint: [""]
-  before_script: |
-    echo "Before → Executing..."
-    echo "Before → Enter TF root directory"
-    cd ${TF_ROOT}
-  script: |
-    echo "Script → Executing..."
-    echo "Script → Initialize Terraform backend"
-    tofu init \
-     -backend-config=address=${TF_ADDRESS} \
-     -backend-config=lock_address=${TF_ADDRESS}/lock \
-     -backend-config=unlock_address=${TF_ADDRESS}/lock \
-     -backend-config=username=${TF_USERNAME} \
-     -backend-config=password=${TF_PASSWORD} \
-     -backend-config=lock_method=POST \
-     -backend-config=unlock_method=DELETE \
-     -backend-config=retry_wait_min=5
-    echo "Script → Validate Terraform"
-    tofu validate
-  rules:
-    - if: |
-        $CI_PIPELINE_SOURCE == "push" ||
-        $CI_PIPELINE_SOURCE == "merge_request_event"
-      changes:
-        - '$[[ inputs.root_dir ]]/**'

+ 0 - 36
library/ci/kestra-ansible/ansible-playbook-git.yaml

@@ -1,36 +0,0 @@
----
-# Kestra ansible-playbook Template
-# ---
-#
-# Run an ansible playbook cloned from a Git Repository
-#
-id: ansible_playbook_git
-namespace: your_namespace  # <-- Replace with your namespace...
-tasks:
-  - id: ansible_job
-    type: io.kestra.plugin.core.flow.WorkingDirectory
-    inputFiles:
-      id_rsa: "{{ secret('RSA_SSH_KEY') }}"  # <-- (Required) Replace with your secret key...
-      # id_ed25519: "{{ secret('ED25519_SSH_KEY') }}"  # <-- (Optional) Replace with your secret key, when using ED25519...
-    tasks:
-      - id: git_clone
-        type: io.kestra.plugin.git.Clone
-        url: your-git-repository-url  # <-- Replace with your Git repository URL...
-        directory: ansible
-        branch: main  # <-- (Optional) Replace with your Git branch...
-        # --> (Optional) If Git repository is private, add your Git token...
-        # username: xcad
-        # password: "{{ secret('GITOKEN') }}"
-        # <--
-      - id: ansible_playbook
-        type: io.kestra.plugin.ansible.cli.AnsibleCLI
-        taskRunner:
-          type: io.kestra.plugin.scripts.runner.docker.Docker
-          image: docker.io/cytopia/ansible:latest-tools
-          user: "1000"  # <-- (Required) Replace with your user id...
-        env:
-          "ANSIBLE_HOST_KEY_CHECKING": "false"
-          "ANSIBLE_REMOTE_USER": "your-remote-user"  # <-- (Required) Replace with your remote user...
-        commands:
-          - ansible-playbook -i ansible/inventory --key-file id_rsa ansible/your-playbook.yaml
-          # - ansible-playbook -i ansible/inventory --key-file id_ed25519 ansible/your-playbook.yaml  # <-- (Optional) when using ED25519...

+ 0 - 38
library/ci/kestra-ansible/ansible-playbook-inline.yaml

@@ -1,38 +0,0 @@
----
-# Kestra ansible-playbook Template
-# ---
-#
-# Run an ansible playbook defined inline the kestra flow.
-#
-id: ansible_playbook_inline
-namespace: your_namespace  # <-- Replace with your namespace...
-tasks:
-  - id: ansible_job
-    type: io.kestra.plugin.core.flow.WorkingDirectory
-    inputFiles:
-      inventory.ini: |  # <-- Replace with your inventory file content...
-        srv-demo-1.home.clcreative.de
-      myplaybook.yaml: |  # <-- Replace with your playbook file content...
-        ---
-        - hosts: srv-demo-1.home.clcreative.de
-          tasks:
-            - name: upgrade apt packages
-              become: true
-              ansible.builtin.apt:
-                upgrade: true
-                update_cache: true
-      id_rsa: "{{ secret('RSA_SSH_KEY') }}"  # <-- (Required) Replace with your secret key...
-      # id_ed25519: "{{ secret('ED25519_SSH_KEY') }}"  # <-- (Optional) Replace with your secret key, when using ED25519...
-    tasks:
-      - id: ansible_playbook
-        type: io.kestra.plugin.ansible.cli.AnsibleCLI
-        taskRunner:
-          type: io.kestra.plugin.scripts.runner.docker.Docker
-          image: docker.io/cytopia/ansible:latest-tools
-          user: "1000"  # <-- (Required) Replace with your user id...
-        env:
-          "ANSIBLE_HOST_KEY_CHECKING": "false"
-          "ANSIBLE_REMOTE_USER": "your-remote-user"  # <-- (Required) Replace with your remote user...
-        commands:
-          - ansible-playbook -i inventory.ini --key-file id_rsa myplaybook.yaml
-          # - ansible-playbook -i inventory.ini --key-file id_ed25519 myplaybook.yaml  # <-- (Optional) when using ED25519...

+ 0 - 31
library/ci/kestra-docker/docker-build-git.yaml

@@ -1,31 +0,0 @@
----
-# Kestra Docker Git Build Template
-# ---
-#
-# Build a Docker image from a Git repository.
-#
-id: docker_build_git
-namespace: your_namespace  # <- Replace with your namespace...
-tasks:
-  - id: docker_job
-    type: io.kestra.plugin.core.flow.WorkingDirectory
-    tasks:
-      - id: git_clone
-        type: io.kestra.plugin.git.Clone
-        url: your-git-repository-url  # <-- Replace with your Git repository URL...
-        directory: docker
-        branch: main  # <-- (Optional) Replace with your Git branch...
-        # --> (Optional) If Git repository is private, add your Git token...
-        # username: xcad
-        # password: "{{ secret('GITOKEN') }}"
-        # <--
-      - id: docker_build
-        type: io.kestra.plugin.docker.Build
-        dockerfile: "docker/src/Dockerfile"  # <- Replace with your Dockerfile path...
-        tags:
-          - your-username/your-repository:your-tag  # <- Replace with your Docker image tag...
-        push: true
-        credentials:
-          registry: https://index.docker.io/v1/
-          username: "{{ secret('YOUR_USERNAME') }}"  # <- Replace with your Docker Hub username...
-          password: "{{ secret('YOUR_PASSWORD') }}"  # <- Replace with your Docker Hub password...

+ 0 - 33
library/ci/kestra-docker/docker-build-inline.yaml

@@ -1,33 +0,0 @@
----
-# Kestra Docker File Build Template
-# ---
-#
-# Build a Docker image from a File.
-#
-id: docker_build_inline
-namespace: your_namespace  # <- Replace with your namespace...
-tasks:
-  - id: docker_job
-    type: io.kestra.plugin.core.flow.WorkingDirectory
-    inputFiles:
-      Dockerfile: |  # <- Replace with your Dockerfile content...
-        FROM alpine:latest
-        WORKDIR /app
-        COPY . /app
-        RUN apk add --update python3
-        CMD [ "python", "main.py"]
-      main.py: |  # <- Replace with your Python script content...
-        if __name__ == "__main__":
-          print("Hello from Docker!")
-          exit(0)
-    tasks:
-      - id: docker_build
-        type: io.kestra.plugin.docker.Build
-        dockerfile: "src/Dockerfile"  # <- Replace with your Dockerfile path...
-        tags:
-          - your-username/your-repository:your-tag  # <- Replace with your Docker image tag...
-        push: true
-        credentials:
-          registry: https://index.docker.io/v1/
-          username: "{{ secret('YOUR_USERNAME') }}"  # <- Replace with your Docker Hub username...
-          password: "{{ secret('YOUR_PASSWORD') }}"  # <- Replace with your Docker Hub password...

+ 0 - 65
library/ci/kestra-inputs/kestra-inputs.yaml

@@ -1,65 +0,0 @@
----
-# Kestra Inputs Template
-# ---
-#
-# Inputs is a list of dynamic values passed to the flow at runtime.
-#
-
-id: inputs  # <- Replace with your task id...
-namespace: your-namespace  # <- Replace with your namespace...
-
-inputs:
-  - id: string  # <- Replace with your input name...
-    type: STRING
-
-  - id: optional  # <- Replace with your input name...
-    type: STRING
-    required: false
-
-  - id: int  # <- Replace with your input name...
-    type: INT
-
-  - id: bool  # <- Replace with your input name...
-    type: BOOLEAN
-
-  - id: float  # <- Replace with your input name...
-    type: FLOAT
-
-  - id: instant  # <- Replace with your input name...
-    type: DATETIME
-
-  - id: date  # <- Replace with your input name...
-    type: DATE
-
-  - id: time  # <- Replace with your input name...
-    type: TIME
-
-  - id: duration  # <- Replace with your input name...
-    type: DURATION
-
-  - id: file  # <- Replace with your input name...
-    type: FILE
-
-  - id: optionalFile  # <- Replace with your input name...
-    type: FILE
-
-  - id: instantDefaults  # <- Replace with your input name...
-    type: DATETIME
-    defaults: "2013-08-09T14:19:00Z"  # <- Replace with your default value...
-
-  - id: json  # <- Replace with your input name...
-    type: JSON
-
-  - id: uri  # <- Replace with your input name...
-    type: URI
-
-  - id: secret  # <- Replace with your input name...
-    type: SECRET
-
-  - id: nested.string  # <- Replace with your input name...
-    type: STRING
-
-tasks:
-  - id: using_inputs
-    type: io.kestra.plugin.core.log.Log
-    message: "{{ inputs.string }}"

+ 0 - 19
library/ci/kestra-python/python_command.yaml

@@ -1,19 +0,0 @@
----
-# Kestra Python Command Template
-# ---
-#
-# This template is a simple Python script.
-#
-# usage:
-#   make sure the Kestra instance can access the /app/scripts/your-python-script.py file
-#   if you're running Kestra in Docker, use a volume to mount the file/directory.
-#
-id: python_command
-namespace: your_namespace  # <-- Replace with your namespace...
-tasks:
-  - id: python_job
-    type: io.kestra.plugin.scripts.python.Commands
-    commands:
-      - python /app/scripts/your-python-script.py
-    taskRunner:
-      type: io.kestra.plugin.core.runner.Process

+ 0 - 24
library/ci/kestra-python/python_script.yaml

@@ -1,24 +0,0 @@
----
-# Kestra Python Command Template
-# ---
-#
-# This template is a simple Python script that can be used to make a request to a website and log the status code.
-#
-id: python_script
-namespace: your_namespace  # <-- Replace with your namespace...
-tasks:
-  - id: python_job
-    type: io.kestra.plugin.scripts.python.Script
-    taskRunner:
-      type: io.kestra.plugin.core.runner.Process
-    script: |
-        from kestra import Kestra
-        import requests
-
-        response = requests.get('{{inputs.website}}')
-        print(response.status_code)
-
-        Kestra.outputs({'status': response.status_code, 'text': response.text})
-  - id: log
-    type: io.kestra.plugin.core.log.Log
-    message: "StatusCode: {{outputs.pythonscript.vars.status}}"

+ 0 - 17
library/ci/kestra-variables/kestra-variables.yaml

@@ -1,17 +0,0 @@
----
-# Kestra Variable Template
-# ---
-#
-# Variables is a list of static values passed to the flow at runtime.
-#
-
-id: variables  # <- Replace with your task id...
-namespace: your-namespace  # <- Replace with your namespace...
-
-variables:
-  variable-name: "variable-value"  # <- Replace with your variable name and value...
-
-tasks:
-  - id: using_variables
-    type: io.kestra.plugin.core.log.Log
-    message: "{{ vars.variable-name }}"

+ 0 - 20
library/ci/kestra-webhook/kestra-webhook.yaml

@@ -1,20 +0,0 @@
----
-# Kestra Webhook Template
-# ---
-#
-# This template is a simple webhook trigger that can be used to trigger a task execution.
-#
-# usage:
-#   curl http://your-kestra-instance/api/v1/executions/webhook/your-namespace/your-task-id/your-secret-key
-#
-
-id: webhook  # <- Replace with your task id...
-namespace: your-namespace  # <- Replace with your namespace...
-
-tasks:
-# -- Add your tasks here...
-
-triggers:
-  - id: webhook
-    type: io.kestra.plugin.core.trigger.Webhook
-    key: your-secret-key  # <- Replace with your secret key...

+ 26 - 0
library/compose/ansiblesemaphore/.env.database.j2

@@ -0,0 +1,26 @@
+{% if database_type == 'mysql' -%}
+# MySQL Database Configuration
+# Used when database_type=mysql and database_external=false
+
+# Database Settings
+MYSQL_RANDOM_ROOT_PASSWORD=yes
+MYSQL_DATABASE={{ database_name | default('semaphore') }}
+MYSQL_USER={{ database_user | default('semaphore') }}
+MYSQL_PASSWORD={{ database_password | default('semaphore') }}
+
+# Character Set
+MYSQL_CHARSET=utf8mb4
+MYSQL_COLLATION=utf8mb4_unicode_ci
+
+{% elif database_type == 'postgres' -%}
+# PostgreSQL Database Configuration
+# Used when database_type=postgres and database_external=false
+
+# Database Settings
+POSTGRES_DB={{ database_name | default('semaphore') }}
+POSTGRES_USER={{ database_user | default('semaphore') }}
+POSTGRES_PASSWORD={{ database_password | default('semaphore') }}
+
+# PostgreSQL Configuration
+POSTGRES_INITDB_ARGS=--encoding=UTF8 --locale=C
+{% endif -%}

+ 46 - 0
library/compose/ansiblesemaphore/.env.semaphore.j2

@@ -0,0 +1,46 @@
+# Ansible Semaphore Application Configuration
+# Contains application settings and database connection
+
+# Timezone
+TZ={{ container_timezone | default('UTC') }}
+
+# Database Connection
+{% if database_type == 'mysql' %}
+SEMAPHORE_DB_DIALECT=mysql
+{% elif database_type == 'postgres' %}
+SEMAPHORE_DB_DIALECT=postgres
+{% endif %}
+{% if database_external %}
+SEMAPHORE_DB_HOST={{ database_host }}
+{% else %}
+SEMAPHORE_DB_HOST={{ service_name | default('semaphore') }}-{{ database_type }}
+{% endif %}
+SEMAPHORE_DB_PORT={% if database_type == 'postgres' %}5432{% else %}3306{% endif %}
+SEMAPHORE_DB={{ database_name | default('semaphore') }}
+SEMAPHORE_DB_USER={{ database_user | default('semaphore') }}
+SEMAPHORE_DB_PASS={{ database_password | default('semaphore') }}
+
+# Admin Configuration
+SEMAPHORE_ADMIN={{ semaphore_admin_name | default('admin') }}
+SEMAPHORE_ADMIN_NAME={{ semaphore_admin_name | default('admin') }}
+SEMAPHORE_ADMIN_EMAIL={{ semaphore_admin_email | default('admin@localhost') }}
+SEMAPHORE_ADMIN_PASSWORD={{ semaphore_admin_password if semaphore_admin_password else (none | pwgen(20)) }}
+
+# Playbook Configuration
+SEMAPHORE_PLAYBOOK_PATH={{ semaphore_playbook_path | default('/tmp/semaphore/') }}
+
+# Access Key Encryption
+SEMAPHORE_ACCESS_KEY_ENCRYPTION={{ semaphore_access_key_encryption if semaphore_access_key_encryption else (none | pwgen(32)) }}
+
+# Ansible Settings
+ANSIBLE_HOST_KEY_CHECKING={{ ansible_host_key_checking | default(false) }}
+
+{% if email_enabled -%}
+# Email Server Configuration
+SEMAPHORE_EMAIL_SENDER={{ email_from }}
+SEMAPHORE_EMAIL_HOST={{ email_host }}
+SEMAPHORE_EMAIL_PORT={{ email_port | default(587) }}
+SEMAPHORE_EMAIL_USERNAME={{ email_username }}
+SEMAPHORE_EMAIL_PASSWORD={{ email_password }}
+SEMAPHORE_EMAIL_SECURE={{ email_use_tls | default(true) }}
+{% endif %}

+ 89 - 35
library/compose/ansiblesemaphore/compose.yaml.j2

@@ -1,43 +1,97 @@
 services:
-  mysql:
-    image: docker.io/library/mysql:8.4
-    hostname: mysql
-    volumes:
-      - semaphore-mysql:/var/lib/mysql
-    environment:
-      - MYSQL_RANDOM_ROOT_PASSWORD=yes
-      - MYSQL_DATABASE=semaphore
-      - MYSQL_USER=semaphore
-      - MYSQL_PASSWORD=secret-password  # change!
-    restart: unless-stopped
-  semaphore:
-    container_name: ansiblesemaphore
+  {{ service_name | default('semaphore') }}:
     image: docker.io/semaphoreui/semaphore:v2.16.18
-    user: "${UID}:${GID}"
+    container_name: {{ container_name | default('semaphore') }}
+    user: "{{ user_uid | default(1000) }}:{{ user_gid | default(1000) }}"
+    env_file:
+      - .env.semaphore
+    {% if ports_enabled %}
     ports:
-      - 3000:3000
-    environment:
-      - SEMAPHORE_DB_USER=semaphore
-      - SEMAPHORE_DB_PASS=secret-password  # change!
-      - SEMAPHORE_DB_HOST=mysql
-      - SEMAPHORE_DB_PORT=3306
-      - SEMAPHORE_DB_DIALECT=mysql
-      - SEMAPHORE_DB=semaphore
-      - SEMAPHORE_PLAYBOOK_PATH=/tmp/semaphore/
-      - SEMAPHORE_ADMIN_PASSWORD=secret-admin-password  # change!
-      - SEMAPHORE_ADMIN_NAME=admin
-      - SEMAPHORE_ADMIN_EMAIL=admin@localhost
-      - SEMAPHORE_ADMIN=admin
-      - SEMAPHORE_ACCESS_KEY_ENCRYPTION=  # add to your access key encryption !
-      - ANSIBLE_HOST_KEY_CHECKING=false  # (optional) change to true if you want to enable host key checking
+      - "{{ ports_http | default(3000) }}:3000"
+    {% endif %}
+    {% if network_enabled %}
+    networks:
+      - {{ network_name | default('bridge') }}
+    {% endif %}
+    {% if traefik_enabled %}
+    labels:
+      - traefik.enable=true
+      - traefik.http.services.{{ service_name | default('semaphore') }}.loadbalancer.server.port=3000
+      - traefik.http.routers.{{ service_name | default('semaphore') }}-http.rule=Host(`{{ traefik_host }}`)
+      - traefik.http.routers.{{ service_name | default('semaphore') }}-http.entrypoints={{ traefik_entrypoint | default('web') }}
+      {% if traefik_tls_enabled %}
+      - traefik.http.routers.{{ service_name | default('semaphore') }}-https.rule=Host(`{{ traefik_host }}`)
+      - traefik.http.routers.{{ service_name | default('semaphore') }}-https.entrypoints={{ traefik_tls_entrypoint | default('websecure') }}
+      - traefik.http.routers.{{ service_name | default('semaphore') }}-https.tls=true
+      - traefik.http.routers.{{ service_name | default('semaphore') }}-https.tls.certresolver={{ traefik_tls_certresolver }}
+      {% endif %}
+    {% endif %}
     volumes:
-      - ./inventory/:/inventory:ro
-      - ./authorized-keys/:/authorized-keys:ro
-      - ./config/:/etc/semaphore:rw
-    restart: unless-stopped
+      - ./inventory:/inventory:ro
+      - ./authorized-keys:/authorized-keys:ro
+      - ./config:/etc/semaphore:rw
     depends_on:
-      - mysql
+      {% if database_type == 'mysql' %}
+      - {{ service_name | default('semaphore') }}-mysql
+      {% elif database_type == 'postgres' %}
+      - {{ service_name | default('semaphore') }}-postgres
+      {% endif %}
+    restart: {{ restart_policy | default('unless-stopped') }}
+
+  {% if not database_external %}
+  {% if database_type == 'mysql' %}
+  {{ service_name | default('semaphore') }}-mysql:
+    image: docker.io/library/mysql:8.4
+    container_name: {{ service_name | default('semaphore') }}-mysql
+    env_file:
+      - .env.database
+    healthcheck:
+      test: ["CMD", "mysqladmin", "ping", "-h", "localhost", "-u", "{{ database_user | default('semaphore') }}", "-p{{ database_password | default('semaphore') }}"]
+      start_period: 30s
+      interval: 10s
+      timeout: 10s
+      retries: 5
+    volumes:
+      - database_data:/var/lib/mysql
+    {% if network_enabled %}
+    networks:
+      - {{ network_name | default('bridge') }}
+    {% endif %}
+    restart: {{ restart_policy | default('unless-stopped') }}
+  {% elif database_type == 'postgres' %}
+  {{ service_name | default('semaphore') }}-postgres:
+    image: docker.io/library/postgres:17.6
+    container_name: {{ service_name | default('semaphore') }}-postgres
+    env_file:
+      - .env.database
+    healthcheck:
+      test: ["CMD-SHELL", "pg_isready -U {{ database_user | default('semaphore') }}"]
+      start_period: 30s
+      interval: 10s
+      timeout: 10s
+      retries: 5
+    volumes:
+      - database_data:/var/lib/postgresql/data
+    {% if network_enabled %}
+    networks:
+      - {{ network_name | default('bridge') }}
+    {% endif %}
+    restart: {{ restart_policy | default('unless-stopped') }}
+  {% endif %}
+  {% endif %}
+
+{% if network_enabled %}
+networks:
+  {{ network_name | default('bridge') }}:
+    {% if network_external %}
+    external: true
+    {% else %}
+    driver: bridge
+    {% endif %}
+{% endif %}
 
 volumes:
-  semaphore-mysql:
+  {% if not database_external %}
+  database_data:
     driver: local
+  {% endif %}

+ 64 - 6
library/compose/ansiblesemaphore/template.yaml

@@ -1,12 +1,70 @@
 ---
 kind: compose
 metadata:
-  name: Volumes
-  description: Docker compose setup for volumes
-  version: 8.4
+  name: Ansible Semaphore
+  description: >
+    Modern UI for Ansible automation with task scheduling and web-based management.
+    Semaphore provides a beautiful web interface to run Ansible playbooks, manage
+    inventories, and schedule automated tasks. Perfect for teams who want a
+    user-friendly way to execute and monitor Ansible automation.
+
+
+    Project: https://www.semaphoreui.com/
+
+    Documentation: https://docs.semaphoreui.com/
+
+    GitHub: https://github.com/semaphoreui/semaphore
+  version: 2.16.18
   author: Christian Lempa
-  date: '2025-09-28'
+  date: '2025-10-02'
   tags:
     - ansible
-    - terraform
-spec: {}
+    - automation
+    - devops
+    - infrastructure
+spec:
+  database:
+    required: true
+    vars:
+      database_type:
+        default: "mysql"
+  ports:
+    vars:
+      ports_http:
+        description: "Host port for web interface"
+        type: int
+        default: 3000
+  semaphore:
+    description: "Configure Ansible Semaphore application settings"
+    required: true
+    vars:
+      semaphore_admin_name:
+        description: "Initial admin username"
+        type: str
+        default: "admin"
+      semaphore_admin_email:
+        description: "Admin email address"
+        type: str
+        default: "admin@localhost"
+      semaphore_admin_password:
+        description: "Initial admin password"
+        extra: "Leave empty for auto-generated 20-character secure password"
+        type: str
+        sensitive: true
+        autogenerated: true
+        default: ""
+      semaphore_access_key_encryption:
+        description: "Encryption key for access keys storage"
+        extra: "Leave empty for auto-generated 32-character secure key"
+        type: str
+        sensitive: true
+        autogenerated: true
+        default: ""
+      semaphore_playbook_path:
+        description: "Path for temporary playbook execution"
+        type: str
+        default: "/tmp/semaphore/"
+      ansible_host_key_checking:
+        description: "Enable Ansible SSH host key checking"
+        type: bool
+        default: false

+ 33 - 8
library/compose/bind9/compose.yaml.j2

@@ -1,11 +1,36 @@
 services:
-  bind9:
-    image: docker.io/ubuntu/bind9:9.20-24.10_edge
-    container_name: bind9
+  {{ service_name | default('bind9') }}:
+    image: docker.io/ubuntu/bind9:{{ bind9_version | default('9.20-24.10_edge') }}
+    container_name: {{ container_name | default('bind9') }}
+    hostname: {{ container_hostname | default('ns1') }}
+    environment:
+      - TZ={{ container_timezone | default('UTC') }}
+      - BIND9_USER=bind
+    {% if ports_enabled %}
     ports:
-      - "53:53"
+      - "53:53/tcp"
+      - "53:53/udp"
+    {% endif %}
     volumes:
-      - /etc/bind/:/etc/bind/
-      - /var/cache/bind:/var/cache/bind
-      - /var/lib/bind:/var/lib/bind
-    restart: unless-stopped
+      - ./config:/etc/bind
+      - bind9_zones:/var/lib/bind
+      - bind9_cache:/var/cache/bind
+    {% if network_enabled %}
+    networks:
+      - {{ network_name | default('bridge') }}
+    {% endif %}
+    restart: {{ restart_policy | default('unless-stopped') }}
+
+volumes:
+  bind9_zones:
+    driver: local
+  bind9_cache:
+    driver: local
+
+{% if network_enabled %}
+networks:
+  {{ network_name | default('bridge') }}:
+    {% if network_external %}
+    external: true
+    {% endif %}
+{% endif %}

+ 0 - 1
library/compose/bind9/config/example.named.conf

@@ -1 +0,0 @@
-

+ 67 - 0
library/compose/bind9/config/named.conf.j2

@@ -0,0 +1,67 @@
+// BIND9 Main Configuration File
+// Documentation: https://bind9.readthedocs.io/
+
+// Include TSIG keys for secure zone transfers
+include "/etc/bind/tsig.key";
+
+// ACL definitions for access control
+acl "trusted" {
+    127.0.0.1;
+    ::1;
+    10.0.0.0/8;
+    172.16.0.0/12;
+    192.168.0.0/16;
+};
+
+options {
+    directory "/var/cache/bind";
+    
+    // DNS forwarders for recursive queries
+    forwarders {
+        1.1.1.1;
+        8.8.8.8;
+    };
+    
+    // Allow recursion from trusted networks only
+    allow-recursion { trusted; };
+    
+    // Allow queries from any (adjust as needed)
+    allow-query { any; };
+    
+    // Disable zone transfers by default (enable per-zone with TSIG)
+    allow-transfer { none; };
+    
+    // DNSSEC validation
+    dnssec-validation auto;
+    
+    // Listen on all interfaces
+    listen-on { any; };
+    listen-on-v6 { any; };
+    
+    // Disable query logging (enable for debugging)
+    // querylog yes;
+};
+
+// Local zones
+zone "localhost" {
+    type master;
+    file "/etc/bind/db.local";
+};
+
+zone "127.in-addr.arpa" {
+    type master;
+    file "/etc/bind/db.127";
+};
+
+zone "0.in-addr.arpa" {
+    type master;
+    file "/etc/bind/db.0";
+};
+
+zone "255.in-addr.arpa" {
+    type master;
+    file "/etc/bind/db.255";
+};
+
+// Include your custom zones
+include "/etc/bind/named.conf.zones";

+ 32 - 0
library/compose/bind9/config/named.conf.zones.j2

@@ -0,0 +1,32 @@
+// Custom DNS Zones Configuration
+// Add your authoritative zones here
+
+// Example zone for {{ domain_name | default('example.com') }}
+zone "{{ domain_name | default('example.com') }}" {
+    type master;
+    file "/var/lib/bind/db.{{ domain_name | default('example.com') }}";
+    
+    // Allow zone transfers using TSIG key
+    allow-transfer { key "{{ tsig_key_name | default('transfer-key') }}"; };
+    
+    // Enable zone updates with TSIG (for dynamic DNS)
+    // allow-update { key "{{ tsig_key_name | default('transfer-key') }}"; };
+    
+    // Enable DNSSEC inline signing (optional)
+    // dnssec-policy default;
+    // inline-signing yes;
+};
+
+// Example reverse zone for 192.168.1.0/24
+// zone "1.168.192.in-addr.arpa" {
+//     type master;
+//     file "/var/lib/bind/db.192.168.1";
+//     allow-transfer { key "{{ tsig_key_name | default('transfer-key') }}"; };
+// };
+
+// Secondary/Slave zone example
+// zone "secondary.example.com" {
+//     type slave;
+//     masters { 192.168.1.100 key {{ tsig_key_name | default('transfer-key') }}; };
+//     file "/var/lib/bind/db.secondary.example.com";
+// };

+ 13 - 0
library/compose/bind9/config/tsig.key.j2

@@ -0,0 +1,13 @@
+// TSIG Key for Secure Zone Transfers
+// Auto-generated base64-encoded secret for secure zone transfers and dynamic DNS updates
+// Algorithm: hmac-sha256
+
+key "{{ tsig_key_name | default('transfer-key') }}" {
+    algorithm hmac-sha256;
+    secret "{{ tsig_key_secret if tsig_key_secret else (none | random_base64(64)) }}";
+};
+
+// To manually generate a new key:
+// docker exec bind9 tsig-keygen -a hmac-sha256 {{ tsig_key_name | default('transfer-key') }}
+//
+// Then update the secret value above with the generated secret

+ 63 - 8
library/compose/bind9/template.yaml

@@ -1,19 +1,74 @@
 ---
 kind: compose
 metadata:
-  name: Bind9
-  description: Docker compose setup for bind9
+  name: BIND9
+  description: >
+    BIND9 is the most widely used DNS server on the Internet.
+    This template provides an authoritative and recursive DNS server with example zones,
+    TSIG authentication for secure zone transfers, and DNSSEC support.
+
+
+    Project: https://www.isc.org/bind/
+
+    Documentation: https://bind9.readthedocs.io/
   version: 9.20-24.10_edge
   author: Christian Lempa
-  date: '2025-09-28'
+  date: '2025-10-02'
   tags:
-  - bind9
-  - docker
-  - compose
+    - bind9
+    - dns
+    - nameserver
+    - authoritative
+    - recursive
+  next_steps: |
+    1. Start the DNS server:
+       docker compose up -d
+
+    2. View the auto-generated TSIG key:
+       cat config/tsig.key
+
+    3. Test DNS queries:
+       dig @localhost home.arpa
+
+    4. Customize your zone:
+       - Edit config/named.conf.zones to add more zones
+       - Add zone files to /var/lib/bind/ volume
+       - Update config/named.conf to adjust forwarders and ACLs
+
+    5. Reload configuration after changes:
+       docker exec bind9 rndc reload
+
+    6. Check BIND9 configuration syntax:
+       docker exec bind9 named-checkconf /etc/bind/named.conf
+       docker exec bind9 named-checkzone home.arpa /var/lib/bind/db.home.arpa
+
+    For more information, visit: https://bind9.readthedocs.io/
 spec:
   general:
     vars:
       bind9_version:
         type: string
-        description: Bind9 version
-        default: latest
+        description: BIND9 Docker image tag
+        default: "9.20-24.10_edge"
+      domain_name:
+        type: str
+        description: "Primary domain name for your zone (e.g., home.arpa)"
+        default: "home.arpa"
+      tsig_key_name:
+        type: str
+        description: "TSIG key name for secure zone transfers"
+        default: "transfer-key"
+      tsig_key_secret:
+        type: str
+        description: "TSIG key secret (base64, auto-generated if empty)"
+        default: ""
+        sensitive: true
+        autogenerated: true
+  ports:
+    vars:
+      ports_enabled:
+        default: true
+  network:
+    vars:
+      network_enabled:
+        default: false

+ 0 - 16
library/compose/cadvisor/compose.yaml.j2

@@ -1,16 +0,0 @@
-services:
-  cadvisor:
-    image: gcr.io/cadvisor/cadvisor:v0.52.1
-    container_name: cadvisor
-    ports:
-      - 8080:8080
-    volumes:
-      - /:/rootfs:ro
-      - /run:/run:ro
-      - /sys:/sys:ro
-      - /var/lib/docker/:/var/lib/docker:ro
-      - /dev/disk/:/dev/disk:ro
-    devices:
-      - /dev/kmsg
-    privileged: true
-    restart: unless-stopped

+ 0 - 19
library/compose/cadvisor/template.yaml

@@ -1,19 +0,0 @@
----
-kind: compose
-metadata:
-  name: Cadvisor
-  description: Docker compose setup for cadvisor
-  version: v0.52.1
-  author: Christian Lempa
-  date: '2025-09-28'
-  tags:
-  - cadvisor
-  - docker
-  - compose
-spec:
-  general:
-    vars:
-      cadvisor_version:
-        type: string
-        description: Cadvisor version
-        default: latest

+ 0 - 20
library/vagrant/hyperv/ubuntu/docker/Vagrantfile

@@ -1,20 +0,0 @@
-# -*- mode: ruby -*-
-# vi: set ft=ruby :
-
-Vagrant.configure("2") do |config|
-  config.vm.box = "generic/ubuntu2004"
-  config.vm.network "public_network"
-  config.vm.synced_folder ".", "/vagrant_data", disabled: true
-  config.vm.provider "hyperv"
-  config.vm.provider "hyperv" do |h|
-    h.enable_virtualization_extensions = false
-    h.linked_clone = true
-    h.memory = 2048
-    h.vmname = "ubuntu_docker_1"
-  end
-
-  config.vm.provision "ansible" do |a|
-    a.verbose = "v"
-    a.playbook = "playbook.yaml"
-  end
-end

+ 0 - 38
library/vagrant/hyperv/ubuntu/docker/playbook.yaml

@@ -1,38 +0,0 @@
----
-- name: Install Docker
-  hosts: all
-  become: true
-
-  tasks:
-    - name: Install prerequisites
-      ansible.builtin.apt:
-        name:
-          - apt-transport-https
-          - ca-certificates
-          - curl
-          - gnupg-agent
-          - software-properties-common
-        update_cache: true
-
-    - name: Add apt-key
-      ansible.builtin.apt_key:
-        url: https://download.docker.com/linux/ubuntu/gpg
-
-    - name: Add docker repo
-      ansible.builtin.apt_repository:
-        repo: deb https://download.docker.com/linux/ubuntu focal stable
-
-    - name: Install docker
-      ansible.builtin.apt:
-        name:
-          - docker-ce
-          - docker-ce-cli
-          - containerd.io
-          - docker-compose
-        update_cache: true
-
-    - name: Add user vagrant to group docker
-      ansible.builtin.user:
-        name: vagrant
-        groups: docker
-        append: true

+ 0 - 20
library/vagrant/hyperv/ubuntu/microk8s-installed/Vagrantfile

@@ -1,20 +0,0 @@
-# -*- mode: ruby -*-
-# vi: set ft=ruby :
-
-Vagrant.configure("2") do |config|
-  config.vm.box = "generic/ubuntu2004"
-  config.vm.network "public_network"
-  config.vm.synced_folder ".", "/vagrant_data", disabled: true
-  config.vm.provider "hyperv"
-  config.vm.provider "hyperv" do |h|
-    h.enable_virtualization_extensions = false
-    h.linked_clone = true
-    h.memory = 2048
-    h.vmname = "ubuntu_plain_1"
-  end
-
-  config.vm.provision "ansible" do |a|
-    a.verbose = "v"
-    a.playbook = "playbook.yaml"
-  end
-end

+ 0 - 16
library/vagrant/hyperv/ubuntu/microk8s-installed/playbook.yaml

@@ -1,16 +0,0 @@
----
-- name: Install microk8s
-  hosts: all
-  become: true
-
-  tasks:
-    - name: Install microk8s
-      community.general.snap:
-        classic: true
-        name: microk8s
-
-    - name: Add user vagrant to group microk8s
-      ansible.builtin.user:
-        name: vagrant
-        groups: microk8s
-        append: true

+ 0 - 22
library/vagrant/hyperv/ubuntu/plain-with-cockpit/Vagrantfile

@@ -1,22 +0,0 @@
-# -*- mode: ruby -*-
-# vi: set ft=ruby :
-
-Vagrant.configure("2") do |config|
-  config.vm.box = "generic/ubuntu2004"
-  config.vm.network "public_network", bridge: "BRIDGE"
-  config.vm.synced_folder ".", "/vagrant_data", disabled: true
-  config.vm.provider "hyperv"
-  config.vm.hostname = "ubuntu_plan-with-cockpit-1"
-
-  config.vm.provider "hyperv" do |h|
-    h.enable_virtualization_extensions = false
-    h.linked_clone = true
-    h.memory = 2048
-    h.vmname = "ubuntu_plan-with-cockpit-1"
-  end
-
-  config.vm.provision "ansible" do |a|
-    a.verbose = "v"
-    a.playbook = "playbook.yaml"
-  end
-end

+ 0 - 10
library/vagrant/hyperv/ubuntu/plain-with-cockpit/playbook.yaml

@@ -1,10 +0,0 @@
----
-- name: Install Cockpit
-  hosts: all
-  become: true
-
-  tasks:
-    - name: Install cockpit
-      ansible.builtin.apt:
-        name: cockpit
-        update_cache: true

+ 0 - 15
library/vagrant/hyperv/ubuntu/plain/Vagrantfile

@@ -1,15 +0,0 @@
-# -*- mode: ruby -*-
-# vi: set ft=ruby :
-
-Vagrant.configure("2") do |config|
-  config.vm.box = "generic/ubuntu2004"
-  config.vm.network "public_network"
-  config.vm.synced_folder ".", "/vagrant_data", disabled: true
-  config.vm.provider "hyperv"
-  config.vm.provider "hyperv" do |h|
-    h.enable_virtualization_extensions = false
-    h.linked_clone = true
-    h.memory = 2048
-    h.vmname = "ubuntu_plain_1"
-  end
-end

+ 0 - 38
library/vagrant/hyperv/ubuntu/ubuntu-cluster-plain/Vagrantfile

@@ -1,38 +0,0 @@
-# -*- mode: ruby -*-
-# vi: set ft=ruby :
-
-Vagrant.configure("2") do |config|
-
-    config.vm.define "node1", primary: true do |node1|
-
-        node1.vm.box = "generic/ubuntu2004"
-        node1.vm.network "public_network", bridge: "BRIDGE"
-        node1.vm.synced_folder ".", "/vagrant_data", disabled: true
-        node1.vm.provider "hyperv"
-        node1.vm.hostname = "node1"
-
-        node1.vm.provider "hyperv" do |h|
-            h.enable_virtualization_extensions = false
-            h.linked_clone = true
-            h.memory = 2048
-            h.vmname = "node1"
-        end
-    end
-
-    config.vm.define "node2" do |node2|
-
-        node2.vm.box = "generic/ubuntu2004"
-        node2.vm.network "public_network", bridge: "BRIDGE"
-        node2.vm.synced_folder ".", "/vagrant_data", disabled: true
-        node2.vm.provider "hyperv"
-        node2.vm.hostname = "node2"
-
-        node2.vm.provider "hyperv" do |h|
-            h.enable_virtualization_extensions = false
-            h.linked_clone = true
-            h.memory = 2048
-            h.vmname = "node2"
-        end
-    end
-
-end

+ 0 - 16
library/vagrant/kvm/ubuntu/Vagrantfile

@@ -1,16 +0,0 @@
-# -*- mode: ruby -*-
-# vi: set ft=ruby :
-
-Vagrant.configure("2") do |config|
-  config.vm.box = "generic/ubuntu2004"
-  config.vm.network :public_network,
-      :dev => "virbr0",
-      :mode => "bridge",
-      :type => "bridge"
-  config.vm.synced_folder ".", "/vagrant_data", disabled: true
-  config.vm.provider :libvirt do |libvirt|
-    libvirt.cpus = 2
-    libvirt.memory = 4096
-    libvirt.nested = true
-  end
-end

+ 2 - 15
renovate.json

@@ -149,28 +149,15 @@
     },
     {
       "customType": "regex",
-      "description": "Update Terraform provider versions",
+      "description": "Update Terraform/OpenTofu providers and modules in templates",
       "fileMatch": [
         "library/terraform/.+\\.tf$",
         "library/terraform/.+\\.j2$"
       ],
       "matchStrings": [
-        "source\\s*=\\s*[\"'](?<depName>[^\"']+)[\"']\\s*\\n\\s*version\\s*=\\s*[\"'](?<currentValue>[^\"']+)[\"']"
+        "(?:source|module)\\s*=\\s*[\"'](?<depName>[^\"']+)[\"'](?:[\\s\\S]*?)version\\s*=\\s*[\"'](?<currentValue>[^\"']+)[\"']"
       ],
       "datasourceTemplate": "terraform-provider"
-    },
-    {
-      "customType": "regex",
-      "description": "Update Terraform module versions",
-      "fileMatch": [
-        "library/terraform/.+\\.tf$",
-        "library/terraform/.+\\.j2$"
-      ],
-      "matchStrings": [
-        "source\\s*=\\s*[\"'](?<depName>[^\"'?]+)(?:\\?ref=(?<currentValue>[^\"']+))?[\"']"
-      ],
-      "datasourceTemplate": "github-tags",
-      "versioningTemplate": "semver"
     }
   ],
   "postUpgradeTasks": {

+ 133 - 44
scripts/install.sh

@@ -1,19 +1,32 @@
 #!/usr/bin/env bash
 set -euo pipefail
 
-REPO_URL="${REPO_URL:-https://github.com/christianlempa/boilerplates.git}"
-BRANCH="${BRANCH:-main}"
+REPO_OWNER="christianlempa"
+REPO_NAME="boilerplates"
+VERSION="${VERSION:-latest}"
 TARGET_DIR="${TARGET_DIR:-$HOME/boilerplates}"
 
 usage() {
   cat <<USAGE
-Usage: install.sh [--path DIR] [--repo URL] [--branch BRANCH]
+Usage: install.sh [OPTIONS]
+
+Install the boilerplates CLI from GitHub releases.
 
 Options:
-  --path DIR      Installation directory (default: \"$HOME/boilerplates\")
-  --repo URL      Git repository URL (default: $REPO_URL)
-  --branch NAME   Git branch or tag to checkout (default: $BRANCH)
-  -h, --help      Show this message
+  --path DIR        Installation directory (default: "$HOME/boilerplates")
+  --version VER     Version to install (default: "latest")
+                    Examples: latest, v1.0.0, v0.0.1
+  -h, --help        Show this message
+
+Examples:
+  # Install latest version
+  curl -fsSL https://raw.githubusercontent.com/christianlempa/boilerplates/main/scripts/install.sh | bash
+
+  # Install specific version
+  curl -fsSL https://raw.githubusercontent.com/christianlempa/boilerplates/main/scripts/install.sh | bash -s -- --version v1.0.0
+
+  # Install to custom directory
+  curl -fsSL https://raw.githubusercontent.com/christianlempa/boilerplates/main/scripts/install.sh | bash -s -- --path ~/my-boilerplates
 USAGE
 }
 
@@ -42,14 +55,9 @@ parse_args() {
         TARGET_DIR="$2"
         shift 2
         ;;
-      --repo)
-        [[ $# -lt 2 ]] && error "--repo requires a value"
-        REPO_URL="$2"
-        shift 2
-        ;;
-      --branch)
-        [[ $# -lt 2 ]] && error "--branch requires a value"
-        BRANCH="$2"
+      --version)
+        [[ $# -lt 2 ]] && error "--version requires a value"
+        VERSION="$2"
         shift 2
         ;;
       -h|--help)
@@ -70,34 +78,89 @@ print(os.path.abspath(os.path.expanduser(sys.argv[1])))
 PY
 }
 
-update_repo() {
-  log "Updating existing repository at $TARGET_DIR"
-  git -C "$TARGET_DIR" fetch --tags origin "$BRANCH"
-  git -C "$TARGET_DIR" checkout "$BRANCH"
-  git -C "$TARGET_DIR" pull --ff-only origin "$BRANCH"
-}
-
-clone_repo() {
-  log "Cloning $REPO_URL into $TARGET_DIR"
-  git clone --branch "$BRANCH" "$REPO_URL" "$TARGET_DIR"
+get_latest_release() {
+  local api_url="https://api.github.com/repos/$REPO_OWNER/$REPO_NAME/releases/latest"
+  local release_tag
+  
+  log "Fetching latest release information..."
+  
+  if command -v curl >/dev/null 2>&1; then
+    release_tag=$(curl -fsSL "$api_url" | grep '"tag_name":' | sed -E 's/.*"tag_name": "([^"]+)".*/\1/')
+  elif command -v wget >/dev/null 2>&1; then
+    release_tag=$(wget -qO- "$api_url" | grep '"tag_name":' | sed -E 's/.*"tag_name": "([^"]+)".*/\1/')
+  else
+    error "Neither curl nor wget found. Please install one of them."
+  fi
+  
+  if [[ -z "$release_tag" ]]; then
+    error "Failed to fetch latest release tag"
+  fi
+  
+  echo "$release_tag"
 }
 
-ensure_repo() {
-  if [[ -d "$TARGET_DIR/.git" ]]; then
-    local current_remote
-    if current_remote=$(git -C "$TARGET_DIR" remote get-url origin 2>/dev/null); then
-      if [[ "$current_remote" != "$REPO_URL" ]]; then
-        log "Updating origin remote to $REPO_URL"
-        git -C "$TARGET_DIR" remote set-url origin "$REPO_URL"
-      fi
-    fi
-    update_repo
-  elif [[ -e "$TARGET_DIR" ]]; then
-    error "Target path $TARGET_DIR exists but is not a git repository"
+download_release() {
+  local version="$1"
+  local download_url
+  
+  # If version is "latest", resolve it to the actual version tag
+  if [[ "$version" == "latest" ]]; then
+    version=$(get_latest_release)
+    log "Latest version is $version"
+  fi
+  
+  # Ensure version has 'v' prefix for GitHub releases
+  if [[ ! "$version" =~ ^v ]]; then
+    version="v$version"
+  fi
+  
+  download_url="https://github.com/$REPO_OWNER/$REPO_NAME/archive/refs/tags/$version.tar.gz"
+  
+  log "Downloading release $version..."
+  log "URL: $download_url"
+  
+  local temp_dir
+  temp_dir=$(mktemp -d)
+  trap 'rm -rf "$temp_dir"' EXIT
+  
+  local archive_file="$temp_dir/boilerplates.tar.gz"
+  
+  if command -v curl >/dev/null 2>&1; then
+    curl -fsSL -o "$archive_file" "$download_url" || error "Failed to download release"
+  elif command -v wget >/dev/null 2>&1; then
+    wget -qO "$archive_file" "$download_url" || error "Failed to download release"
   else
-    mkdir -p "$(dirname "$TARGET_DIR")"
-    clone_repo
+    error "Neither curl nor wget found. Please install one of them."
+  fi
+  
+  log "Extracting release..."
+  
+  # Remove existing installation if present
+  if [[ -d "$TARGET_DIR" ]]; then
+    log "Removing existing installation at $TARGET_DIR"
+    rm -rf "$TARGET_DIR"
   fi
+  
+  # Create parent directory
+  mkdir -p "$(dirname "$TARGET_DIR")"
+  
+  # Extract the archive; the versioned top-level directory is renamed to TARGET_DIR below
+  tar -xzf "$archive_file" -C "$(dirname "$TARGET_DIR")"
+  
+  # Rename extracted directory to target name
+  local extracted_dir
+  extracted_dir=$(dirname "$TARGET_DIR")/"$REPO_NAME-${version#v}"
+  
+  if [[ ! -d "$extracted_dir" ]]; then
+    error "Extraction failed: expected directory $extracted_dir not found"
+  fi
+  
+  mv "$extracted_dir" "$TARGET_DIR"
+  
+  log "Release extracted to $TARGET_DIR"
+  
+  # Store version info
+  echo "$version" > "$TARGET_DIR/.installed-version"
 }
 
 ensure_pipx() {
@@ -129,30 +192,56 @@ pipx_install() {
   "${PIPX_CMD}" install --editable --force "$TARGET_DIR"
 }
 
+check_current_version() {
+  if [[ -f "$TARGET_DIR/.installed-version" ]]; then
+    cat "$TARGET_DIR/.installed-version"
+  else
+    echo "unknown"
+  fi
+}
+
 main() {
   parse_args "$@"
-  require_command git
   require_command python3
+  require_command tar
 
   TARGET_DIR="$(make_absolute_path)"
+  
+  # Check if already installed
+  local current_version
+  current_version=$(check_current_version)
+  
+  if [[ "$current_version" != "unknown" ]]; then
+    log "Currently installed version: $current_version"
+  fi
 
-  ensure_repo
+  download_release "$VERSION"
   ensure_pipx
   pipx_install
 
   local pipx_info
   pipx_info=$("${PIPX_CMD}" list --short 2>/dev/null | grep -E '^boilerplates' || echo "boilerplates (not detected)")
+  
+  local installed_version
+  installed_version=$(check_current_version)
 
   cat <<EOF2
 
-Installation complete.
-Repository: $TARGET_DIR
+✓ Installation complete!
+
+Version: $installed_version
+Location: $TARGET_DIR
 pipx environment: $pipx_info
 
 To use the CLI:
   boilerplate --help
+  boilerplate --version
+
+To update to the latest version:
+  curl -fsSL https://raw.githubusercontent.com/$REPO_OWNER/$REPO_NAME/main/scripts/install.sh | bash
 
-Re-run this script anytime to fetch the latest changes and refresh dependencies.
+To install a specific version:
+  curl -fsSL https://raw.githubusercontent.com/$REPO_OWNER/$REPO_NAME/main/scripts/install.sh | bash -s -- --version v1.0.0
 EOF2
 }