chore: merge release/v0.0.7 into main

xcad 3 months ago
parent
commit
5765491012

+ 3 - 42
.editorconfig

@@ -9,57 +9,18 @@ indent_style = space
 insert_final_newline = true
 trim_trailing_whitespace = true
 
-[/docker-compose/nginx/**/*.conf]
-indent_size = 2
-
-[/docker-compose/wazuh/**/*.conf]
+[*.json]
 indent_size = 2
 
-[*.css]
-indent_size = 2
-
-[{*.go,go.mod}]
-indent_style = tab
-indent_size = unset
-
-[*.hcl]
-indent_size = unset
-
-[*{.min,.min.*,-min}.js]
-charset = unset
-indent_size = unset
-indent_style = unset
-insert_final_newline = unset
-max_line_length = off
-
-[*.json]
+[*.{js,jsx,ts,tsx}]
 indent_size = 2
 
 [*.md]
 indent_size = unset
 trim_trailing_whitespace = false
 
-[*.nix]
-indent_size = 2
-
 [*.py]
-indent_size = 2
-
-[*.tf]
-indent_size = unset
-
-[/packer/**/http/user-data]
-indent_size = 2
+indent_size = 4
 
 [{*.{yaml,yml},.yamllint}]
 indent_size = 2
-
-[*.xml]
-indent_size = 2
-
-[Makefile]
-indent_style = tab
-indent_size = unset
-
-[Vagrantfile]
-indent_size = 2

+ 23 - 40
.github/workflows/release-create-cli-release.yaml

@@ -28,54 +28,37 @@ jobs:
           echo "tag=$GITHUB_REF_NAME" >> $GITHUB_OUTPUT
           echo "Extracted version: $VERSION from tag $GITHUB_REF_NAME"
 
-      - name: Update version in pyproject.toml
+      - name: Validate version consistency
         run: |
-          VERSION="${{ steps.version.outputs.version }}"
-          sed -i "s/^version = .*/version = \"$VERSION\"/" pyproject.toml
-          echo "✓ Updated pyproject.toml with version $VERSION"
+          TAG_VERSION="${{ steps.version.outputs.version }}"
 
-      - name: Update version in cli/__main__.py
-        run: |
-          VERSION="${{ steps.version.outputs.version }}"
-          sed -i "s/^__version__ = .*/__version__ = \"$VERSION\"/" cli/__main__.py
-          echo "✓ Updated cli/__main__.py with version $VERSION"
-
-      - name: Verify changes
-        run: |
-          echo "=== pyproject.toml ==="
-          grep "^version" pyproject.toml
-          echo ""
-          echo "=== cli/__main__.py ==="
-          grep "^__version__" cli/__main__.py
-
-      - name: Commit and update tag
-        run: |
-          git config --local user.email "github-actions[bot]@users.noreply.github.com"
-          git config --local user.name "github-actions[bot]"
+          # Extract version from pyproject.toml
+          PYPROJECT_VERSION=$(grep '^version = ' pyproject.toml | sed 's/version = "\(.*\)"/\1/')
 
-          # Add changes
-          git add pyproject.toml cli/__main__.py
+          # Extract version from cli/__init__.py
+          CLI_VERSION=$(grep '^__version__ = ' cli/__init__.py | sed 's/__version__ = "\(.*\)"/\1/')
 
-          # Check if there are changes to commit
-          if git diff --staged --quiet; then
-            echo "No version changes needed"
-          else
-            # Commit the version updates
-            git commit -m "chore: bump version to ${{ steps.version.outputs.version }}"
-
-            # Delete the tag locally and remotely
-            git tag -d ${{ steps.version.outputs.tag }}
-            git push origin :refs/tags/${{ steps.version.outputs.tag }}
-
-            # Recreate the tag pointing to the new commit
-            git tag -a ${{ steps.version.outputs.tag }} -m "Release ${{ steps.version.outputs.tag }}"
+          echo "Tag version:        $TAG_VERSION"
+          echo "pyproject.toml:     $PYPROJECT_VERSION"
+          echo "cli/__init__.py:    $CLI_VERSION"
+          echo ""
 
-            # Push the new tag
-            git push origin ${{ steps.version.outputs.tag }}
+          # Check if all versions match
+          if [ "$TAG_VERSION" != "$PYPROJECT_VERSION" ]; then
+            echo "Error: Tag version ($TAG_VERSION) does not match pyproject.toml version ($PYPROJECT_VERSION)"
+            echo "Please update pyproject.toml to version $TAG_VERSION before creating the release."
+            exit 1
+          fi
 
-            echo "✓ Tag ${{ steps.version.outputs.tag }} updated to point to version bump commit"
+          if [ "$TAG_VERSION" != "$CLI_VERSION" ]; then
+            echo "Error: Tag version ($TAG_VERSION) does not match cli/__init__.py version ($CLI_VERSION)"
+            echo "Please update cli/__init__.py to version $TAG_VERSION before creating the release."
+            exit 1
           fi
 
+          echo "Version consistency check passed"
+          echo "All version strings match: $TAG_VERSION"
+
       - name: Set up Python
         uses: actions/setup-python@v6
         with:

+ 0 - 1
.gitignore

@@ -29,4 +29,3 @@ tests/
 config.yaml
 
 *~
-

+ 184 - 13
AGENTS.md

@@ -21,8 +21,7 @@ python3 -m cli --log-level DEBUG compose list
 
 Should **always** happen before pushing anything to the repository.
 
-- Use `yamllint` for YAML files and `pylint` for Python code.
-- Use `2` spaces for YAML and Python indentation.
+- Use `yamllint` for YAML files and `ruff` for Python code.
 
 ### Project Management and Git
 
@@ -60,25 +59,86 @@ The project is stored in a public GitHub Repository, use issues, and branches fo
 - `cli/core/prompt.py` - Interactive CLI prompts using rich library
 - `cli/core/registry.py` - Central registry for module classes (auto-discovers modules)
 - `cli/core/repo.py` - Repository management for syncing git-based template libraries
-- `cli/core/sections.py` - Dataclass for VariableSection (stores section metadata and variables)
+- `cli/core/section.py` - Dataclass for VariableSection (stores section metadata and variables)
 - `cli/core/template.py` - Template Class for parsing, managing and rendering templates
-- `cli/core/variables.py` - Dataclass for Variable (stores variable metadata and values)
+- `cli/core/variable.py` - Dataclass for Variable (stores variable metadata and values)
+- `cli/core/validators.py` - Semantic validators for template content (Docker Compose, YAML, etc.)
+- `cli/core/version.py` - Version comparison utilities for semantic versioning
 
 ### Modules
 
-- `cli/modules/compose.py` - Docker Compose-specific functionality
-**(Work in Progress)**
-- `cli/modules/terraform.py` - Terraform-specific functionality
-- `cli/modules/docker.py` - Docker-specific functionality
-- `cli/modules/ansible.py` - Ansible-specific functionality
-- `cli/modules/kubernetes.py` - Kubernetes-specific functionality
-- `cli/modules/packer.py` - Packer-specific functionality
+**Module Structure:**
+Modules can be either single files or packages:
+- **Single file**: `cli/modules/modulename.py` (for simple modules)
+- **Package**: `cli/modules/modulename/` with `__init__.py` (for multi-schema modules)
+
+**Creating Modules:**
+- Subclass `Module` from `cli/core/module.py`
+- Define `name`, `description`, and `schema_version` class attributes
+- For multi-schema modules: organize specs in separate files (e.g., `spec_v1_0.py`, `spec_v1_1.py`)
+- Call `registry.register(YourModule)` at module bottom
+- Auto-discovered and registered at CLI startup (see the sketch below)
+
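+A minimal sketch, assuming the base class is importable as `cli.core.module.Module` (the module name and class below are illustrative):
+
+```python
+# cli/modules/example.py - hypothetical single-file module
+from cli.core.module import Module
+from cli.core.registry import registry
+
+class ExampleModule(Module):
+  name = "example"
+  description = "Example module (illustrative)"
+  schema_version = "1.0"
+
+registry.register(ExampleModule)
+```
+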
+**Module Spec:**
+Optional class attribute for module-wide variable defaults. Example:
+```python
+spec = VariableCollection.from_dict({
+  "general": {"vars": {"common_var": {"type": "str", "default": "value"}}},
+  "networking": {"title": "Network", "toggle": "net_enabled", "vars": {...}}
+})
+```
+
+**Multi-Schema Modules:**
+For modules supporting multiple schema versions, use package structure:
+```
+cli/modules/compose/
+  __init__.py          # Module class, loads appropriate spec
+  spec_v1_0.py         # Schema 1.0 specification
+  spec_v1_1.py         # Schema 1.1 specification
+```
+
+**Existing Modules:**
+- `cli/modules/compose/` - Docker Compose package with schema 1.0 and 1.1 support
+  - `spec_v1_0.py` - Basic compose spec
+  - `spec_v1_1.py` - Extended with network_mode, swarm support
+
+**(Work in Progress):** terraform, docker, ansible, kubernetes, packer modules
 
 ### LibraryManager
 
 - Loads libraries from config file
 - Stores Git Libraries under: `~/.config/boilerplates/libraries/{name}/`
 - Uses sparse-checkout to clone only template directories for git-based libraries (avoiding unnecessary files)
+- Supports two library types: **git** (synced from repos) and **static** (local directories)
+- Priority determined by config order (first = highest)
+
+**Library Types:**
+- `git`: Requires `url`, `branch`, `directory` fields
+- `static`: Requires `path` field (absolute or relative to config)
+
+**Duplicate Handling:**
+- Within same library: Raises `DuplicateTemplateError`
+- Across libraries: Uses qualified IDs (e.g., `alloy.default`, `alloy.local`)
+- Simple IDs use priority: `compose show alloy` loads from first library
+- Qualified IDs target specific library: `compose show alloy.local` (see the resolution sketch below)
+
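+A rough sketch of this resolution order, assuming qualified IDs use a `<template>.<library>` suffix and ignoring edge cases such as dots inside template names (the function name is hypothetical):
+
+```python
+def resolve(template_id: str, library_names: list[str]) -> tuple[str, str]:
+  """Return (library, template) for a possibly qualified template ID."""
+  if "." in template_id:
+    template, library = template_id.rsplit(".", 1)
+    if library in library_names:
+      # Qualified ID targets a specific library
+      return library, template
+  # Simple ID: first library in config order wins (highest priority)
+  return library_names[0], template_id
+```
+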
+**Config Example:**
+```yaml
+libraries:
+  - name: default       # Highest priority (checked first)
+    type: git
+    url: https://github.com/user/templates.git
+    branch: main
+    directory: library
+  - name: local         # Lower priority
+    type: static
+    path: ~/my-templates
+    url: ''             # Backward compatibility fields
+    branch: main
+    directory: .
+```
+
+**Note:** Static libraries include dummy `url`/`branch`/`directory` fields for backward compatibility with older CLI versions.
 
 ### ConfigManager
 
@@ -100,6 +160,7 @@ Requires `template.yaml` or `template.yml` with metadata and variables:
 ```yaml
 ---
 kind: compose
+schema: "1.0"  # Optional: Defaults to 1.0 if not specified
 metadata:
   name: My Nginx Template
   description: >
@@ -123,6 +184,71 @@ spec:
         default: latest
 ```
 
+### Template Schema Versioning
+
+Templates and modules use schema versioning to ensure compatibility. Each module defines a supported schema version, and templates declare which schema version they use.
+
+```yaml
+---
+kind: compose
+schema: "1.0"  # Defaults to 1.0 if not specified
+metadata:
+  name: My Template
+  version: 1.0.0
+  # ... other metadata fields
+spec:
+  # ... variable specifications
+```
+
+**How It Works:**
+- **Module Schema Version**: Each module defines `schema_version` (e.g., "1.1")
+- **Module Spec Loading**: Modules load appropriate spec based on template's schema version
+- **Template Schema Version**: Each template declares `schema` at the top level (defaults to "1.0")
+- **Compatibility Check**: Template schema ≤ Module schema → Compatible
+- **Incompatibility**: Template schema > Module schema → `IncompatibleSchemaVersionError`
+
+**Behavior:**
+- Templates without `schema` field default to "1.0" (backward compatible)
+- Old templates (schema 1.0) work with newer modules (schema 1.1)
+- New templates (schema 1.2) fail on older modules (schema 1.1) with clear error
+- Version comparison uses 2-level versioning (major.minor format); see the comparison sketch below
+
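+**Comparison Sketch:**
+A minimal sketch of the major.minor check, assuming `cli/core/version.py` exposes a helper along these lines (the function name is hypothetical):
+
+```python
+def is_schema_compatible(template_schema: str, module_schema: str) -> bool:
+  """Compatible when template schema <= module schema (major.minor)."""
+  template = tuple(int(part) for part in template_schema.split("."))
+  module = tuple(int(part) for part in module_schema.split("."))
+  return template <= module  # tuple comparison: major first, then minor
+```
+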
+**When to Use:**
+- Increment module schema version when adding new features (new variable types, sections, etc.)
+- Set template schema when using features from a specific schema
+- Example: Template using new variable type added in schema 1.1 should set `schema: "1.1"`
+
+**Single-File Module Example:**
+```python
+class SimpleModule(Module):
+  name = "simple"
+  description = "Simple module"
+  schema_version = "1.0"
+  spec = VariableCollection.from_dict({...})  # Single spec
+```
+
+**Multi-Schema Module Example:**
+```python
+# cli/modules/compose/__init__.py
+class ComposeModule(Module):
+  name = "compose"
+  description = "Manage Docker Compose configurations"
+  schema_version = "1.1"  # Highest schema version supported
+  
+  def get_spec(self, template_schema: str) -> VariableCollection:
+    """Load spec based on template schema version."""
+    if template_schema == "1.0":
+      from .spec_v1_0 import get_spec
+    elif template_schema == "1.1":
+      from .spec_v1_1 import get_spec
+    else:
+      # Guard against template schemas newer than this module supports
+      raise IncompatibleSchemaVersionError(f"Unsupported template schema: {template_schema}")
+    return get_spec()
+```
+
+**Version Management:**
+- CLI version is defined in `cli/__init__.py` as `__version__`
+- pyproject.toml version must match `__version__` for releases
+- GitHub release workflow validates version consistency
+
 ### Template Files
 
 - **Jinja2 Templates (`.j2`)**: Rendered by Jinja2, `.j2` extension removed in output. Support `{% include %}` and `{% import %}`.
@@ -137,7 +263,23 @@ spec:
 3. User `config.yaml` (overrides template and module defaults)
 4. CLI `--var` (highest priority)
 
-**Key Features:**
+**Variable Types:**
+- `str` (default), `int`, `float`, `bool`
+- `email` - Email validation with regex
+- `url` - URL validation (requires scheme and host)
+- `hostname` - Hostname/domain validation
+- `enum` - Choice from `options` list
+
+**Variable Properties:**
+- `sensitive: true` - Masked in prompts/display (e.g., passwords)
+- `autogenerated: true` - Auto-generates value if empty (shows `*auto` placeholder)
+- `default` - Default value
+- `description` - Variable description
+- `prompt` - Custom prompt text (overrides description)
+- `extra` - Additional help text
+- `options` - List of valid values (for enum type); see the combined example below
+
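+An illustrative fragment combining these properties, in the dict form accepted by `VariableCollection.from_dict` (the variable names are made up):
+
+```python
+spec = VariableCollection.from_dict({
+  "general": {
+    "vars": {
+      "admin_email": {"type": "email", "description": "Administrator contact"},
+      "admin_password": {"type": "str", "sensitive": True, "autogenerated": True},
+      "log_level": {"type": "enum", "options": ["info", "debug"], "default": "info"},
+    }
+  }
+})
+```
+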
+**Section Features:**
 - **Required Sections**: Mark with `required: true` (general is implicit). Users must provide all values.
 - **Toggle Settings**: Conditional sections via `toggle: "bool_var_name"`. If false, section is skipped.
 - **Dependencies**: Use `needs: "section_name"` or `needs: ["sec1", "sec2"]`. Dependent sections only shown when dependencies are enabled. Auto-validated (detects circular/missing/self dependencies). Topologically sorted.
@@ -171,13 +313,42 @@ spec:
         default: myresolver
 ```
 
+## Validation
+
+**Jinja2 Validation:**
+- Templates validated for Jinja2 syntax errors during load
+- Checks for undefined variables (variables used but not declared in spec)
+- Built into Template class
+
+**Semantic Validation:**
+- Validator registry system in `cli/core/validators.py`
+- Extensible: `ContentValidator` abstract base class (sketched below)
+- Built-in validators: `DockerComposeValidator`, `YAMLValidator`
+- Validates rendered output (YAML structure, Docker Compose schema, etc.)
+- Triggered via `compose validate` command with `--semantic` flag (enabled by default)
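+
+A hypothetical sketch of a custom validator; the exact `ContentValidator` interface lives in `cli/core/validators.py` and may differ (the `validate` hook and its return type are assumptions):
+
+```python
+import json
+
+from cli.core.validators import ContentValidator
+
+class JSONValidator(ContentValidator):
+  """Checks that rendered output parses as JSON (illustrative only)."""
+
+  def validate(self, content: str) -> list[str]:
+    # Assumed contract: return a list of error messages, empty when valid
+    try:
+      json.loads(content)
+      return []
+    except ValueError as e:
+      return [f"Invalid JSON: {e}"]
+```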
+
 ## Prompt
 
 Uses `rich` library for interactive prompts. Supports:
 - Text input
-- Password input (masked)
+- Password input (masked, for `sensitive: true` variables)
 - Selection from list (single/multiple)
 - Confirmation (yes/no)
 - Default values
+- Autogenerated variables (show `*auto` placeholder, generate on render)
 
 To skip the prompt use the `--no-interactive` flag, which will use defaults or empty values.
+
+## Commands
+
+**Standard Module Commands** (auto-registered for all modules):
+- `list` - List all templates
+- `search <query>` - Search templates by ID
+- `show <id>` - Show template details
+- `generate <id> [directory]` - Generate from template (supports `--dry-run`, `--var`, `--no-interactive`)
+- `validate [id]` - Validate templates (Jinja2 + semantic)
+- `defaults` - Manage config defaults (`get`, `set`, `rm`, `clear`, `list`)
+
+**Core Commands:**
+- `repo sync` - Sync git-based libraries
+- `repo list` - List configured libraries

+ 40 - 1
CHANGELOG.md

@@ -7,6 +7,44 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 
 ## [Unreleased]
 
+## [0.0.7] - 2025-10-28
+
+### Added
+- Multiple Library Support (#1314) for git and local libraries
+- Multi-Schema Module Support and Backward Compatibility (Schema-1.0)
+- Schema-1.1 `network_mode` with options: bridge, host, macvlan
+- Schema-1.1 `swarm` module support
+- Variable-level and Section-level dependencies via `needs`, with multiple-value support
+- Optional Variables `optional: true` to allow empty/None values
+- PEP 8 formatting alignment
+- CLI variable dependency validation - raises error when CLI-provided variables have unsatisfied dependencies
+- Support for required variables independent of section state (#1355)
+  - Variables can now be marked with `required: true` in template specs
+  - Required variables are always prompted, validated, and included in rendering
+  - Display shows yellow `(required)` indicator for required variables
+  - Required variables from disabled sections are still collected and available
+
+### Changed
+- Schema-1.1 Unified Docker Swarm Placement (#1359) - Simplified swarm placement constraints into a single variable
+- Refactored compose module from single file to package structure
+- Dependency validation moved to `validate_all()` for better error reporting
+- Schema-1.1 removed `network_enabled`, `ports_enabled` and `database_enabled` toggles (no longer optional)
+- Improved error handling and display output consistency
+- Updated dependency PyYAML to v6.0.3 (Python 3.14 compatibility)
+- Updated dependency rich to v14.2.0 (Python 3.14 compatibility)
+- Pinned all dependencies to specific tested versions for consistent installations
+
+### Fixed
+- Required sections now ignore toggle and are always enabled
+- Module spec loading based on correct template schema version
+- Interactive prompts now skip all variables (including required) when parent section is disabled
+- Absolute paths without leading slash treated as relative paths in generate command (#1357)
+  - Paths like `Users/xcad/Projects/test` are now correctly normalized to `/Users/xcad/Projects/test`
+  - Supports common Unix/Linux root directories: Users/, home/, usr/, opt/, var/, tmp/
+- Repository fetch fails when library directory already exists (#1279)
+- **Critical:** Python 3.9 compatibility - removed Context type annotations causing RuntimeError
+- Context access now uses click.get_current_context() for better compatibility
+
 ## [0.0.6] - 2025-10-14
 
 ### Changed
@@ -26,6 +64,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 
 Initial public release with core CLI functionality.
 
-[unreleased]: https://github.com/christianlempa/boilerplates/compare/v0.0.6...HEAD
+[unreleased]: https://github.com/christianlempa/boilerplates/compare/v0.0.7...HEAD
+[0.0.7]: https://github.com/christianlempa/boilerplates/compare/v0.0.6...v0.0.7
 [0.0.6]: https://github.com/christianlempa/boilerplates/releases/tag/v0.0.6
 [0.0.4]: https://github.com/christianlempa/boilerplates/releases/tag/v0.0.4

+ 1 - 1
cli/__init__.py

@@ -2,6 +2,6 @@
 Boilerplates CLI - A sophisticated command-line tool for managing infrastructure boilerplates.
 """
 
-__version__ = "0.0.1"
+__version__ = "0.0.7"
 __author__ = "Christian Lempa"
 __description__ = "CLI tool for managing infrastructure boilerplates"

+ 182 - 162
cli/__main__.py

@@ -3,6 +3,7 @@
 Main entry point for the Boilerplates CLI application.
 This file serves as the primary executable when running the CLI.
 """
+
 from __future__ import annotations
 
 import importlib
@@ -11,190 +12,209 @@ import pkgutil
 import sys
 from pathlib import Path
 from typing import Optional
-from typer import Typer, Context, Option
+from typer import Typer, Option
 from rich.console import Console
 import cli.modules
 from cli.core.registry import registry
 from cli.core import repo
+from cli import __version__
 # Using standard Python exceptions instead of custom ones
 
-# NOTE: Placeholder version - will be overwritten by release script (.github/workflows/release.yaml)
-__version__ = "0.0.0"
-
 app = Typer(
-  help="CLI tool for managing infrastructure boilerplates.\n\n[dim]Easily generate, customize, and deploy templates for Docker Compose, Terraform, Kubernetes, and more.\n\n [white]Made with 💜 by [bold]Christian Lempa[/bold]",
-  add_completion=True,
-  rich_markup_mode="rich",
+    help="CLI tool for managing infrastructure boilerplates.\n\n[dim]Easily generate, customize, and deploy templates for Docker Compose, Terraform, Kubernetes, and more.\n\n [white]Made with 💜 by [bold]Christian Lempa[/bold]",
+    add_completion=True,
+    rich_markup_mode="rich",
 )
 console = Console()
 
+
 def setup_logging(log_level: str = "WARNING") -> None:
-  """Configure the logging system with the specified log level.
-  
-  Args:
-      log_level: The logging level (DEBUG, INFO, WARNING, ERROR, CRITICAL)
-  
-  Raises:
-      ValueError: If the log level is invalid
-      RuntimeError: If logging configuration fails
-  """
-  numeric_level = getattr(logging, log_level.upper(), None)
-  if not isinstance(numeric_level, int):
-    raise ValueError(
-      f"Invalid log level '{log_level}'. Valid levels: DEBUG, INFO, WARNING, ERROR, CRITICAL"
-    )
-  
-  try:
-    logging.basicConfig(
-      level=numeric_level,
-      format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
-      datefmt='%Y-%m-%d %H:%M:%S'
-    )
+    """Configure the logging system with the specified log level.
+
+    Args:
+        log_level: The logging level (DEBUG, INFO, WARNING, ERROR, CRITICAL)
+
+    Raises:
+        ValueError: If the log level is invalid
+        RuntimeError: If logging configuration fails
+    """
+    numeric_level = getattr(logging, log_level.upper(), None)
+    if not isinstance(numeric_level, int):
+        raise ValueError(
+            f"Invalid log level '{log_level}'. Valid levels: DEBUG, INFO, WARNING, ERROR, CRITICAL"
+        )
+
+    try:
+        logging.basicConfig(
+            level=numeric_level,
+            format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
+            datefmt="%Y-%m-%d %H:%M:%S",
+        )
+
+        logger = logging.getLogger(__name__)
+        logger.setLevel(numeric_level)
+    except Exception as e:
+        raise RuntimeError(f"Failed to configure logging: {e}")
 
-    logger = logging.getLogger(__name__)
-    logger.setLevel(numeric_level)
-  except Exception as e:
-    raise RuntimeError(f"Failed to configure logging: {e}")
 
 @app.callback(invoke_without_command=True)
 def main(
-  version: Optional[bool] = Option(
-    None,
-    "--version",
-    "-v",
-    help="Show the application version and exit.",
-    is_flag=True,
-    callback=lambda v: console.print(f"boilerplates version {__version__}") or sys.exit(0) if v else None,
-    is_eager=True,
-  ),
-  log_level: Optional[str] = Option(
-    None,
-    "--log-level",
-    help="Set the logging level (DEBUG, INFO, WARNING, ERROR, CRITICAL). If omitted, logging is disabled."
-  )
+    version: Optional[bool] = Option(
+        None,
+        "--version",
+        "-v",
+        help="Show the application version and exit.",
+        is_flag=True,
+        callback=lambda v: console.print(f"boilerplates version {__version__}")
+        or sys.exit(0)
+        if v
+        else None,
+        is_eager=True,
+    ),
+    log_level: Optional[str] = Option(
+        None,
+        "--log-level",
+        help="Set the logging level (DEBUG, INFO, WARNING, ERROR, CRITICAL). If omitted, logging is disabled.",
+    ),
 ) -> None:
-  """CLI tool for managing infrastructure boilerplates."""
-  # Disable logging by default; only enable when user provides --log-level
-  if log_level:
-    # Re-enable logging and configure
-    logging.disable(logging.NOTSET)
-    setup_logging(log_level)
-  else:
-    # Silence all logging (including third-party) unless user explicitly requests it
-    logging.disable(logging.CRITICAL)
-  
-  # Get context without type annotation (compatible with all Typer versions)
-  import click
-  ctx = click.get_current_context()
-  
-  # Store log level in context for potential use by other commands
-  ctx.ensure_object(dict)
-  ctx.obj['log_level'] = log_level
-  
-  # If no subcommand is provided, show help and friendly intro
-  if ctx.invoked_subcommand is None:
-    console.print(ctx.get_help())
-    sys.exit(0)
+    """CLI tool for managing infrastructure boilerplates."""
+    # Disable logging by default; only enable when user provides --log-level
+    if log_level:
+        # Re-enable logging and configure
+        logging.disable(logging.NOTSET)
+        setup_logging(log_level)
+    else:
+        # Silence all logging (including third-party) unless user explicitly requests it
+        logging.disable(logging.CRITICAL)
+
+    # Get context without type annotation (compatible with all Typer versions)
+    import click
+
+    ctx = click.get_current_context()
+
+    # Store log level in context for potential use by other commands
+    ctx.ensure_object(dict)
+    ctx.obj["log_level"] = log_level
+
+    # If no subcommand is provided, show help and friendly intro
+    if ctx.invoked_subcommand is None:
+        console.print(ctx.get_help())
+        sys.exit(0)
+
 
 def init_app() -> None:
-  """Initialize the application by discovering and registering modules.
-  
-  Raises:
-      ImportError: If critical module import operations fail
-      RuntimeError: If application initialization fails
-  """
-  logger = logging.getLogger(__name__)
-  failed_imports = []
-  failed_registrations = []
-  
-  try:
-    # Auto-discover and import all modules
-    modules_path = Path(cli.modules.__file__).parent
-    logger.debug(f"Discovering modules in {modules_path}")
-    
-    for finder, name, ispkg in pkgutil.iter_modules([str(modules_path)]):
-      if not ispkg and not name.startswith('_') and name != 'base':
+    """Initialize the application by discovering and registering modules.
+
+    Raises:
+        ImportError: If critical module import operations fail
+        RuntimeError: If application initialization fails
+    """
+    logger = logging.getLogger(__name__)
+    failed_imports = []
+    failed_registrations = []
+
+    try:
+        # Auto-discover and import all modules
+        modules_path = Path(cli.modules.__file__).parent
+        logger.debug(f"Discovering modules in {modules_path}")
+
+        for finder, name, ispkg in pkgutil.iter_modules([str(modules_path)]):
+            # Import both module files and packages (for multi-schema modules)
+            if not name.startswith("_") and name != "base":
+                try:
+                    logger.debug(
+                        f"Importing module: {name} ({'package' if ispkg else 'file'})"
+                    )
+                    importlib.import_module(f"cli.modules.{name}")
+                except ImportError as e:
+                    error_info = f"Import failed for '{name}': {str(e)}"
+                    failed_imports.append(error_info)
+                    logger.warning(error_info)
+                except Exception as e:
+                    error_info = f"Unexpected error importing '{name}': {str(e)}"
+                    failed_imports.append(error_info)
+                    logger.error(error_info)
+
+        # Register core repo command
         try:
-          logger.debug(f"Importing module: {name}")
-          importlib.import_module(f"cli.modules.{name}")
-        except ImportError as e:
-          error_info = f"Import failed for '{name}': {str(e)}"
-          failed_imports.append(error_info)
-          logger.warning(error_info)
+            logger.debug("Registering repo command")
+            repo.register_cli(app)
         except Exception as e:
-          error_info = f"Unexpected error importing '{name}': {str(e)}"
-          failed_imports.append(error_info)
-          logger.error(error_info)
-    
-    # Register core repo command
-    try:
-      logger.debug("Registering repo command")
-      repo.register_cli(app)
+            error_info = f"Repo command registration failed: {str(e)}"
+            failed_registrations.append(error_info)
+            logger.warning(error_info)
+
+        # Register template-based modules with app
+        module_classes = list(registry.iter_module_classes())
+        logger.debug(f"Registering {len(module_classes)} template-based modules")
+
+        for name, module_cls in module_classes:
+            try:
+                logger.debug(f"Registering module class: {module_cls.__name__}")
+                module_cls.register_cli(app)
+            except Exception as e:
+                error_info = (
+                    f"Registration failed for '{module_cls.__name__}': {str(e)}"
+                )
+                failed_registrations.append(error_info)
+                # Log warning but don't raise exception for individual module failures
+                logger.warning(error_info)
+                console.print(f"[yellow]Warning:[/yellow] {error_info}")
+
+        # If we have no modules registered at all, that's a critical error
+        if not module_classes and not failed_imports:
+            raise RuntimeError("No modules found to register")
+
+        # Log summary
+        successful_modules = len(module_classes) - len(failed_registrations)
+        logger.info(
+            f"Application initialized: {successful_modules} modules registered successfully"
+        )
+
+        if failed_imports:
+            logger.info(f"Module import failures: {len(failed_imports)}")
+        if failed_registrations:
+            logger.info(f"Module registration failures: {len(failed_registrations)}")
+
     except Exception as e:
-      error_info = f"Repo command registration failed: {str(e)}"
-      failed_registrations.append(error_info)
-      logger.warning(error_info)
-    
-    # Register template-based modules with app
-    module_classes = list(registry.iter_module_classes())
-    logger.debug(f"Registering {len(module_classes)} template-based modules")
-    
-    for name, module_cls in module_classes:
-      try:
-        logger.debug(f"Registering module class: {module_cls.__name__}")
-        module_cls.register_cli(app)
-      except Exception as e:
-        error_info = f"Registration failed for '{module_cls.__name__}': {str(e)}"
-        failed_registrations.append(error_info)
-        # Log warning but don't raise exception for individual module failures
-        logger.warning(error_info)
-        console.print(f"[yellow]Warning:[/yellow] {error_info}")
-    
-    # If we have no modules registered at all, that's a critical error
-    if not module_classes and not failed_imports:
-      raise RuntimeError("No modules found to register")
-    
-    # Log summary
-    successful_modules = len(module_classes) - len(failed_registrations)
-    logger.info(f"Application initialized: {successful_modules} modules registered successfully")
-    
-    if failed_imports:
-      logger.info(f"Module import failures: {len(failed_imports)}")
-    if failed_registrations:
-      logger.info(f"Module registration failures: {len(failed_registrations)}")
-      
-  except Exception as e:
-    error_details = []
-    if failed_imports:
-      error_details.extend(["Import failures:"] + [f"  - {err}" for err in failed_imports])
-    if failed_registrations:
-      error_details.extend(["Registration failures:"] + [f"  - {err}" for err in failed_registrations])
-    
-    details = "\n".join(error_details) if error_details else str(e)
-    raise RuntimeError(f"Application initialization failed: {details}")
+        error_details = []
+        if failed_imports:
+            error_details.extend(
+                ["Import failures:"] + [f"  - {err}" for err in failed_imports]
+            )
+        if failed_registrations:
+            error_details.extend(
+                ["Registration failures:"]
+                + [f"  - {err}" for err in failed_registrations]
+            )
+
+        details = "\n".join(error_details) if error_details else str(e)
+        raise RuntimeError(f"Application initialization failed: {details}")
+
 
 def run() -> None:
-  """Run the CLI application."""
-  try:
-    init_app()
-    app()
-  except (ValueError, RuntimeError) as e:
-    # Handle configuration and initialization errors cleanly
-    console.print(f"[bold red]Error:[/bold red] {e}")
-    sys.exit(1)
-  except ImportError as e:
-    # Handle module import errors with detailed info
-    console.print(f"[bold red]Module Import Error:[/bold red] {e}")
-    sys.exit(1)
-  except KeyboardInterrupt:
-    # Handle Ctrl+C gracefully
-    console.print("\n[yellow]Operation cancelled by user[/yellow]")
-    sys.exit(130)
-  except Exception as e:
-    # Handle unexpected errors - show simplified message
-    console.print(f"[bold red]Unexpected error:[/bold red] {e}")
-    console.print("[dim]Use --log-level DEBUG for more details[/dim]")
-    sys.exit(1)
+    """Run the CLI application."""
+    try:
+        init_app()
+        app()
+    except (ValueError, RuntimeError) as e:
+        # Handle configuration and initialization errors cleanly
+        console.print(f"[bold red]Error:[/bold red] {e}")
+        sys.exit(1)
+    except ImportError as e:
+        # Handle module import errors with detailed info
+        console.print(f"[bold red]Module Import Error:[/bold red] {e}")
+        sys.exit(1)
+    except KeyboardInterrupt:
+        # Handle Ctrl+C gracefully
+        console.print("\n[yellow]Operation cancelled by user[/yellow]")
+        sys.exit(130)
+    except Exception as e:
+        # Handle unexpected errors - show simplified message
+        console.print(f"[bold red]Unexpected error:[/bold red] {e}")
+        console.print("[dim]Use --log-level DEBUG for more details[/dim]")
+        sys.exit(1)
+
 
 if __name__ == "__main__":
-  run()
+    run()

+ 956 - 562
cli/core/collection.py

@@ -11,577 +11,971 @@ logger = logging.getLogger(__name__)
 
 
 class VariableCollection:
-  """Manages variables grouped by sections and builds Jinja context."""
-
-  def __init__(self, spec: dict[str, Any]) -> None:
-    """Initialize VariableCollection from a specification dictionary.
-    
-    Args:
-        spec: Dictionary containing the complete variable specification structure
-              Expected format (as used in compose.py):
-              {
-                "section_key": {
-                  "title": "Section Title",
-                  "prompt": "Optional prompt text",
-                  "toggle": "optional_toggle_var_name", 
-                  "description": "Optional description",
-                  "vars": {
-                    "var_name": {
-                      "description": "Variable description",
-                      "type": "str",
-                      "default": "default_value",
-                      ...
+    """Manages variables grouped by sections and builds Jinja context."""
+
+    def __init__(self, spec: dict[str, Any]) -> None:
+        """Initialize VariableCollection from a specification dictionary.
+
+        Args:
+            spec: Dictionary containing the complete variable specification structure
+                  Expected format (as used in compose.py):
+                  {
+                    "section_key": {
+                      "title": "Section Title",
+                      "prompt": "Optional prompt text",
+                      "toggle": "optional_toggle_var_name",
+                      "description": "Optional description",
+                      "vars": {
+                        "var_name": {
+                          "description": "Variable description",
+                          "type": "str",
+                          "default": "default_value",
+                          ...
+                        }
+                      }
                     }
                   }
-                }
-              }
-    """
-    if not isinstance(spec, dict):
-      raise ValueError("Spec must be a dictionary")
-    
-    self._sections: Dict[str, VariableSection] = {}
-    # NOTE: The _variable_map provides a flat, O(1) lookup for any variable by its name,
-    # avoiding the need to iterate through sections. It stores references to the same
-    # Variable objects contained in the _set structure.
-    self._variable_map: Dict[str, Variable] = {}
-    self._initialize_sections(spec)
-    # Validate dependencies after all sections are loaded
-    self._validate_dependencies()
-
-  def _initialize_sections(self, spec: dict[str, Any]) -> None:
-    """Initialize sections from the spec."""
-    for section_key, section_data in spec.items():
-      if not isinstance(section_data, dict):
-        continue
-      
-      section = self._create_section(section_key, section_data)
-      # Guard against None from empty YAML sections (vars: with no content)
-      vars_data = section_data.get("vars") or {}
-      self._initialize_variables(section, vars_data)
-      self._sections[section_key] = section
-    
-    # Validate all variable names are unique across sections
-    self._validate_unique_variable_names()
-
-  def _create_section(self, key: str, data: dict[str, Any]) -> VariableSection:
-    """Create a VariableSection from data."""
-    section_init_data = {
-      "key": key,
-      "title": data.get("title", key.replace("_", " ").title()),
-      "description": data.get("description"),
-      "toggle": data.get("toggle"),
-      "required": data.get("required", key == "general"),
-      "needs": data.get("needs")
-    }
-    return VariableSection(section_init_data)
-
-  def _initialize_variables(self, section: VariableSection, vars_data: dict[str, Any]) -> None:
-    """Initialize variables for a section."""
-    # Guard against None from empty YAML sections
-    if vars_data is None:
-      vars_data = {}
-    
-    for var_name, var_data in vars_data.items():
-      var_init_data = {"name": var_name, **var_data}
-      variable = Variable(var_init_data)
-      section.variables[var_name] = variable
-      # NOTE: Populate the direct lookup map for efficient access.
-      self._variable_map[var_name] = variable
-    
-    # Validate toggle variable after all variables are added
-    self._validate_section_toggle(section)
-    # TODO: Add more section-level validation:
-    #   - Validate that required sections have at least one non-toggle variable
-    #   - Validate that enum variables have non-empty options lists
-    #   - Validate that variable names follow naming conventions (e.g., lowercase_with_underscores)
-    #   - Validate that default values are compatible with their type definitions
-
-  def _validate_unique_variable_names(self) -> None:
-    """Validate that all variable names are unique across all sections."""
-    var_to_sections: Dict[str, List[str]] = defaultdict(list)
-    
-    # Build mapping of variable names to sections
-    for section_key, section in self._sections.items():
-      for var_name in section.variables:
-        var_to_sections[var_name].append(section_key)
-    
-    # Find duplicates and format error
-    duplicates = {var: sections for var, sections in var_to_sections.items() if len(sections) > 1}
-    
-    if duplicates:
-      errors = ["Variable names must be unique across all sections, but found duplicates:"]
-      errors.extend(f"  - '{var}' appears in sections: {', '.join(secs)}" for var, secs in sorted(duplicates.items()))
-      errors.append("\nPlease rename variables to be unique or consolidate them into a single section.")
-      error_msg = "\n".join(errors)
-      logger.error(error_msg)
-      raise ValueError(error_msg)
-  
-  def _validate_section_toggle(self, section: VariableSection) -> None:
-    """Validate that toggle variable is of type bool if it exists.
-    
-    If the toggle variable doesn't exist (e.g., filtered out), removes the toggle.
-    
-    Args:
-        section: The section to validate
+        """
+        if not isinstance(spec, dict):
+            raise ValueError("Spec must be a dictionary")
+
+        self._sections: Dict[str, VariableSection] = {}
+        # NOTE: The _variable_map provides a flat, O(1) lookup for any variable by its name,
+        # avoiding the need to iterate through sections. It stores references to the same
+        # Variable objects contained in the _set structure.
+        self._variable_map: Dict[str, Variable] = {}
+        self._initialize_sections(spec)
+        # Validate dependencies after all sections are loaded
+        self._validate_dependencies()
+
+    def _initialize_sections(self, spec: dict[str, Any]) -> None:
+        """Initialize sections from the spec."""
+        for section_key, section_data in spec.items():
+            if not isinstance(section_data, dict):
+                continue
+
+            section = self._create_section(section_key, section_data)
+            # Guard against None from empty YAML sections (vars: with no content)
+            vars_data = section_data.get("vars") or {}
+            self._initialize_variables(section, vars_data)
+            self._sections[section_key] = section
+
+        # Validate all variable names are unique across sections
+        self._validate_unique_variable_names()
+
+    def _create_section(self, key: str, data: dict[str, Any]) -> VariableSection:
+        """Create a VariableSection from data."""
+        # Build section init data with only explicitly provided fields
+        # This prevents None values from overriding module spec values during merge
+        section_init_data = {
+            "key": key,
+            "title": data.get("title", key.replace("_", " ").title()),
+        }
         
-    Raises:
-        ValueError: If toggle variable exists but is not boolean type
-    """
-    if not section.toggle:
-      return
-    
-    toggle_var = section.variables.get(section.toggle)
-    if not toggle_var:
-      # Toggle variable doesn't exist (e.g., was filtered out) - remove toggle metadata
-      section.toggle = None
-      return
-    
-    if toggle_var.type != "bool":
-      raise ValueError(
-        f"Section '{section.key}' toggle variable '{section.toggle}' must be type 'bool', "
-        f"but is type '{toggle_var.type}'"
-      )
-  
-  def _validate_dependencies(self) -> None:
-    """Validate section dependencies for cycles and missing references.
-    
-    Raises:
-        ValueError: If circular dependencies or missing section references are found
-    """
-    # Check for missing dependencies
-    for section_key, section in self._sections.items():
-      for dep in section.needs:
-        if dep not in self._sections:
-          raise ValueError(
-            f"Section '{section_key}' depends on '{dep}', but '{dep}' does not exist"
-          )
-    
-    # Check for circular dependencies using depth-first search
-    visited = set()
-    rec_stack = set()
-    
-    def has_cycle(section_key: str) -> bool:
-      visited.add(section_key)
-      rec_stack.add(section_key)
-      
-      section = self._sections[section_key]
-      for dep in section.needs:
-        if dep not in visited:
-          if has_cycle(dep):
+        # Only add optional fields if explicitly provided in the source data
+        if "description" in data:
+            section_init_data["description"] = data["description"]
+        if "toggle" in data:
+            section_init_data["toggle"] = data["toggle"]
+        if "required" in data:
+            section_init_data["required"] = data["required"]
+        elif key == "general":
+            section_init_data["required"] = True
+        if "needs" in data:
+            section_init_data["needs"] = data["needs"]
+            
+        return VariableSection(section_init_data)
+
+    def _initialize_variables(
+        self, section: VariableSection, vars_data: dict[str, Any]
+    ) -> None:
+        """Initialize variables for a section."""
+        # Guard against None from empty YAML sections
+        if vars_data is None:
+            vars_data = {}
+
+        for var_name, var_data in vars_data.items():
+            var_init_data = {"name": var_name, "parent_section": section, **var_data}
+            variable = Variable(var_init_data)
+            section.variables[var_name] = variable
+            # NOTE: Populate the direct lookup map for efficient access.
+            self._variable_map[var_name] = variable
+
+        # Validate toggle variable after all variables are added
+        self._validate_section_toggle(section)
+        # TODO: Add more section-level validation:
+        #   - Validate that required sections have at least one non-toggle variable
+        #   - Validate that enum variables have non-empty options lists
+        #   - Validate that variable names follow naming conventions (e.g., lowercase_with_underscores)
+        #   - Validate that default values are compatible with their type definitions
+
+    def _validate_unique_variable_names(self) -> None:
+        """Validate that all variable names are unique across all sections."""
+        var_to_sections: Dict[str, List[str]] = defaultdict(list)
+
+        # Build mapping of variable names to sections
+        for section_key, section in self._sections.items():
+            for var_name in section.variables:
+                var_to_sections[var_name].append(section_key)
+
+        # Find duplicates and format error
+        duplicates = {
+            var: sections
+            for var, sections in var_to_sections.items()
+            if len(sections) > 1
+        }
+
+        if duplicates:
+            errors = [
+                "Variable names must be unique across all sections, but found duplicates:"
+            ]
+            errors.extend(
+                f"  - '{var}' appears in sections: {', '.join(secs)}"
+                for var, secs in sorted(duplicates.items())
+            )
+            errors.append(
+                "\nPlease rename variables to be unique or consolidate them into a single section."
+            )
+            error_msg = "\n".join(errors)
+            logger.error(error_msg)
+            raise ValueError(error_msg)
+
+    def _validate_section_toggle(self, section: VariableSection) -> None:
+        """Validate that toggle variable is of type bool if it exists.
+
+        If the toggle variable doesn't exist (e.g., filtered out), removes the toggle.
+
+        Args:
+            section: The section to validate
+
+        Raises:
+            ValueError: If toggle variable exists but is not boolean type
+        """
+        if not section.toggle:
+            return
+
+        toggle_var = section.variables.get(section.toggle)
+        if not toggle_var:
+            # Toggle variable doesn't exist (e.g., was filtered out) - remove toggle metadata
+            section.toggle = None
+            return
+
+        if toggle_var.type != "bool":
+            raise ValueError(
+                f"Section '{section.key}' toggle variable '{section.toggle}' must be type 'bool', "
+                f"but is type '{toggle_var.type}'"
+            )
+
+    @staticmethod
+    def _parse_need(need_str: str) -> tuple[str, Optional[Any]]:
+        """Parse a need string into variable name and expected value(s).
+
+        Supports three formats:
+        1. New format with multiple values: "variable_name=value1,value2" - checks if variable equals any value
+        2. New format with single value: "variable_name=value" - checks if variable equals value
+        3. Old format (backwards compatibility): "section_name" - checks if section is enabled
+
+        Args:
+            need_str: Need specification string
+
+        Returns:
+            Tuple of (variable_or_section_name, expected_value)
+            For old format, expected_value is None (means check section enabled)
+            For new format, expected_value is the string value(s) after '=' (string or list)
+
+        Examples:
+            "traefik_enabled=true" -> ("traefik_enabled", "true")
+            "storage_mode=nfs" -> ("storage_mode", "nfs")
+            "network_mode=bridge,macvlan" -> ("network_mode", ["bridge", "macvlan"])
+            "traefik" -> ("traefik", None)  # Old format: section name
+        """
+        if "=" in need_str:
+            # New format: variable=value or variable=value1,value2
+            parts = need_str.split("=", 1)
+            var_name = parts[0].strip()
+            value_part = parts[1].strip()
+
+            # Check if multiple values are provided (comma-separated)
+            if "," in value_part:
+                values = [v.strip() for v in value_part.split(",")]
+                return (var_name, values)
+            else:
+                return (var_name, value_part)
+        else:
+            # Old format: section name (backwards compatibility)
+            return (need_str.strip(), None)
+
+    def _is_need_satisfied(self, need_str: str) -> bool:
+        """Check if a single need condition is satisfied.
+
+        Args:
+            need_str: Need specification ("variable=value", "variable=value1,value2" or "section_name")
+
+        Returns:
+            True if need is satisfied, False otherwise
+        """
+        var_or_section, expected_value = self._parse_need(need_str)
+
+        if expected_value is None:
+            # Old format: check if section is enabled (backwards compatibility)
+            section = self._sections.get(var_or_section)
+            if not section:
+                logger.warning(f"Need references missing section '{var_or_section}'")
+                return False
+            return section.is_enabled()
+        else:
+            # New format: check if variable has expected value(s)
+            variable = self._variable_map.get(var_or_section)
+            if not variable:
+                logger.warning(f"Need references missing variable '{var_or_section}'")
+                return False
+
+            # Convert actual value for comparison
+            try:
+                actual_value = variable.convert(variable.value)
+
+                # Handle multiple expected values (comma-separated in needs)
+                if isinstance(expected_value, list):
+                    # Check if actual value matches any of the expected values
+                    for expected in expected_value:
+                        expected_converted = variable.convert(expected)
+
+                        # Handle boolean comparisons specially
+                        if variable.type == "bool":
+                            if bool(actual_value) == bool(expected_converted):
+                                return True
+                        else:
+                            # String comparison for other types
+                            if actual_value is not None and str(actual_value) == str(
+                                expected_converted
+                            ):
+                                return True
+                    return False  # None of the expected values matched
+                else:
+                    # Single expected value (original behavior)
+                    expected_converted = variable.convert(expected_value)
+
+                    # Handle boolean comparisons specially
+                    if variable.type == "bool":
+                        return bool(actual_value) == bool(expected_converted)
+
+                    # String comparison for other types
+                    return (
+                        str(actual_value) == str(expected_converted)
+                        if actual_value is not None
+                        else False
+                    )
+            except Exception as e:
+                logger.debug(f"Failed to compare need '{need_str}': {e}")
+                return False
+
+    def _validate_dependencies(self) -> None:
+        """Validate section dependencies for cycles and missing references.
+
+        Raises:
+            ValueError: If circular dependencies or missing section references are found
+        """
+        # Check for missing dependencies in sections
+        for section_key, section in self._sections.items():
+            for dep in section.needs:
+                var_or_section, expected_value = self._parse_need(dep)
+
+                if expected_value is None:
+                    # Old format: validate section exists
+                    if var_or_section not in self._sections:
+                        raise ValueError(
+                            f"Section '{section_key}' depends on '{var_or_section}', but '{var_or_section}' does not exist"
+                        )
+                else:
+                    # New format: validate variable exists
+                    # NOTE: We only warn here, not raise an error, because the variable might be
+                    # added later during merge with module spec. The actual runtime check in
+                    # _is_need_satisfied() will handle missing variables gracefully.
+                    if var_or_section not in self._variable_map:
+                        logger.debug(
+                            f"Section '{section_key}' has need '{dep}', but variable '{var_or_section}' "
+                            f"not found (might be added during merge)"
+                        )
+
+        # Check for missing dependencies in variables
+        for var_name, variable in self._variable_map.items():
+            for dep in variable.needs:
+                dep_var, expected_value = self._parse_need(dep)
+                if expected_value is not None:  # Only validate new format
+                    if dep_var not in self._variable_map:
+                        # NOTE: We only warn here, not raise an error, because the variable might be
+                        # added later during merge with module spec. The actual runtime check in
+                        # _is_need_satisfied() will handle missing variables gracefully.
+                        logger.debug(
+                            f"Variable '{var_name}' has need '{dep}', but variable '{dep_var}' "
+                            f"not found (might be added during merge)"
+                        )
+
+        # Check for circular dependencies using depth-first search
+        # Note: Only checks section-level dependencies in old format (section names)
+        # Variable-level dependencies (variable=value) don't create cycles in the same way
+        visited = set()
+        rec_stack = set()
+
+        def has_cycle(section_key: str) -> bool:
+            visited.add(section_key)
+            rec_stack.add(section_key)
+
+            section = self._sections[section_key]
+            for dep in section.needs:
+                # Only check circular deps for old format (section references)
+                dep_name, expected_value = self._parse_need(dep)
+                if expected_value is None and dep_name in self._sections:
+                    # Old format section dependency - check for cycles
+                    if dep_name not in visited:
+                        if has_cycle(dep_name):
+                            return True
+                    elif dep_name in rec_stack:
+                        raise ValueError(
+                            f"Circular dependency detected: '{section_key}' depends on '{dep_name}', "
+                            f"which creates a cycle"
+                        )
+
+            rec_stack.remove(section_key)
+            return False
+
+        for section_key in self._sections:
+            if section_key not in visited:
+                has_cycle(section_key)
+
+    def is_section_satisfied(self, section_key: str) -> bool:
+        """Check if all dependencies for a section are satisfied.
+
+        Supports both formats:
+        - Old format: "section_name" - checks if section is enabled (backwards compatible)
+        - New format: "variable=value" - checks if variable has specific value
+
+        Args:
+            section_key: The key of the section to check
+
+        Returns:
+            True if all dependencies are satisfied, False otherwise
+        """
+        section = self._sections.get(section_key)
+        if not section:
+            return False
+
+        # No dependencies = always satisfied
+        if not section.needs:
             return True
-        elif dep in rec_stack:
-          raise ValueError(
-            f"Circular dependency detected: '{section_key}' depends on '{dep}', "
-            f"which creates a cycle"
-          )
-      
-      rec_stack.remove(section_key)
-      return False
-    
-    for section_key in self._sections:
-      if section_key not in visited:
-        has_cycle(section_key)
-  
-  def is_section_satisfied(self, section_key: str) -> bool:
-    """Check if all dependencies for a section are satisfied.
-    
-    A dependency is satisfied if:
-    1. The dependency section exists
-    2. The dependency section is enabled (if it has a toggle)
-    
-    Args:
-        section_key: The key of the section to check
-        
-    Returns:
-        True if all dependencies are satisfied, False otherwise
-    """
-    section = self._sections.get(section_key)
-    if not section:
-      return False
-    
-    # No dependencies = always satisfied
-    if not section.needs:
-      return True
-    
-    # Check each dependency
-    for dep_key in section.needs:
-      dep_section = self._sections.get(dep_key)
-      if not dep_section:
-        logger.warning(f"Section '{section_key}' depends on missing section '{dep_key}'")
-        return False
-      
-      # Check if dependency is enabled
-      if not dep_section.is_enabled():
-        logger.debug(f"Section '{section_key}' dependency '{dep_key}' is disabled")
-        return False
-    
-    return True
-
-  def sort_sections(self) -> None:
-    """Sort sections with the following priority:
-    
-    1. Dependencies come before dependents (topological sort)
-    2. Required sections first (in their original order)
-    3. Enabled sections with satisfied dependencies next (in their original order)
-    4. Disabled sections or sections with unsatisfied dependencies last (in their original order)
-    
-    This maintains the original ordering within each group while organizing
-    sections logically for display and user interaction, and ensures that
-    sections are prompted in the correct dependency order.
-    """
-    # First, perform topological sort to respect dependencies
-    sorted_keys = self._topological_sort()
-    
-    # Then apply priority sorting within dependency groups
-    section_items = [(key, self._sections[key]) for key in sorted_keys]
-    
-    # Define sort key: (priority, original_index)
-    # Priority: 0 = required, 1 = enabled with satisfied dependencies, 2 = disabled or unsatisfied dependencies
-    def get_sort_key(item_with_index):
-      index, (key, section) = item_with_index
-      if section.required:
-        priority = 0
-      elif section.is_enabled() and self.is_section_satisfied(key):
-        priority = 1
-      else:
-        priority = 2
-      return (priority, index)
-    
-    # Sort with original index to maintain order within each priority group
-    # Note: This preserves the topological order from earlier
-    sorted_items = sorted(
-      enumerate(section_items),
-      key=get_sort_key
-    )
-    
-    # Rebuild _sections dict in new order
-    self._sections = {key: section for _, (key, section) in sorted_items}
-  
-  def _topological_sort(self) -> List[str]:
-    """Perform topological sort on sections based on dependencies using Kahn's algorithm."""
-    in_degree = {key: len(section.needs) for key, section in self._sections.items()}
-    queue = [key for key, degree in in_degree.items() if degree == 0]
-    queue.sort(key=lambda k: list(self._sections.keys()).index(k))  # Preserve original order
-    result = []
-    
-    while queue:
-      current = queue.pop(0)
-      result.append(current)
-      
-      # Update in-degree for dependent sections
-      for key, section in self._sections.items():
-        if current in section.needs:
-          in_degree[key] -= 1
-          if in_degree[key] == 0:
-            queue.append(key)
-    
-    # Fallback to original order if cycle detected
-    if len(result) != len(self._sections):
-      logger.warning("Topological sort incomplete - using original order")
-      return list(self._sections.keys())
-    
-    return result
-
-  def get_sections(self) -> Dict[str, VariableSection]:
-    """Get all sections in the collection."""
-    return self._sections.copy()
-  
-  def get_section(self, key: str) -> Optional[VariableSection]:
-    """Get a specific section by its key."""
-    return self._sections.get(key)
-  
-  def has_sections(self) -> bool:
-    """Check if the collection has any sections."""
-    return bool(self._sections)
-
-  def get_all_values(self) -> dict[str, Any]:
-    """Get all variable values as a dictionary."""
-    # NOTE: Uses _variable_map for O(1) access
-    return {name: var.convert(var.value) for name, var in self._variable_map.items()}
-  
-  def get_satisfied_values(self) -> dict[str, Any]:
-    """Get variable values only from sections with satisfied dependencies.
-    
-    This respects both toggle states and section dependencies, ensuring that:
-    - Variables from disabled sections (toggle=false) are excluded
-    - Variables from sections with unsatisfied dependencies are excluded
-    
-    Returns:
-        Dictionary of variable names to values for satisfied sections only
-    """
-    satisfied_values = {}
-    
-    for section_key, section in self._sections.items():
-      # Skip sections with unsatisfied dependencies
-      if not self.is_section_satisfied(section_key):
-        logger.debug(f"Excluding variables from section '{section_key}' - dependencies not satisfied")
-        continue
-      
-      # Skip disabled sections (toggle check)
-      if not section.is_enabled():
-        logger.debug(f"Excluding variables from section '{section_key}' - section is disabled")
-        continue
-      
-      # Include all variables from this satisfied section
-      for var_name, variable in section.variables.items():
-        satisfied_values[var_name] = variable.convert(variable.value)
-    
-    return satisfied_values
-
-  def get_sensitive_variables(self) -> Dict[str, Any]:
-    """Get only the sensitive variables with their values."""
-    return {name: var.value for name, var in self._variable_map.items() if var.sensitive and var.value}
-
-  def apply_defaults(self, defaults: dict[str, Any], origin: str = "cli") -> list[str]:
-    """Apply default values to variables, updating their origin.
-    
-    Args:
-        defaults: Dictionary mapping variable names to their default values
-        origin: Source of these defaults (e.g., 'config', 'cli')
-        
-    Returns:
-        List of variable names that were successfully updated
-    """
-    # NOTE: This method uses the _variable_map for a significant performance gain,
-    # as it allows direct O(1) lookup of variables instead of iterating
-    # through all sections to find a match.
-    successful = []
-    errors = []
-    
-    for var_name, value in defaults.items():
-      try:
+
+        # Check each dependency using the unified need satisfaction logic
+        for need in section.needs:
+            if not self._is_need_satisfied(need):
+                logger.debug(f"Section '{section_key}' need '{need}' is not satisfied")
+                return False
+
+        return True
+
+    def is_variable_satisfied(self, var_name: str) -> bool:
+        """Check if all dependencies for a variable are satisfied.
+
+        A variable is satisfied if all its needs are met.
+        Needs are specified as "variable_name=value".
+
+        Args:
+            var_name: The name of the variable to check
+
+        Returns:
+            True if all dependencies are satisfied, False otherwise
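+
+        Example (hypothetical spec): a variable with
+        needs: ["network_mode=macvlan"] is satisfied only while the
+        variable network_mode is set to "macvlan"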
+        """
         variable = self._variable_map.get(var_name)
         if not variable:
-          logger.warning(f"Variable '{var_name}' not found in template")
-          continue
-        
-        # Store original value before overriding (for display purposes)
-        # Only store if this is the first time config is being applied
-        if origin == "config" and not hasattr(variable, '_original_stored'):
-          variable.original_value = variable.value
-          variable._original_stored = True
-        
-        # Convert and set the new value
-        converted_value = variable.convert(value)
-        variable.value = converted_value
+            return False
+
+        # No dependencies = always satisfied
+        if not variable.needs:
+            return True
+
+        # Check each dependency
+        for need in variable.needs:
+            if not self._is_need_satisfied(need):
+                logger.debug(f"Variable '{var_name}' need '{need}' is not satisfied")
+                return False
+
+        return True
+
+    def reset_disabled_bool_variables(self) -> list[str]:
+        """Reset bool variables with unsatisfied dependencies to False.
         
-        # Set origin to the current source (not a chain)
-        variable.origin = origin
+        This ensures that disabled bool variables don't accidentally remain True
+        and cause confusion in templates or configuration.
         
-        successful.append(var_name)
-          
-      except ValueError as e:
-        error_msg = f"Invalid value for '{var_name}': {value} - {e}"
-        errors.append(error_msg)
-        logger.error(error_msg)
-    
-    if errors:
-      logger.warning(f"Some defaults failed to apply: {'; '.join(errors)}")
-    
-    return successful
-  
-  def validate_all(self) -> None:
-    """Validate all variables in the collection, skipping disabled and unsatisfied sections."""
-    errors: list[str] = []
-
-    for section_key, section in self._sections.items():
-      # Skip sections with unsatisfied dependencies or disabled via toggle
-      if not self.is_section_satisfied(section_key) or not section.is_enabled():
-        logger.debug(f"Skipping validation for section '{section_key}'")
-        continue
-
-      # Validate each variable in the section
-      for var_name, variable in section.variables.items():
-        try:
-          # Skip autogenerated variables when empty
-          if variable.autogenerated and not variable.value:
-            continue
-          
-          # Check required fields
-          if variable.value is None:
-            if variable.is_required():
-              errors.append(f"{section.key}.{var_name} (required - no default provided)")
-            continue
-
-          # Validate typed value
-          typed = variable.convert(variable.value)
-          if variable.type not in ("bool",) and not typed:
-            msg = f"{section.key}.{var_name}"
-            errors.append(f"{msg} (required - cannot be empty)" if variable.is_required() else f"{msg} (empty)")
-
-        except ValueError as e:
-          errors.append(f"{section.key}.{var_name} (invalid format: {e})")
-
-    if errors:
-      error_msg = "Variable validation failed: " + ", ".join(errors)
-      logger.error(error_msg)
-      raise ValueError(error_msg)
-
-  def merge(self, other_spec: Union[Dict[str, Any], 'VariableCollection'], origin: str = "override") -> 'VariableCollection':
-    """Merge another spec or VariableCollection into this one with precedence tracking.
-    
-    OPTIMIZED: Works directly on objects without dict conversions for better performance.
-    
-    The other spec/collection has higher precedence and will override values in self.
-    Creates a new VariableCollection with merged data.
-    
-    Args:
-        other_spec: Either a spec dictionary or another VariableCollection to merge
-        origin: Origin label for variables from other_spec (e.g., 'template', 'config')
+        Note: CLI-provided variables are NOT reset here - they are validated
+        later in validate_all() to provide better error messages.
         
-    Returns:
-        New VariableCollection with merged data
+        Returns:
+            List of variable names that were reset
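+
+        Example (hypothetical spec): if section "monitoring" is toggled
+        off, a bool variable "monitoring_alerts" that is True (and not
+        set via CLI) is reset to False and appears in the returned list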
+        """
+        reset_vars = []
         
-    Example:
-        module_vars = VariableCollection(module_spec)
-        template_vars = module_vars.merge(template_spec, origin='template')
-        # Variables from template_spec override module_spec
-        # Origins tracked: 'module' or 'module -> template'
-    """
-    # Convert dict to VariableCollection if needed (only once)
-    if isinstance(other_spec, dict):
-      other = VariableCollection(other_spec)
-    else:
-      other = other_spec
-    
-    # Create new collection without calling __init__ (optimization)
-    merged = VariableCollection.__new__(VariableCollection)
-    merged._sections = {}
-    merged._variable_map = {}
-    
-    # First pass: clone sections from self
-    for section_key, self_section in self._sections.items():
-      if section_key in other._sections:
-        # Section exists in both - will merge
-        merged._sections[section_key] = self._merge_sections(
-          self_section, 
-          other._sections[section_key], 
-          origin
-        )
-      else:
-        # Section only in self - clone it
-        merged._sections[section_key] = self_section.clone()
-    
-    # Second pass: add sections that only exist in other
-    for section_key, other_section in other._sections.items():
-      if section_key not in merged._sections:
-        # New section from other - clone with origin update
-        merged._sections[section_key] = other_section.clone(origin_update=origin)
-    
-    # Rebuild variable map for O(1) lookups
-    for section in merged._sections.values():
-      for var_name, variable in section.variables.items():
-        merged._variable_map[var_name] = variable
-    
-    return merged
-  
-  def _merge_sections(self, self_section: VariableSection, other_section: VariableSection, origin: str) -> VariableSection:
-    """Merge two sections, with other_section taking precedence."""
-    merged_section = self_section.clone()
-    
-    # Update section metadata from other (other takes precedence)
-    for attr in ('title', 'description', 'toggle'):
-      if getattr(other_section, attr):
-        setattr(merged_section, attr, getattr(other_section, attr))
-    
-    merged_section.required = other_section.required
-    if other_section.needs:
-      merged_section.needs = other_section.needs.copy()
-    
-    # Merge variables
-    for var_name, other_var in other_section.variables.items():
-      if var_name in merged_section.variables:
-        # Variable exists in both - merge with other taking precedence
-        self_var = merged_section.variables[var_name]
+        for section_key, section in self._sections.items():
+            # Check if section dependencies are satisfied
+            section_satisfied = self.is_section_satisfied(section_key)
+            is_enabled = section.is_enabled()
+
+            for var_name, variable in section.variables.items():
+                # Only process bool variables
+                if variable.type != "bool":
+                    continue
+
+                # Check if variable's own dependencies are satisfied
+                var_satisfied = self.is_variable_satisfied(var_name)
+
+                # If section is disabled OR variable dependencies aren't met, reset to False
+                if not section_satisfied or not is_enabled or not var_satisfied:
+                    # Only reset if current value is not already False
+                    if variable.value is not False:
+                        # Don't reset CLI-provided variables - they'll be validated later
+                        if variable.origin == "cli":
+                            continue
+
+                        # Store original value if not already stored (for display purposes)
+                        if not hasattr(variable, "_original_disabled"):
+                            variable._original_disabled = variable.value
+
+                        variable.value = False
+                        reset_vars.append(var_name)
+                        logger.debug(
+                            f"Reset disabled bool variable '{var_name}' to False "
+                            f"(section satisfied: {section_satisfied}, enabled: {is_enabled}, "
+                            f"var satisfied: {var_satisfied})"
+                        )
         
-        # Build update dict with ONLY explicitly provided fields from other
-        update = {'origin': origin}
-        field_map = {
-          'type': other_var.type,
-          'description': other_var.description,
-          'prompt': other_var.prompt,
-          'options': other_var.options,
-          'sensitive': other_var.sensitive,
-          'extra': other_var.extra
+        return reset_vars
+
+    def sort_sections(self) -> None:
+        """Sort sections with the following priority:
+
+        1. Dependencies come before dependents (topological sort)
+        2. Required sections first (in their original order)
+        3. Enabled sections with satisfied dependencies next (in their original order)
+        4. Disabled sections or sections with unsatisfied dependencies last (in their original order)
+
+        This maintains the original ordering within each group while organizing
+        sections logically for display and user interaction, and ensures that
+        sections are prompted in the correct dependency order.
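+
+        Example (hypothetical spec order): a required "general" section
+        sorts first, an enabled "network" section next, and a disabled
+        "monitoring" section last, regardless of their order in the spec.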
+        """
+        # First, perform topological sort to respect dependencies
+        sorted_keys = self._topological_sort()
+
+        # Then apply priority sorting within dependency groups
+        section_items = [(key, self._sections[key]) for key in sorted_keys]
+
+        # Define sort key: (priority, original_index)
+        # Priority: 0 = required, 1 = enabled with satisfied dependencies, 2 = disabled or unsatisfied dependencies
+        def get_sort_key(item_with_index):
+            index, (key, section) = item_with_index
+            if section.required:
+                priority = 0
+            elif section.is_enabled() and self.is_section_satisfied(key):
+                priority = 1
+            else:
+                priority = 2
+            return (priority, index)
+
+        # Sort with original index to maintain order within each priority group
+        # Note: This preserves the topological order from earlier
+        sorted_items = sorted(enumerate(section_items), key=get_sort_key)
+
+        # Rebuild _sections dict in new order
+        self._sections = {key: section for _, (key, section) in sorted_items}
+
+        # NOTE: Sort variables within each section by their dependencies.
+        # This is critical for correct behavior in both display and prompts:
+        # 1. DISPLAY: Variables are shown in logical order (dependencies before dependents)
+        # 2. PROMPTS: Users are asked for dependency values BEFORE dependent values
+        #    Example: network_mode (bridge/host/macvlan) is prompted before
+        #             network_macvlan_ipv4_address (which needs network_mode=macvlan)
+        # 3. VALIDATION: Ensures config/CLI overrides can be checked in correct order
+        # Without this sorting, users would be prompted for irrelevant variables or see
+        # confusing variable order in the UI.
+        for section in self._sections.values():
+            section.sort_variables(self._is_need_satisfied)
+
+    def _topological_sort(self) -> List[str]:
+        """Perform topological sort on sections based on dependencies using Kahn's algorithm.
+
+        Only old-format section references in needs impose ordering here;
+        new-format "variable=value" needs are value checks and do not
+        participate in section ordering.
+        """
+
+        def section_deps(section: VariableSection) -> list[str]:
+            """Return only the section-level (old format) dependencies that exist."""
+            deps = []
+            for need in section.needs:
+                dep_name, expected_value = self._parse_need(need)
+                if expected_value is None and dep_name in self._sections:
+                    deps.append(dep_name)
+            return deps
+
+        dependencies = {
+            key: section_deps(section) for key, section in self._sections.items()
+        }
+        in_degree = {key: len(deps) for key, deps in dependencies.items()}
+        original_order = {key: index for index, key in enumerate(self._sections)}
+        queue = sorted(
+            (key for key, degree in in_degree.items() if degree == 0),
+            key=lambda k: original_order[k],
+        )  # Preserve original order
+        result = []
+
+        while queue:
+            current = queue.pop(0)
+            result.append(current)
+
+            # Update in-degree for sections that depend on the current one
+            for key, deps in dependencies.items():
+                if current in deps:
+                    in_degree[key] -= 1
+                    if in_degree[key] == 0:
+                        queue.append(key)
+
+        # Fallback to original order if a cycle (or unresolvable need) remains
+        if len(result) != len(self._sections):
+            logger.warning("Topological sort incomplete - using original order")
+            return list(self._sections.keys())
+
+        return result
+
+    def get_sections(self) -> Dict[str, VariableSection]:
+        """Get all sections in the collection."""
+        return self._sections.copy()
+
+    def get_section(self, key: str) -> Optional[VariableSection]:
+        """Get a specific section by its key."""
+        return self._sections.get(key)
+
+    def has_sections(self) -> bool:
+        """Check if the collection has any sections."""
+        return bool(self._sections)
+
+    def get_all_values(self) -> dict[str, Any]:
+        """Get all variable values as a dictionary."""
+        # NOTE: Uses _variable_map for O(1) access
+        return {
+            name: var.convert(var.value) for name, var in self._variable_map.items()
         }
-        
-        # Add fields that were explicitly provided and have values
-        for field, value in field_map.items():
-          if field in other_var._explicit_fields and value:
-            update[field] = value
-        
-        # Special handling for value/default
-        if ('value' in other_var._explicit_fields or 'default' in other_var._explicit_fields) and other_var.value is not None:
-          update['value'] = other_var.value
-        
-        merged_section.variables[var_name] = self_var.clone(update=update)
-      else:
-        # New variable from other - clone with origin
-        merged_section.variables[var_name] = other_var.clone(update={'origin': origin})
-    
-    return merged_section
-  
-  def filter_to_used(self, used_variables: Set[str], keep_sensitive: bool = True) -> 'VariableCollection':
-    """Filter collection to only variables that are used (or sensitive).
-    
-    OPTIMIZED: Works directly on objects without dict conversions for better performance.
-    
-    Creates a new VariableCollection containing only the variables in used_variables.
-    Sections with no remaining variables are removed.
-    
-    Args:
-        used_variables: Set of variable names that are actually used
-        keep_sensitive: If True, also keep sensitive variables even if not in used set
-        
-    Returns:
-        New VariableCollection with filtered variables
-        
-    Example:
-        all_vars = VariableCollection(spec)
-        used_vars = all_vars.filter_to_used({'var1', 'var2', 'var3'})
-        # Only var1, var2, var3 (and any sensitive vars) remain
-    """
-    # Create new collection without calling __init__ (optimization)
-    filtered = VariableCollection.__new__(VariableCollection)
-    filtered._sections = {}
-    filtered._variable_map = {}
-    
-    # Filter each section
-    for section_key, section in self._sections.items():
-      # Create a new section with same metadata
-      filtered_section = VariableSection({
-        'key': section.key,
-        'title': section.title,
-        'description': section.description,
-        'toggle': section.toggle,
-        'required': section.required,
-        'needs': section.needs.copy() if section.needs else None,
-      })
-      
-      # Clone only the variables that should be included
-      for var_name, variable in section.variables.items():
-        # Include if used OR if sensitive (and keep_sensitive is True)
-        should_include = (
-          var_name in used_variables or 
-          (keep_sensitive and variable.sensitive)
-        )
-        
-        if should_include:
-          filtered_section.variables[var_name] = variable.clone()
-      
-      # Only add section if it has variables
-      if filtered_section.variables:
-        filtered._sections[section_key] = filtered_section
-        # Add variables to map
-        for var_name, variable in filtered_section.variables.items():
-          filtered._variable_map[var_name] = variable
-    
-    return filtered
-  
-  def get_all_variable_names(self) -> Set[str]:
-    """Get set of all variable names across all sections.
-    
-    Returns:
-        Set of all variable names
-    """
-    return set(self._variable_map.keys())
+
+    def get_satisfied_values(self) -> dict[str, Any]:
+        """Get variable values only from sections with satisfied dependencies.
+
+        This respects both toggle states and section dependencies, ensuring that:
+        - Variables from disabled sections (toggle=false) are excluded EXCEPT required variables
+        - Variables from sections with unsatisfied dependencies are excluded
+        - Required variables are always included if their section dependencies are satisfied
+
+        Returns:
+            Dictionary of variable names to values for satisfied sections only
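+
+        Example (hypothetical sections): with "network" enabled and
+        "monitoring" disabled, all network variables are returned, but
+        only required monitoring variables are included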
+        """
+        satisfied_values = {}
+
+        for section_key, section in self._sections.items():
+            # Skip sections with unsatisfied dependencies (even required variables need satisfied deps)
+            if not self.is_section_satisfied(section_key):
+                logger.debug(
+                    f"Excluding variables from section '{section_key}' - dependencies not satisfied"
+                )
+                continue
+
+            # Check if section is enabled
+            is_enabled = section.is_enabled()
+
+            if is_enabled:
+                # Include all variables from enabled section
+                for var_name, variable in section.variables.items():
+                    satisfied_values[var_name] = variable.convert(variable.value)
+            else:
+                # Section is disabled - only include required variables
+                logger.debug(
+                    f"Section '{section_key}' is disabled - including only required variables"
+                )
+                for var_name, variable in section.variables.items():
+                    if variable.required:
+                        logger.debug(
+                            f"Including required variable '{var_name}' from disabled section '{section_key}'"
+                        )
+                        satisfied_values[var_name] = variable.convert(variable.value)
+
+        return satisfied_values
+
+    def get_sensitive_variables(self) -> Dict[str, Any]:
+        """Get only the sensitive variables with their values."""
+        return {
+            name: var.value
+            for name, var in self._variable_map.items()
+            if var.sensitive and var.value
+        }
+
+    def apply_defaults(
+        self, defaults: dict[str, Any], origin: str = "cli"
+    ) -> list[str]:
+        """Apply default values to variables, updating their origin.
+
+        Args:
+            defaults: Dictionary mapping variable names to their default values
+            origin: Source of these defaults (e.g., 'config', 'cli')
+
+        Returns:
+            List of variable names that were successfully updated
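+
+        Example (hypothetical call):
+            applied = variables.apply_defaults({"port": 8080}, origin="config")
+            # applied == ["port"] if the variable exists and the value converts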
+        """
+        # NOTE: This method uses the _variable_map for a significant performance gain,
+        # as it allows direct O(1) lookup of variables instead of iterating
+        # through all sections to find a match.
+        successful = []
+        errors = []
+
+        for var_name, value in defaults.items():
+            try:
+                variable = self._variable_map.get(var_name)
+                if not variable:
+                    logger.warning(f"Variable '{var_name}' not found in template")
+                    continue
+
+                # Check if this is a toggle variable for a required section
+                # If trying to set it to false, warn and skip the override
+                # (a plain `continue` here would only advance the inner loop,
+                # so a flag is used to skip the outer iteration)
+                skip_override = False
+                for section in self._sections.values():
+                    if section.required and section.toggle == var_name:
+                        # Convert value to bool to check if it's false
+                        try:
+                            bool_value = variable.convert(value)
+                            if not bool_value:
+                                logger.warning(
+                                    f"Ignoring attempt to disable toggle '{var_name}' "
+                                    f"for required section '{section.key}' via {origin}"
+                                )
+                                skip_override = True
+                        except Exception:
+                            pass  # If conversion fails, let normal validation handle it
+                        break
+                if skip_override:
+                    continue
+
+                # Check if variable's needs are satisfied
+                # If not, warn that the override will have no effect
+                if not self.is_variable_satisfied(var_name):
+                    # Build a friendly message about which needs aren't satisfied
+                    unmet_needs = []
+                    for need in variable.needs:
+                        if not self._is_need_satisfied(need):
+                            unmet_needs.append(need)
+                    needs_str = ", ".join(unmet_needs) if unmet_needs else "unknown"
+                    logger.warning(
+                        f"Setting '{var_name}' via {origin} will have no effect - needs not satisfied: {needs_str}"
+                    )
+                    # Continue anyway to store the value (it might become relevant later)
+
+                # Store original value before overriding (for display purposes)
+                # Only store if this is the first time config is being applied
+                if origin == "config" and not hasattr(variable, "_original_stored"):
+                    variable.original_value = variable.value
+                    variable._original_stored = True
+
+                # Convert and set the new value
+                converted_value = variable.convert(value)
+                variable.value = converted_value
+
+                # Set origin to the current source (not a chain)
+                variable.origin = origin
+
+                successful.append(var_name)
+
+            except ValueError as e:
+                error_msg = f"Invalid value for '{var_name}': {value} - {e}"
+                errors.append(error_msg)
+                logger.error(error_msg)
+
+        if errors:
+            logger.warning(f"Some defaults failed to apply: {'; '.join(errors)}")
+
+        return successful
+
+    def validate_all(self) -> None:
+        """Validate all variables in the collection.
+
+        Validates:
+        - All variables in enabled sections with satisfied dependencies
+        - Required variables even if their section is disabled (but dependencies must be satisfied)
+        - CLI-provided bool variables with unsatisfied dependencies
+        """
+        errors: list[str] = []
+
+        # First, check for CLI-provided bool variables with unsatisfied dependencies
+        for section_key, section in self._sections.items():
+            section_satisfied = self.is_section_satisfied(section_key)
+            is_enabled = section.is_enabled()
+
+            for var_name, variable in section.variables.items():
+                # Check CLI-provided bool variables with unsatisfied dependencies
+                if (
+                    variable.type == "bool"
+                    and variable.origin == "cli"
+                    and variable.value is not False
+                ):
+                    var_satisfied = self.is_variable_satisfied(var_name)
+
+                    if not section_satisfied or not is_enabled or not var_satisfied:
+                        # Build error message with unmet needs (use set to avoid duplicates)
+                        unmet_needs = set()
+                        if not section_satisfied:
+                            for need in section.needs:
+                                if not self._is_need_satisfied(need):
+                                    unmet_needs.add(need)
+                        if not var_satisfied:
+                            for need in variable.needs:
+                                if not self._is_need_satisfied(need):
+                                    unmet_needs.add(need)
+
+                        needs_str = (
+                            ", ".join(sorted(unmet_needs))
+                            if unmet_needs
+                            else "dependencies not satisfied"
+                        )
+                        errors.append(
+                            f"{section.key}.{var_name} (set via CLI to "
+                            f"{variable.value} but requires: {needs_str})"
+                        )
+
+        # Then validate all other variables
+        for section_key, section in self._sections.items():
+            # Skip sections with unsatisfied dependencies (even for required variables)
+            if not self.is_section_satisfied(section_key):
+                logger.debug(
+                    f"Skipping validation for section '{section_key}' - dependencies not satisfied"
+                )
+                continue
+
+            # Skip disabled sections entirely; a variable is only required
+            # while its section is actually enabled
+            if not section.is_enabled():
+                logger.debug(
+                    f"Skipping validation for section '{section_key}' - section is disabled"
+                )
+                continue
+
+            # Validate variables in the section
+            for var_name, variable in section.variables.items():
+
+                try:
+                    # Skip autogenerated variables when empty
+                    if variable.autogenerated and not variable.value:
+                        continue
+
+                    # Check required fields
+                    if variable.value is None:
+                        # Optional variables can be None/empty
+                        if hasattr(variable, "optional") and variable.optional:
+                            continue
+                        if variable.is_required():
+                            errors.append(
+                                f"{section.key}.{var_name} (required - no default provided)"
+                            )
+                        continue
+
+                    # Validate typed value
+                    typed = variable.convert(variable.value)
+                    if variable.type not in ("bool",) and not typed:
+                        msg = f"{section.key}.{var_name}"
+                        errors.append(
+                            f"{msg} (required - cannot be empty)"
+                            if variable.is_required()
+                            else f"{msg} (empty)"
+                        )
+
+                except ValueError as e:
+                    errors.append(f"{section.key}.{var_name} (invalid format: {e})")
+
+        if errors:
+            error_msg = "Variable validation failed: " + ", ".join(errors)
+            logger.error(error_msg)
+            raise ValueError(error_msg)
+
+    def merge(
+        self,
+        other_spec: Union[Dict[str, Any], "VariableCollection"],
+        origin: str = "override",
+    ) -> "VariableCollection":
+        """Merge another spec or VariableCollection into this one with precedence tracking.
+
+        OPTIMIZED: Works directly on objects without dict conversions for better performance.
+
+        The other spec/collection has higher precedence and will override values in self.
+        Creates a new VariableCollection with merged data.
+
+        Args:
+            other_spec: Either a spec dictionary or another VariableCollection to merge
+            origin: Origin label for variables from other_spec (e.g., 'template', 'config')
+
+        Returns:
+            New VariableCollection with merged data
+
+        Example:
+            module_vars = VariableCollection(module_spec)
+            template_vars = module_vars.merge(template_spec, origin='template')
+            # Variables from template_spec override module_spec
+            # Origins tracked: 'module' or 'template' (the most recent source, not a chain)
+        """
+        # Convert dict to VariableCollection if needed (only once)
+        if isinstance(other_spec, dict):
+            other = VariableCollection(other_spec)
+        else:
+            other = other_spec
+
+        # Create new collection without calling __init__ (optimization)
+        merged = VariableCollection.__new__(VariableCollection)
+        merged._sections = {}
+        merged._variable_map = {}
+
+        # First pass: clone sections from self
+        for section_key, self_section in self._sections.items():
+            if section_key in other._sections:
+                # Section exists in both - will merge
+                merged._sections[section_key] = self._merge_sections(
+                    self_section, other._sections[section_key], origin
+                )
+            else:
+                # Section only in self - clone it
+                merged._sections[section_key] = self_section.clone()
+
+        # Second pass: add sections that only exist in other
+        for section_key, other_section in other._sections.items():
+            if section_key not in merged._sections:
+                # New section from other - clone with origin update
+                merged._sections[section_key] = other_section.clone(
+                    origin_update=origin
+                )
+
+        # Rebuild variable map for O(1) lookups
+        for section in merged._sections.values():
+            for var_name, variable in section.variables.items():
+                merged._variable_map[var_name] = variable
+
+        # Validate dependencies after merge is complete
+        merged._validate_dependencies()
+
+        return merged
+
+    def _merge_sections(
+        self, self_section: VariableSection, other_section: VariableSection, origin: str
+    ) -> VariableSection:
+        """Merge two sections, with other_section taking precedence."""
+        merged_section = self_section.clone()
+
+        # Update section metadata from other (other takes precedence)
+        # Explicit null/empty values clear the property (reset mechanism)
+        for attr in ("title", "description", "toggle"):
+            if (
+                hasattr(other_section, "_explicit_fields")
+                and attr in other_section._explicit_fields
+            ):
+                # Set to the other value even if null/empty (enables explicit reset)
+                setattr(merged_section, attr, getattr(other_section, attr))
+
+        merged_section.required = other_section.required
+        # Respect explicit clears for dependencies (explicit null/empty clears, missing field preserves)
+        if (
+            hasattr(other_section, "_explicit_fields")
+            and "needs" in other_section._explicit_fields
+        ):
+            merged_section.needs = (
+                other_section.needs.copy() if other_section.needs else []
+            )
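+            # e.g. an override that explicitly sets needs: [] clears inherited
+            # dependencies, while omitting the field keeps them (illustrative)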
+
+        # Merge variables
+        for var_name, other_var in other_section.variables.items():
+            if var_name in merged_section.variables:
+                # Variable exists in both - merge with other taking precedence
+                self_var = merged_section.variables[var_name]
+
+                # Build update dict with ONLY explicitly provided fields from other
+                update = {"origin": origin}
+                field_map = {
+                    "type": other_var.type,
+                    "description": other_var.description,
+                    "prompt": other_var.prompt,
+                    "options": other_var.options,
+                    "sensitive": other_var.sensitive,
+                    "extra": other_var.extra,
+                }
+
+                # Add fields that were explicitly provided, even if falsy/empty
+                for field, value in field_map.items():
+                    if field in other_var._explicit_fields:
+                        update[field] = value
+
+                # For boolean flags, only copy if explicitly provided in other
+                # This prevents False defaults from overriding True values
+                for bool_field in ("optional", "autogenerated", "required"):
+                    if bool_field in other_var._explicit_fields:
+                        update[bool_field] = getattr(other_var, bool_field)
+
+                # Special handling for needs (allow explicit null/empty to clear)
+                if "needs" in other_var._explicit_fields:
+                    update["needs"] = (
+                        other_var.needs.copy() if other_var.needs else []
+                    )
+
+                # Special handling for value/default (allow explicit null to clear)
+                if (
+                    "value" in other_var._explicit_fields
+                    or "default" in other_var._explicit_fields
+                ):
+                    update["value"] = other_var.value
+
+                merged_section.variables[var_name] = self_var.clone(update=update)
+            else:
+                # New variable from other - clone with origin
+                merged_section.variables[var_name] = other_var.clone(
+                    update={"origin": origin}
+                )
+
+        return merged_section
+
+    def filter_to_used(
+        self, used_variables: Set[str], keep_sensitive: bool = True
+    ) -> "VariableCollection":
+        """Filter collection to only variables that are used (or sensitive).
+
+        OPTIMIZED: Works directly on objects without dict conversions for better performance.
+
+        Creates a new VariableCollection containing only the variables in used_variables.
+        Sections with no remaining variables are removed.
+
+        Args:
+            used_variables: Set of variable names that are actually used
+            keep_sensitive: If True, also keep sensitive variables even if not in used set
+
+        Returns:
+            New VariableCollection with filtered variables
+
+        Example:
+            all_vars = VariableCollection(spec)
+            used_vars = all_vars.filter_to_used({'var1', 'var2', 'var3'})
+            # Only var1, var2, var3 (and any sensitive vars) remain
+        """
+        # Create new collection without calling __init__ (optimization)
+        filtered = VariableCollection.__new__(VariableCollection)
+        filtered._sections = {}
+        filtered._variable_map = {}
+
+        # Filter each section
+        for section_key, section in self._sections.items():
+            # Create a new section with same metadata
+            filtered_section = VariableSection(
+                {
+                    "key": section.key,
+                    "title": section.title,
+                    "description": section.description,
+                    "toggle": section.toggle,
+                    "required": section.required,
+                    "needs": section.needs.copy() if section.needs else None,
+                }
+            )
+
+            # Clone only the variables that should be included
+            for var_name, variable in section.variables.items():
+                # Include if used OR if sensitive (and keep_sensitive is True)
+                should_include = var_name in used_variables or (
+                    keep_sensitive and variable.sensitive
+                )
+
+                if should_include:
+                    filtered_section.variables[var_name] = variable.clone()
+
+            # Only add section if it has variables
+            if filtered_section.variables:
+                filtered._sections[section_key] = filtered_section
+                # Add variables to map
+                for var_name, variable in filtered_section.variables.items():
+                    filtered._variable_map[var_name] = variable
+
+        return filtered
+
+    def get_all_variable_names(self) -> Set[str]:
+        """Get set of all variable names across all sections.
+
+        Returns:
+            Set of all variable names
+        """
+        return set(self._variable_map.keys())

+ 428 - 247
cli/core/config.py

@@ -1,7 +1,6 @@
 from __future__ import annotations
 
 import logging
-import os
 import re
 import shutil
 import tempfile
@@ -11,16 +10,13 @@ from typing import Any, Dict, Optional, Union
 import yaml
 from rich.console import Console
 
-from .variable import Variable
-from .section import VariableSection
-from .collection import VariableCollection
 from .exceptions import ConfigError, ConfigValidationError, YAMLParseError
 
 logger = logging.getLogger(__name__)
 console = Console()
 
 # Valid Python identifier pattern for variable names
-VALID_IDENTIFIER_PATTERN = re.compile(r'^[a-zA-Z_][a-zA-Z0-9_]*$')
+VALID_IDENTIFIER_PATTERN = re.compile(r"^[a-zA-Z_][a-zA-Z0-9_]*$")
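+# e.g. accepts hypothetical names like "traefik_host" or "_tmp1"; rejects "1var" and "my-var"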
 
 # Valid path pattern - prevents path traversal attempts
 VALID_PATH_PATTERN = re.compile(r'^[^\x00-\x1f<>:"|?*]+$')
@@ -30,88 +26,114 @@ MAX_STRING_LENGTH = 1000
 MAX_PATH_LENGTH = 4096
 MAX_LIST_LENGTH = 100
 
+
 class ConfigManager:
     """Manages configuration for the CLI application."""
-    
+
     def __init__(self, config_path: Optional[Union[str, Path]] = None) -> None:
         """Initialize the configuration manager.
-        
+
         Args:
-            config_path: Path to the configuration file. If None, uses default location.
+            config_path: Path to the configuration file. If None, auto-detects:
+                        1. Checks for ./config.yaml (local project config)
+                        2. Falls back to ~/.config/boilerplates/config.yaml (global config)
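+
+                        Example (hypothetical paths):
+                            ConfigManager()  # auto-detects local or global config
+                            ConfigManager("/tmp/my-config.yaml")  # explicit path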
         """
         if config_path is None:
-            # Default to ~/.config/boilerplates/config.yaml
-            config_dir = Path.home() / ".config" / "boilerplates"
-            config_dir.mkdir(parents=True, exist_ok=True)
-            self.config_path = config_dir / "config.yaml"
+            # Check for local config.yaml in current directory first
+            local_config = Path.cwd() / "config.yaml"
+            if local_config.exists() and local_config.is_file():
+                self.config_path = local_config
+                self.is_local = True
+                logger.debug(f"Using local config: {local_config}")
+            else:
+                # Fall back to global config
+                config_dir = Path.home() / ".config" / "boilerplates"
+                config_dir.mkdir(parents=True, exist_ok=True)
+                self.config_path = config_dir / "config.yaml"
+                self.is_local = False
         else:
             self.config_path = Path(config_path)
-        
-        # Create default config if it doesn't exist
+            self.is_local = False
+
+        # Create default config if it doesn't exist (never auto-create a detected local config)
         if not self.config_path.exists():
-            self._create_default_config()
+            if not self.is_local:
+                self._create_default_config()
+            else:
+                raise ConfigError(f"Local config file not found: {self.config_path}")
         else:
             # Migrate existing config if needed
             self._migrate_config_if_needed()
-    
+
     def _create_default_config(self) -> None:
         """Create a default configuration file."""
         default_config = {
             "defaults": {},
-            "preferences": {
-                "editor": "vim",
-                "output_dir": None,
-                "library_paths": []
-            },
+            "preferences": {"editor": "vim", "output_dir": None, "library_paths": []},
             "libraries": [
                 {
                     "name": "default",
+                    "type": "git",
                     "url": "https://github.com/christianlempa/boilerplates.git",
                     "branch": "main",
                     "directory": "library",
-                    "enabled": True
+                    "enabled": True,
                 }
-            ]
+            ],
         }
         self._write_config(default_config)
         logger.info(f"Created default configuration at {self.config_path}")
-    
+
     def _migrate_config_if_needed(self) -> None:
-        """Migrate existing config to add missing sections like libraries."""
+        """Migrate existing config to add missing sections and library types."""
         try:
             config = self._read_config()
             needs_migration = False
-            
+
             # Add libraries section if missing
             if "libraries" not in config:
                 logger.info("Migrating config: adding libraries section")
                 config["libraries"] = [
                     {
                         "name": "default",
+                        "type": "git",
                         "url": "https://github.com/christianlempa/boilerplates.git",
                         "branch": "refactor/boilerplates-v2",
                         "directory": "library",
-                        "enabled": True
+                        "enabled": True,
                     }
                 ]
                 needs_migration = True
-            
+            else:
+                # Migrate existing libraries to add 'type' field if missing
+                # For backward compatibility, assume all old libraries without 'type' are git libraries
+                libraries = config.get("libraries", [])
+                for library in libraries:
+                    if "type" not in library:
+                        logger.info(
+                            f"Migrating library '{library.get('name', 'unknown')}': adding type: git"
+                        )
+                        library["type"] = "git"
+                        needs_migration = True
+
             # Write back if migration was needed
             if needs_migration:
                 self._write_config(config)
-                logger.info("Config migration completed")
+                logger.info("Config migration completed successfully")
         except Exception as e:
             logger.warning(f"Config migration failed: {e}")
-    
+
     @staticmethod
-    def _validate_string_length(value: str, field_name: str, max_length: int = MAX_STRING_LENGTH) -> None:
+    def _validate_string_length(
+        value: str, field_name: str, max_length: int = MAX_STRING_LENGTH
+    ) -> None:
         """Validate string length to prevent DOS attacks.
-        
+
         Args:
             value: String value to validate
             field_name: Name of the field for error messages
             max_length: Maximum allowed length
-            
+
         Raises:
             ConfigValidationError: If string exceeds maximum length
         """
@@ -120,15 +142,15 @@ class ConfigManager:
                 f"{field_name} exceeds maximum length of {max_length} characters "
                 f"(got {len(value)} characters)"
             )
-    
+
     @staticmethod
     def _validate_path_string(path: str, field_name: str) -> None:
         """Validate path string for security concerns.
-        
+
         Args:
             path: Path string to validate
             field_name: Name of the field for error messages
-            
+
         Raises:
             ConfigValidationError: If path contains invalid characters or patterns
         """
@@ -137,26 +159,30 @@ class ConfigManager:
             raise ConfigValidationError(
                 f"{field_name} exceeds maximum path length of {MAX_PATH_LENGTH} characters"
             )
-        
+
         # Check for null bytes and control characters
-        if '\x00' in path or any(ord(c) < 32 for c in path if c not in '\t\n\r'):
+        if "\x00" in path or any(ord(c) < 32 for c in path if c not in "\t\n\r"):
             raise ConfigValidationError(
                 f"{field_name} contains invalid control characters"
             )
-        
+
         # Check for path traversal attempts
-        if '..' in path.split('/'):
-            logger.warning(f"Path '{path}' contains '..' - potential path traversal attempt")
-    
+        if ".." in path.split("/"):
+            logger.warning(
+                f"Path '{path}' contains '..' - potential path traversal attempt"
+            )
+
     @staticmethod
-    def _validate_list_length(lst: list, field_name: str, max_length: int = MAX_LIST_LENGTH) -> None:
+    def _validate_list_length(
+        lst: list, field_name: str, max_length: int = MAX_LIST_LENGTH
+    ) -> None:
         """Validate list length to prevent DOS attacks.
-        
+
         Args:
             lst: List to validate
             field_name: Name of the field for error messages
             max_length: Maximum allowed length
-            
+
         Raises:
             ConfigValidationError: If list exceeds maximum length
         """
@@ -164,25 +190,25 @@ class ConfigManager:
             raise ConfigValidationError(
                 f"{field_name} exceeds maximum length of {max_length} items (got {len(lst)} items)"
             )
-    
+
     def _read_config(self) -> Dict[str, Any]:
         """Read configuration from file.
-        
+
         Returns:
             Dictionary containing the configuration.
-            
+
         Raises:
             YAMLParseError: If YAML parsing fails.
             ConfigValidationError: If configuration structure is invalid.
             ConfigError: If reading fails for other reasons.
         """
         try:
-            with open(self.config_path, 'r') as f:
+            with open(self.config_path, "r") as f:
                 config = yaml.safe_load(f) or {}
-            
+
             # Validate config structure
             self._validate_config_structure(config)
-            
+
             return config
         except yaml.YAMLError as e:
             logger.error(f"Failed to parse YAML configuration: {e}")
@@ -192,16 +218,18 @@ class ConfigManager:
             raise
         except (IOError, OSError) as e:
             logger.error(f"Failed to read configuration file: {e}")
-            raise ConfigError(f"Failed to read configuration file '{self.config_path}': {e}")
-    
+            raise ConfigError(
+                f"Failed to read configuration file '{self.config_path}': {e}"
+            )
+
     def _write_config(self, config: Dict[str, Any]) -> None:
         """Write configuration to file atomically using temp file + rename pattern.
-        
+
         This prevents config file corruption if write operation fails partway through.
-        
+
         Args:
             config: Dictionary containing the configuration to write.
-            
+
         Raises:
             ConfigValidationError: If configuration structure is invalid.
             ConfigError: If writing fails for any reason.
@@ -210,25 +238,25 @@ class ConfigManager:
         try:
             # Validate config structure before writing
             self._validate_config_structure(config)
-            
+
             # Ensure parent directory exists
             self.config_path.parent.mkdir(parents=True, exist_ok=True)
-            
+
             # Write to temporary file in same directory for atomic rename
             with tempfile.NamedTemporaryFile(
-                mode='w',
+                mode="w",
                 delete=False,
                 dir=self.config_path.parent,
-                prefix='.config_',
-                suffix='.tmp'
+                prefix=".config_",
+                suffix=".tmp",
             ) as tmp_file:
                 yaml.dump(config, tmp_file, default_flow_style=False)
                 tmp_path = tmp_file.name
-            
+
             # Atomic rename (overwrites existing file on POSIX systems)
             shutil.move(tmp_path, self.config_path)
             logger.debug(f"Configuration written atomically to {self.config_path}")
-            
+
         except ConfigValidationError:
             # Re-raise validation errors as-is
             if tmp_path:
@@ -242,251 +270,340 @@ class ConfigManager:
                 except (IOError, OSError):
                     logger.warning(f"Failed to clean up temporary file: {tmp_path}")
             logger.error(f"Failed to write configuration file: {e}")
-            raise ConfigError(f"Failed to write configuration to '{self.config_path}': {e}")
-    
+            raise ConfigError(
+                f"Failed to write configuration to '{self.config_path}': {e}"
+            )
+
     def _validate_config_structure(self, config: Dict[str, Any]) -> None:
         """Validate the configuration structure with comprehensive checks.
-        
+
         Args:
             config: Configuration dictionary to validate.
-            
+
         Raises:
             ConfigValidationError: If configuration structure is invalid.
         """
         if not isinstance(config, dict):
             raise ConfigValidationError("Configuration must be a dictionary")
-        
+
         # Check top-level structure
         if "defaults" in config and not isinstance(config["defaults"], dict):
             raise ConfigValidationError("'defaults' must be a dictionary")
-        
+
         if "preferences" in config and not isinstance(config["preferences"], dict):
             raise ConfigValidationError("'preferences' must be a dictionary")
-        
+
         # Validate defaults structure
         if "defaults" in config:
             for module_name, module_defaults in config["defaults"].items():
                 if not isinstance(module_name, str):
-                    raise ConfigValidationError(f"Module name must be a string, got {type(module_name).__name__}")
-                
+                    raise ConfigValidationError(
+                        f"Module name must be a string, got {type(module_name).__name__}"
+                    )
+
                 # Validate module name length
                 self._validate_string_length(module_name, "Module name", max_length=100)
-                
+
                 if not isinstance(module_defaults, dict):
-                    raise ConfigValidationError(f"Defaults for module '{module_name}' must be a dictionary")
-                
+                    raise ConfigValidationError(
+                        f"Defaults for module '{module_name}' must be a dictionary"
+                    )
+
                 # Validate number of defaults per module
                 self._validate_list_length(
-                    list(module_defaults.keys()), 
-                    f"Defaults for module '{module_name}'"
+                    list(module_defaults.keys()), f"Defaults for module '{module_name}'"
                 )
-                
+
                 # Validate variable names are valid Python identifiers
                 for var_name, var_value in module_defaults.items():
                     if not isinstance(var_name, str):
-                        raise ConfigValidationError(f"Variable name must be a string, got {type(var_name).__name__}")
-                    
+                        raise ConfigValidationError(
+                            f"Variable name must be a string, got {type(var_name).__name__}"
+                        )
+
                     # Validate variable name length
-                    self._validate_string_length(var_name, "Variable name", max_length=100)
-                    
+                    self._validate_string_length(
+                        var_name, "Variable name", max_length=100
+                    )
+
                     if not VALID_IDENTIFIER_PATTERN.match(var_name):
                         raise ConfigValidationError(
                             f"Invalid variable name '{var_name}' in module '{module_name}'. "
                             f"Variable names must be valid Python identifiers (letters, numbers, underscores, "
                             f"cannot start with a number)"
                         )
-                    
+
                     # Validate variable value types and lengths
                     if isinstance(var_value, str):
                         self._validate_string_length(
-                            var_value, 
-                            f"Value for '{module_name}.{var_name}'"
+                            var_value, f"Value for '{module_name}.{var_name}'"
                         )
                     elif isinstance(var_value, list):
                         self._validate_list_length(
-                            var_value, 
-                            f"Value for '{module_name}.{var_name}'"
+                            var_value, f"Value for '{module_name}.{var_name}'"
                         )
-                    elif var_value is not None and not isinstance(var_value, (bool, int, float)):
+                    elif var_value is not None and not isinstance(
+                        var_value, (bool, int, float)
+                    ):
                         raise ConfigValidationError(
                             f"Invalid value type for '{module_name}.{var_name}': "
                             f"must be string, number, boolean, list, or null (got {type(var_value).__name__})"
                         )
-        
+
         # Validate preferences structure and types
         if "preferences" in config:
             preferences = config["preferences"]
-            
+
             # Validate known preference types
             if "editor" in preferences:
                 if not isinstance(preferences["editor"], str):
                     raise ConfigValidationError("Preference 'editor' must be a string")
-                self._validate_string_length(preferences["editor"], "Preference 'editor'", max_length=100)
-            
+                self._validate_string_length(
+                    preferences["editor"], "Preference 'editor'", max_length=100
+                )
+
             if "output_dir" in preferences:
                 output_dir = preferences["output_dir"]
                 if output_dir is not None:
                     if not isinstance(output_dir, str):
-                        raise ConfigValidationError("Preference 'output_dir' must be a string or null")
+                        raise ConfigValidationError(
+                            "Preference 'output_dir' must be a string or null"
+                        )
                     self._validate_path_string(output_dir, "Preference 'output_dir'")
-            
+
             if "library_paths" in preferences:
                 if not isinstance(preferences["library_paths"], list):
-                    raise ConfigValidationError("Preference 'library_paths' must be a list")
-                
-                self._validate_list_length(preferences["library_paths"], "Preference 'library_paths'")
-                
+                    raise ConfigValidationError(
+                        "Preference 'library_paths' must be a list"
+                    )
+
+                self._validate_list_length(
+                    preferences["library_paths"], "Preference 'library_paths'"
+                )
+
                 for i, path in enumerate(preferences["library_paths"]):
                     if not isinstance(path, str):
-                        raise ConfigValidationError(f"Library path must be a string, got {type(path).__name__}")
+                        raise ConfigValidationError(
+                            f"Library path must be a string, got {type(path).__name__}"
+                        )
                     self._validate_path_string(path, f"Library path at index {i}")
-        
+
         # Validate libraries structure
         if "libraries" in config:
             libraries = config["libraries"]
-            
+
             if not isinstance(libraries, list):
                 raise ConfigValidationError("'libraries' must be a list")
-            
+
             self._validate_list_length(libraries, "Libraries list")
-            
+
             for i, library in enumerate(libraries):
                 if not isinstance(library, dict):
-                    raise ConfigValidationError(f"Library at index {i} must be a dictionary")
-                
-                # Validate required fields
-                required_fields = ["name", "url", "directory"]
-                for field in required_fields:
-                    if field not in library:
-                        raise ConfigValidationError(f"Library at index {i} missing required field '{field}'")
-                    
-                    if not isinstance(library[field], str):
-                        raise ConfigValidationError(f"Library '{field}' at index {i} must be a string")
-                    
-                    self._validate_string_length(library[field], f"Library '{field}' at index {i}", max_length=500)
-                
-                # Validate optional branch field
-                if "branch" in library:
-                    if not isinstance(library["branch"], str):
-                        raise ConfigValidationError(f"Library 'branch' at index {i} must be a string")
-                    self._validate_string_length(library["branch"], f"Library 'branch' at index {i}", max_length=200)
-                
-                # Validate optional enabled field
+                    raise ConfigValidationError(
+                        f"Library at index {i} must be a dictionary"
+                    )
+
+                # Validate name field (required for all library types)
+                if "name" not in library:
+                    raise ConfigValidationError(
+                        f"Library at index {i} missing required field 'name'"
+                    )
+                if not isinstance(library["name"], str):
+                    raise ConfigValidationError(
+                        f"Library 'name' at index {i} must be a string"
+                    )
+                self._validate_string_length(
+                    library["name"], f"Library 'name' at index {i}", max_length=500
+                )
+
+                # Validate type field (default to "git" for backward compatibility)
+                lib_type = library.get("type", "git")
+                if lib_type not in ("git", "static"):
+                    raise ConfigValidationError(
+                        f"Library type at index {i} must be 'git' or 'static', got '{lib_type}'"
+                    )
+
+                # Type-specific validation
+                if lib_type == "git":
+                    # Git libraries require: url, directory
+                    required_fields = ["url", "directory"]
+                    for field in required_fields:
+                        if field not in library:
+                            raise ConfigValidationError(
+                                f"Git library at index {i} missing required field '{field}'"
+                            )
+
+                        if not isinstance(library[field], str):
+                            raise ConfigValidationError(
+                                f"Library '{field}' at index {i} must be a string"
+                            )
+
+                        self._validate_string_length(
+                            library[field],
+                            f"Library '{field}' at index {i}",
+                            max_length=500,
+                        )
+
+                    # Validate optional branch field
+                    if "branch" in library:
+                        if not isinstance(library["branch"], str):
+                            raise ConfigValidationError(
+                                f"Library 'branch' at index {i} must be a string"
+                            )
+                        self._validate_string_length(
+                            library["branch"],
+                            f"Library 'branch' at index {i}",
+                            max_length=200,
+                        )
+
+                elif lib_type == "static":
+                    # Static libraries require: path
+                    if "path" not in library:
+                        raise ConfigValidationError(
+                            f"Static library at index {i} missing required field 'path'"
+                        )
+
+                    if not isinstance(library["path"], str):
+                        raise ConfigValidationError(
+                            f"Library 'path' at index {i} must be a string"
+                        )
+
+                    self._validate_path_string(
+                        library["path"], f"Library 'path' at index {i}"
+                    )
+
+                # Validate optional enabled field (applies to all types)
                 if "enabled" in library and not isinstance(library["enabled"], bool):
-                    raise ConfigValidationError(f"Library 'enabled' at index {i} must be a boolean")
-    
+                    raise ConfigValidationError(
+                        f"Library 'enabled' at index {i} must be a boolean"
+                    )
+
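
For reference, a configuration dict that passes the validation above could look like this (all values are illustrative):

    config = {
        "defaults": {
            "compose": {"port": 8080, "restart_policy": "unless-stopped"},
        },
        "preferences": {
            "editor": "vim",
            "output_dir": None,  # string or null
            "library_paths": ["~/templates"],
        },
        "libraries": [
            {
                # git library: url and directory are required
                "name": "default",
                "type": "git",
                "url": "https://example.com/boilerplates.git",
                "branch": "main",
                "directory": "library",
                "enabled": True,
            },
            {
                # static library: path is required
                "name": "local",
                "type": "static",
                "path": "~/my-templates",
                "enabled": True,
            },
        ],
    }
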
     def get_config_path(self) -> Path:
-        """Get the path to the configuration file.
-        
+        """Get the path to the configuration file being used.
+
         Returns:
-            Path to the configuration file.
+            Path to the configuration file (global or local).
         """
         return self.config_path
 
+    def is_using_local_config(self) -> bool:
+        """Check if a local configuration file is being used.
+
+        Returns:
+            True if using local config, False if using global config.
+        """
+        return self.is_local
+
     def get_defaults(self, module_name: str) -> Dict[str, Any]:
         """Get default variable values for a module.
-        
+
         Returns defaults in a flat format:
         {
             "var_name": "value",
             "var2_name": "value2"
         }
-        
+
         Args:
             module_name: Name of the module
-            
+
         Returns:
             Dictionary of default values (flat key-value pairs)
         """
         config = self._read_config()
         defaults = config.get("defaults", {})
         return defaults.get(module_name, {})
-    
+
     def set_defaults(self, module_name: str, defaults: Dict[str, Any]) -> None:
         """Set default variable values for a module with comprehensive validation.
-        
+
         Args:
             module_name: Name of the module
             defaults: Dictionary of defaults (flat key-value pairs):
                       {"var_name": "value", "var2_name": "value2"}
-                      
+
         Raises:
             ConfigValidationError: If module name or variable names are invalid.
         """
         # Validate module name
         if not isinstance(module_name, str) or not module_name:
             raise ConfigValidationError("Module name must be a non-empty string")
-        
+
         self._validate_string_length(module_name, "Module name", max_length=100)
-        
+
         # Validate defaults dictionary
         if not isinstance(defaults, dict):
             raise ConfigValidationError("Defaults must be a dictionary")
-        
+
         # Validate number of defaults
         self._validate_list_length(list(defaults.keys()), "Defaults dictionary")
-        
+
         # Validate variable names and values
         for var_name, var_value in defaults.items():
             if not isinstance(var_name, str):
-                raise ConfigValidationError(f"Variable name must be a string, got {type(var_name).__name__}")
-            
+                raise ConfigValidationError(
+                    f"Variable name must be a string, got {type(var_name).__name__}"
+                )
+
             self._validate_string_length(var_name, "Variable name", max_length=100)
-            
+
             if not VALID_IDENTIFIER_PATTERN.match(var_name):
                 raise ConfigValidationError(
                     f"Invalid variable name '{var_name}'. Variable names must be valid Python identifiers "
                     f"(letters, numbers, underscores, cannot start with a number)"
                 )
-            
+
             # Validate value types and lengths
             if isinstance(var_value, str):
                 self._validate_string_length(var_value, f"Value for '{var_name}'")
             elif isinstance(var_value, list):
                 self._validate_list_length(var_value, f"Value for '{var_name}'")
-            elif var_value is not None and not isinstance(var_value, (bool, int, float)):
+            elif var_value is not None and not isinstance(
+                var_value, (bool, int, float)
+            ):
                 raise ConfigValidationError(
                     f"Invalid value type for '{var_name}': "
                     f"must be string, number, boolean, list, or null (got {type(var_value).__name__})"
                 )
-        
+
         config = self._read_config()
-        
+
         if "defaults" not in config:
             config["defaults"] = {}
-        
+
         config["defaults"][module_name] = defaults
         self._write_config(config)
         logger.info(f"Updated defaults for module '{module_name}'")
-    
+
     def set_default_value(self, module_name: str, var_name: str, value: Any) -> None:
         """Set a single default variable value with comprehensive validation.
-        
+
         Args:
             module_name: Name of the module
             var_name: Name of the variable
             value: Default value to set
-            
+
         Raises:
             ConfigValidationError: If module name or variable name is invalid.
         """
         # Validate inputs
         if not isinstance(module_name, str) or not module_name:
             raise ConfigValidationError("Module name must be a non-empty string")
-        
+
         self._validate_string_length(module_name, "Module name", max_length=100)
-        
+
         if not isinstance(var_name, str):
-            raise ConfigValidationError(f"Variable name must be a string, got {type(var_name).__name__}")
-        
+            raise ConfigValidationError(
+                f"Variable name must be a string, got {type(var_name).__name__}"
+            )
+
         self._validate_string_length(var_name, "Variable name", max_length=100)
-        
+
         if not VALID_IDENTIFIER_PATTERN.match(var_name):
             raise ConfigValidationError(
                 f"Invalid variable name '{var_name}'. Variable names must be valid Python identifiers "
                 f"(letters, numbers, underscores, cannot start with a number)"
             )
-        
+
         # Validate value type and length
         if isinstance(value, str):
             self._validate_string_length(value, f"Value for '{var_name}'")
@@ -497,33 +614,33 @@ class ConfigManager:
                 f"Invalid value type for '{var_name}': "
                 f"must be string, number, boolean, list, or null (got {type(value).__name__})"
             )
-        
+
         defaults = self.get_defaults(module_name)
         defaults[var_name] = value
         self.set_defaults(module_name, defaults)
         logger.info(f"Set default for '{module_name}.{var_name}' = '{value}'")
-    
+
     def get_default_value(self, module_name: str, var_name: str) -> Optional[Any]:
         """Get a single default variable value.
-        
+
         Args:
             module_name: Name of the module
             var_name: Name of the variable
-            
+
         Returns:
             Default value or None if not set
         """
         defaults = self.get_defaults(module_name)
         return defaults.get(var_name)
-    
+
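
A short round-trip through the defaults API, assuming a ConfigManager instance (module and variable names are made up):

    cfg = ConfigManager()

    # Validated, then written atomically via _write_config.
    cfg.set_default_value("compose", "port", 8080)

    assert cfg.get_default_value("compose", "port") == 8080
    # On a fresh config, the module mapping holds just this key:
    assert cfg.get_defaults("compose") == {"port": 8080}

    # Invalid identifiers are rejected before anything is written:
    # cfg.set_default_value("compose", "2bad", "x")  -> ConfigValidationError
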
     def clear_defaults(self, module_name: str) -> None:
         """Clear all defaults for a module.
-        
+
         Args:
             module_name: Name of the module
         """
         config = self._read_config()
-        
+
         if "defaults" in config and module_name in config["defaults"]:
             del config["defaults"][module_name]
             self._write_config(config)
@@ -531,96 +648,100 @@ class ConfigManager:
 
     def get_preference(self, key: str) -> Optional[Any]:
         """Get a user preference value.
-        
+
         Args:
             key: Preference key (e.g., 'editor', 'output_dir', 'library_paths')
-            
+
         Returns:
             Preference value or None if not set
         """
         config = self._read_config()
         preferences = config.get("preferences", {})
         return preferences.get(key)
-    
+
     def set_preference(self, key: str, value: Any) -> None:
         """Set a user preference value with comprehensive validation.
-        
+
         Args:
             key: Preference key
             value: Preference value
-            
+
         Raises:
             ConfigValidationError: If key or value is invalid for known preference types.
         """
         # Validate key
         if not isinstance(key, str) or not key:
             raise ConfigValidationError("Preference key must be a non-empty string")
-        
+
         self._validate_string_length(key, "Preference key", max_length=100)
-        
+
         # Validate known preference types
         if key == "editor":
             if not isinstance(value, str):
                 raise ConfigValidationError("Preference 'editor' must be a string")
             self._validate_string_length(value, "Preference 'editor'", max_length=100)
-        
+
         elif key == "output_dir":
             if value is not None:
                 if not isinstance(value, str):
-                    raise ConfigValidationError("Preference 'output_dir' must be a string or null")
+                    raise ConfigValidationError(
+                        "Preference 'output_dir' must be a string or null"
+                    )
                 self._validate_path_string(value, "Preference 'output_dir'")
-        
+
         elif key == "library_paths":
             if not isinstance(value, list):
                 raise ConfigValidationError("Preference 'library_paths' must be a list")
-            
+
             self._validate_list_length(value, "Preference 'library_paths'")
-            
+
             for i, path in enumerate(value):
                 if not isinstance(path, str):
-                    raise ConfigValidationError(f"Library path must be a string, got {type(path).__name__}")
+                    raise ConfigValidationError(
+                        f"Library path must be a string, got {type(path).__name__}"
+                    )
                 self._validate_path_string(path, f"Library path at index {i}")
-        
+
         # For unknown preference keys, apply basic validation
         else:
             if isinstance(value, str):
                 self._validate_string_length(value, f"Preference '{key}'")
             elif isinstance(value, list):
                 self._validate_list_length(value, f"Preference '{key}'")
-        
+
         config = self._read_config()
-        
+
         if "preferences" not in config:
             config["preferences"] = {}
-        
+
         config["preferences"][key] = value
         self._write_config(config)
         logger.info(f"Set preference '{key}' = '{value}'")
-    
+
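
Known preference keys are type-checked individually, while unknown keys only receive basic length checks. For example (values are placeholders):

    cfg = ConfigManager()

    cfg.set_preference("editor", "nvim")             # must be a string
    cfg.set_preference("library_paths", ["~/tpl"])   # list of path strings
    cfg.set_preference("theme", "dark")              # unknown key: basic checks

    # cfg.set_preference("editor", 123)  -> ConfigValidationError
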
     def get_all_preferences(self) -> Dict[str, Any]:
         """Get all user preferences.
-        
+
         Returns:
             Dictionary of all preferences
         """
         config = self._read_config()
         return config.get("preferences", {})
-    
+
     def get_libraries(self) -> list[Dict[str, Any]]:
         """Get all configured libraries.
-        
+
         Returns:
             List of library configurations
         """
         config = self._read_config()
         return config.get("libraries", [])
-    
+
     def get_library_by_name(self, name: str) -> Optional[Dict[str, Any]]:
         """Get a specific library by name.
-        
+
         Args:
             name: Name of the library
-            
+
         Returns:
             Library configuration dictionary or None if not found
         """
@@ -629,143 +750,203 @@ class ConfigManager:
             if library.get("name") == name:
                 return library
         return None
-    
-    def add_library(self, name: str, url: str, directory: str = "library", branch: str = "main", enabled: bool = True) -> None:
+
+    def add_library(
+        self,
+        name: str,
+        library_type: str = "git",
+        url: Optional[str] = None,
+        directory: Optional[str] = None,
+        branch: str = "main",
+        path: Optional[str] = None,
+        enabled: bool = True,
+    ) -> None:
         """Add a new library to the configuration.
-        
+
         Args:
             name: Unique name for the library
-            url: Git repository URL
-            directory: Directory within the repo containing templates
-            branch: Git branch to use
+            library_type: Type of library ("git" or "static")
+            url: Git repository URL (required for git type)
+            directory: Directory within repo (required for git type)
+            branch: Git branch (for git type)
+            path: Local path to templates (required for static type)
             enabled: Whether the library is enabled
-            
+
         Raises:
             ConfigValidationError: If library with the same name already exists or validation fails
         """
-        # Validate inputs
+        # Validate name
         if not isinstance(name, str) or not name:
             raise ConfigValidationError("Library name must be a non-empty string")
-        
+
         self._validate_string_length(name, "Library name", max_length=100)
-        
-        if not isinstance(url, str) or not url:
-            raise ConfigValidationError("Library URL must be a non-empty string")
-        
-        self._validate_string_length(url, "Library URL", max_length=500)
-        
-        if not isinstance(directory, str) or not directory:
-            raise ConfigValidationError("Library directory must be a non-empty string")
-        
-        self._validate_string_length(directory, "Library directory", max_length=200)
-        
-        if not isinstance(branch, str) or not branch:
-            raise ConfigValidationError("Library branch must be a non-empty string")
-        
-        self._validate_string_length(branch, "Library branch", max_length=200)
-        
+
+        # Validate type
+        if library_type not in ("git", "static"):
+            raise ConfigValidationError(
+                f"Library type must be 'git' or 'static', got '{library_type}'"
+            )
+
         # Check if library already exists
         if self.get_library_by_name(name):
             raise ConfigValidationError(f"Library '{name}' already exists")
-        
+
+        # Type-specific validation and config building
+        if library_type == "git":
+            if not url:
+                raise ConfigValidationError("Git libraries require 'url' parameter")
+            if not directory:
+                raise ConfigValidationError(
+                    "Git libraries require 'directory' parameter"
+                )
+
+            # Validate git-specific fields
+            if not isinstance(url, str) or not url:
+                raise ConfigValidationError("Library URL must be a non-empty string")
+            self._validate_string_length(url, "Library URL", max_length=500)
+
+            if not isinstance(directory, str) or not directory:
+                raise ConfigValidationError(
+                    "Library directory must be a non-empty string"
+                )
+            self._validate_string_length(directory, "Library directory", max_length=200)
+
+            if not isinstance(branch, str) or not branch:
+                raise ConfigValidationError("Library branch must be a non-empty string")
+            self._validate_string_length(branch, "Library branch", max_length=200)
+
+            library_config = {
+                "name": name,
+                "type": "git",
+                "url": url,
+                "branch": branch,
+                "directory": directory,
+                "enabled": enabled,
+            }
+
+        else:  # static
+            if not path:
+                raise ConfigValidationError("Static libraries require 'path' parameter")
+
+            # Validate static-specific fields
+            if not isinstance(path, str) or not path:
+                raise ConfigValidationError("Library path must be a non-empty string")
+            self._validate_path_string(path, "Library path")
+
+            # For backward compatibility with older CLI versions,
+            # add dummy values for git-specific fields
+            library_config = {
+                "name": name,
+                "type": "static",
+                "url": "",  # Empty string for backward compatibility
+                "branch": "main",  # Default value for backward compatibility
+                "directory": ".",  # Default value for backward compatibility
+                "path": path,
+                "enabled": enabled,
+            }
+
         config = self._read_config()
-        
+
         if "libraries" not in config:
             config["libraries"] = []
-        
-        config["libraries"].append({
-            "name": name,
-            "url": url,
-            "branch": branch,
-            "directory": directory,
-            "enabled": enabled
-        })
-        
+
+        config["libraries"].append(library_config)
+
         self._write_config(config)
-        logger.info(f"Added library '{name}'")
-    
+        logger.info(f"Added {library_type} library '{name}'")
+
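
Both library types go through the same entry point; a usage sketch matching the signature above (URLs and paths are placeholders):

    cfg = ConfigManager()

    # Git library: url and directory are required; branch defaults to "main".
    cfg.add_library(
        name="community",
        library_type="git",
        url="https://example.com/templates.git",
        directory="library",
    )

    # Static library: only a local path is required; the git fields are
    # padded with dummy values for backward compatibility.
    cfg.add_library(name="local", library_type="static", path="~/my-templates")
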
     def remove_library(self, name: str) -> None:
         """Remove a library from the configuration.
-        
+
         Args:
             name: Name of the library to remove
-            
+
         Raises:
             ConfigError: If library is not found
         """
         config = self._read_config()
         libraries = config.get("libraries", [])
-        
+
         # Find and remove the library
         new_libraries = [lib for lib in libraries if lib.get("name") != name]
-        
+
         if len(new_libraries) == len(libraries):
             raise ConfigError(f"Library '{name}' not found")
-        
+
         config["libraries"] = new_libraries
         self._write_config(config)
         logger.info(f"Removed library '{name}'")
-    
+
     def update_library(self, name: str, **kwargs: Any) -> None:
         """Update a library's configuration.
-        
+
         Args:
             name: Name of the library to update
             **kwargs: Fields to update (url, branch, directory, enabled)
-            
+
         Raises:
             ConfigError: If library is not found
             ConfigValidationError: If validation fails
         """
         config = self._read_config()
         libraries = config.get("libraries", [])
-        
+
         # Find the library
         library_found = False
         for library in libraries:
             if library.get("name") == name:
                 library_found = True
-                
+
                 # Update allowed fields
                 if "url" in kwargs:
                     url = kwargs["url"]
                     if not isinstance(url, str) or not url:
-                        raise ConfigValidationError("Library URL must be a non-empty string")
+                        raise ConfigValidationError(
+                            "Library URL must be a non-empty string"
+                        )
                     self._validate_string_length(url, "Library URL", max_length=500)
                     library["url"] = url
-                
+
                 if "branch" in kwargs:
                     branch = kwargs["branch"]
                     if not isinstance(branch, str) or not branch:
-                        raise ConfigValidationError("Library branch must be a non-empty string")
-                    self._validate_string_length(branch, "Library branch", max_length=200)
+                        raise ConfigValidationError(
+                            "Library branch must be a non-empty string"
+                        )
+                    self._validate_string_length(
+                        branch, "Library branch", max_length=200
+                    )
                     library["branch"] = branch
-                
+
                 if "directory" in kwargs:
                     directory = kwargs["directory"]
                     if not isinstance(directory, str) or not directory:
-                        raise ConfigValidationError("Library directory must be a non-empty string")
-                    self._validate_string_length(directory, "Library directory", max_length=200)
+                        raise ConfigValidationError(
+                            "Library directory must be a non-empty string"
+                        )
+                    self._validate_string_length(
+                        directory, "Library directory", max_length=200
+                    )
                     library["directory"] = directory
-                
+
                 if "enabled" in kwargs:
                     enabled = kwargs["enabled"]
                     if not isinstance(enabled, bool):
                         raise ConfigValidationError("Library enabled must be a boolean")
                     library["enabled"] = enabled
-                
+
                 break
-        
+
         if not library_found:
             raise ConfigError(f"Library '{name}' not found")
-        
+
         config["libraries"] = libraries
         self._write_config(config)
         logger.info(f"Updated library '{name}'")
-    
+
     def get_libraries_path(self) -> Path:
         """Get the path to the libraries directory.
-        
+
         Returns:
             Path to the libraries directory (same directory as config file)
         """

The diff is too large to display
+ 408 - 209
cli/core/display.py


+ 59 - 11
cli/core/exceptions.py

@@ -9,27 +9,31 @@ from typing import Optional, List, Dict
 
 class BoilerplatesError(Exception):
     """Base exception for all boilerplates CLI errors."""
+
     pass
 
 
 class ConfigError(BoilerplatesError):
     """Raised when configuration operations fail."""
+
     pass
 
 
 class ConfigValidationError(ConfigError):
     """Raised when configuration validation fails."""
+
     pass
 
 
 class TemplateError(BoilerplatesError):
     """Base exception for template-related errors."""
+
     pass
 
 
 class TemplateNotFoundError(TemplateError):
     """Raised when a template cannot be found."""
-    
+
     def __init__(self, template_id: str, module_name: Optional[str] = None):
         self.template_id = template_id
         self.module_name = module_name
@@ -39,14 +43,27 @@ class TemplateNotFoundError(TemplateError):
         super().__init__(msg)
 
 
+class DuplicateTemplateError(TemplateError):
+    """Raised when duplicate template IDs are found within the same library."""
+
+    def __init__(self, template_id: str, library_name: str):
+        self.template_id = template_id
+        self.library_name = library_name
+        super().__init__(
+            f"Duplicate template ID '{template_id}' found in library '{library_name}'. "
+            f"Each template within a library must have a unique ID."
+        )
+
+
 class TemplateLoadError(TemplateError):
     """Raised when a template fails to load."""
+
     pass
 
 
 class TemplateSyntaxError(TemplateError):
     """Raised when a Jinja2 template has syntax errors."""
-    
+
     def __init__(self, template_id: str, errors: List[str]):
         self.template_id = template_id
         self.errors = errors
@@ -56,12 +73,37 @@ class TemplateSyntaxError(TemplateError):
 
 class TemplateValidationError(TemplateError):
     """Raised when template validation fails."""
+
     pass
 
 
+class IncompatibleSchemaVersionError(TemplateError):
+    """Raised when a template uses a schema version not supported by the module."""
+
+    def __init__(
+        self,
+        template_id: str,
+        template_schema: str,
+        module_schema: str,
+        module_name: str,
+    ):
+        self.template_id = template_id
+        self.template_schema = template_schema
+        self.module_schema = module_schema
+        self.module_name = module_name
+        msg = (
+            f"Template '{template_id}' uses schema version {template_schema}, "
+            f"but module '{module_name}' only supports up to version {module_schema}.\n\n"
+            f"This template requires features not available in your current CLI version.\n"
+            f"Please upgrade the boilerplates CLI.\n\n"
+            f"Run: pip install --upgrade boilerplates"
+        )
+        super().__init__(msg)
+
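
A sketch of how a module loader might raise this exception; the schema constant and the version-comparison helper are assumptions for illustration, not part of this diff:

    MODULE_SCHEMA = "1.2"  # assumed: highest schema this module supports

    def check_schema(template_id: str, template_schema: str, module_name: str) -> None:
        # Compare dotted versions as integer tuples, e.g. "1.10" > "1.2".
        def key(v: str) -> tuple[int, ...]:
            return tuple(int(x) for x in v.split("."))

        if key(template_schema) > key(MODULE_SCHEMA):
            raise IncompatibleSchemaVersionError(
                template_id, template_schema, MODULE_SCHEMA, module_name
            )
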
+
 class TemplateRenderError(TemplateError):
     """Raised when template rendering fails."""
-    
+
     def __init__(
         self,
         message: str,
@@ -71,7 +113,7 @@ class TemplateRenderError(TemplateError):
         context_lines: Optional[List[str]] = None,
         variable_context: Optional[Dict[str, str]] = None,
         suggestions: Optional[List[str]] = None,
-        original_error: Optional[Exception] = None
+        original_error: Optional[Exception] = None,
     ):
         self.file_path = file_path
         self.line_number = line_number
@@ -80,10 +122,10 @@ class TemplateRenderError(TemplateError):
         self.variable_context = variable_context or {}
         self.suggestions = suggestions or []
         self.original_error = original_error
-        
+
         # Build enhanced error message
         parts = [message]
-        
+
         if file_path:
             location = f"File: {file_path}"
             if line_number:
@@ -91,18 +133,19 @@ class TemplateRenderError(TemplateError):
                 if column:
                     location += f", Column: {column}"
             parts.append(location)
-        
+
         super().__init__("\n".join(parts))
 
 
 class VariableError(BoilerplatesError):
     """Base exception for variable-related errors."""
+
     pass
 
 
 class VariableValidationError(VariableError):
     """Raised when variable validation fails."""
-    
+
     def __init__(self, variable_name: str, message: str):
         self.variable_name = variable_name
         msg = f"Validation error for variable '{variable_name}': {message}"
@@ -111,7 +154,7 @@ class VariableValidationError(VariableError):
 
 class VariableTypeError(VariableError):
     """Raised when a variable has an incorrect type."""
-    
+
     def __init__(self, variable_name: str, expected_type: str, actual_type: str):
         self.variable_name = variable_name
         self.expected_type = expected_type
@@ -122,17 +165,19 @@ class VariableTypeError(VariableError):
 
 class LibraryError(BoilerplatesError):
     """Raised when library operations fail."""
+
     pass
 
 
 class ModuleError(BoilerplatesError):
     """Raised when module operations fail."""
+
     pass
 
 
 class ModuleNotFoundError(ModuleError):
     """Raised when a module cannot be found."""
-    
+
     def __init__(self, module_name: str):
         self.module_name = module_name
         msg = f"Module '{module_name}' not found"
@@ -141,22 +186,25 @@ class ModuleNotFoundError(ModuleError):
 
 class ModuleLoadError(ModuleError):
     """Raised when a module fails to load."""
+
     pass
 
 
 class FileOperationError(BoilerplatesError):
     """Raised when file operations fail."""
+
     pass
 
 
 class RenderError(BoilerplatesError):
     """Raised when rendering operations fail."""
+
     pass
 
 
 class YAMLParseError(BoilerplatesError):
     """Raised when YAML parsing fails."""
-    
+
     def __init__(self, file_path: str, original_error: Exception):
         self.file_path = file_path
         self.original_error = original_error

+ 365 - 223
cli/core/library.py

@@ -5,234 +5,376 @@ import logging
 from typing import Optional
 import yaml
 
-from .exceptions import LibraryError, TemplateNotFoundError, YAMLParseError
+from .exceptions import LibraryError, TemplateNotFoundError, DuplicateTemplateError
 
 logger = logging.getLogger(__name__)
 
 
 class Library:
-  """Represents a single library with a specific path."""
-  
-  def __init__(self, name: str, path: Path, priority: int = 0) -> None:
-    """Initialize a library instance.
-    
-    Args:
-      name: Display name for the library
-      path: Path to the library directory
-      priority: Priority for library lookup (higher = checked first)
-    """
-    self.name = name
-    self.path = path
-    self.priority = priority  # Higher priority = checked first
-  
-  def _is_template_draft(self, template_path: Path) -> bool:
-    """Check if a template is marked as draft."""
-    # Find the template file
-    for filename in ("template.yaml", "template.yml"):
-      template_file = template_path / filename
-      if template_file.exists():
-        break
-    else:
-      return False
-    
-    try:
-      with open(template_file, "r", encoding="utf-8") as f:
-        docs = [doc for doc in yaml.safe_load_all(f) if doc]
-        return docs[0].get("metadata", {}).get("draft", False) if docs else False
-    except (yaml.YAMLError, IOError, OSError) as e:
-      logger.warning(f"Error checking draft status for {template_path}: {e}")
-      return False
-
-  def find_by_id(self, module_name: str, template_id: str) -> tuple[Path, str]:
-    """Find a template by its ID in this library.
-    
-    Args:
-        module_name: The module name (e.g., 'compose', 'terraform')
-        template_id: The template ID to find
-    
-    Returns:
-        Path to the template directory if found
-        
-    Raises:
-        FileNotFoundError: If the template ID is not found in this library or is marked as draft
-    """
-    logger.debug(f"Looking for template '{template_id}' in module '{module_name}' in library '{self.name}'")
-    
-    # Build the path to the specific template directory
-    template_path = self.path / module_name / template_id
-    
-    # Check if template directory exists with a template file
-    has_template = template_path.is_dir() and any(
-      (template_path / f).exists() for f in ("template.yaml", "template.yml")
-    )
-    
-    if not has_template or self._is_template_draft(template_path):
-      raise TemplateNotFoundError(template_id, module_name)
-    
-    logger.debug(f"Found template '{template_id}' at: {template_path}")
-    return template_path, self.name
-
-
-  def find(self, module_name: str, sort_results: bool = False) -> list[tuple[Path, str]]:
-    """Find templates in this library for a specific module.
-    
-    Excludes templates marked as draft.
-    
-    Args:
-        module_name: The module name (e.g., 'compose', 'terraform')
-        sort_results: Whether to return results sorted alphabetically
-    
-    Returns:
-        List of Path objects representing template directories (excluding drafts)
-        
-    Raises:
-        FileNotFoundError: If the module directory is not found in this library
-    """
-    logger.debug(f"Looking for templates in module '{module_name}' in library '{self.name}'")
-    
-    # Build the path to the module directory
-    module_path = self.path / module_name
-    
-    # Check if the module directory exists
-    if not module_path.is_dir():
-      raise LibraryError(f"Module '{module_name}' not found in library '{self.name}'")
-    
-    # Get non-draft templates
-    template_dirs = []
-    try:
-      for item in module_path.iterdir():
-        has_template = item.is_dir() and any((item / f).exists() for f in ("template.yaml", "template.yml"))
-        if has_template and not self._is_template_draft(item):
-          template_dirs.append((item, self.name))
-        elif has_template:
-          logger.debug(f"Skipping draft template: {item.name}")
-    except PermissionError as e:
-      raise LibraryError(f"Permission denied accessing module '{module_name}' in library '{self.name}': {e}")
-    
-    # Sort if requested
-    if sort_results:
-      template_dirs.sort(key=lambda x: x[0].name.lower())
-    
-    logger.debug(f"Found {len(template_dirs)} templates in module '{module_name}'")
-    return template_dirs
+    """Represents a single library with a specific path."""
+
+    def __init__(
+        self, name: str, path: Path, priority: int = 0, library_type: str = "git"
+    ) -> None:
+        """Initialize a library instance.
+
+        Args:
+          name: Display name for the library
+          path: Path to the library directory
+          priority: Priority for library lookup (higher = checked first)
+          library_type: Type of library ("git" or "static")
+        """
+        if library_type not in ("git", "static"):
+            raise ValueError(
+                f"Invalid library type: {library_type}. Must be 'git' or 'static'."
+            )
+
+        self.name = name
+        self.path = path
+        self.priority = priority  # Higher priority = checked first
+        self.library_type = library_type
+
+    def _is_template_draft(self, template_path: Path) -> bool:
+        """Check if a template is marked as draft."""
+        # Find the template file
+        for filename in ("template.yaml", "template.yml"):
+            template_file = template_path / filename
+            if template_file.exists():
+                break
+        else:
+            return False
+
+        try:
+            with open(template_file, "r", encoding="utf-8") as f:
+                docs = [doc for doc in yaml.safe_load_all(f) if doc]
+                return (
+                    docs[0].get("metadata", {}).get("draft", False) if docs else False
+                )
+        except (yaml.YAMLError, IOError, OSError) as e:
+            logger.warning(f"Error checking draft status for {template_path}: {e}")
+            return False
+
+    def find_by_id(self, module_name: str, template_id: str) -> tuple[Path, str]:
+        """Find a template by its ID in this library.
+
+        Args:
+            module_name: The module name (e.g., 'compose', 'terraform')
+            template_id: The template ID to find
+
+        Returns:
+            Tuple of (template_path, library_name) if found
+
+        Raises:
+            TemplateNotFoundError: If the template ID is not found in this library or is marked as draft
+        """
+        logger.debug(
+            f"Looking for template '{template_id}' in module '{module_name}' in library '{self.name}'"
+        )
+
+        # Build the path to the specific template directory
+        template_path = self.path / module_name / template_id
+
+        # Check if template directory exists with a template file
+        has_template = template_path.is_dir() and any(
+            (template_path / f).exists() for f in ("template.yaml", "template.yml")
+        )
+
+        if not has_template or self._is_template_draft(template_path):
+            raise TemplateNotFoundError(template_id, module_name)
+
+        logger.debug(f"Found template '{template_id}' at: {template_path}")
+        return template_path, self.name
+
+    def find(
+        self, module_name: str, sort_results: bool = False
+    ) -> list[tuple[Path, str]]:
+        """Find templates in this library for a specific module.
+
+        Excludes templates marked as draft.
+
+        Args:
+            module_name: The module name (e.g., 'compose', 'terraform')
+            sort_results: Whether to return results sorted alphabetically
+
+        Returns:
+            List of (template_path, library_name) tuples (excluding drafts)
+
+        Raises:
+            LibraryError: If the module directory is not found in this library
+            DuplicateTemplateError: If two templates in this library share the same ID
+        """
+        logger.debug(
+            f"Looking for templates in module '{module_name}' in library '{self.name}'"
+        )
+
+        # Build the path to the module directory
+        module_path = self.path / module_name
+
+        # Check if the module directory exists
+        if not module_path.is_dir():
+            raise LibraryError(
+                f"Module '{module_name}' not found in library '{self.name}'"
+            )
+
+        # Track seen IDs to detect duplicates within this library
+        seen_ids = set()
+        template_dirs = []
+        try:
+            for item in module_path.iterdir():
+                has_template = item.is_dir() and any(
+                    (item / f).exists() for f in ("template.yaml", "template.yml")
+                )
+                if has_template and not self._is_template_draft(item):
+                    template_id = item.name
+
+                    # Check for duplicate within same library
+                    if template_id in seen_ids:
+                        raise DuplicateTemplateError(template_id, self.name)
+
+                    seen_ids.add(template_id)
+                    template_dirs.append((item, self.name))
+                elif has_template:
+                    logger.debug(f"Skipping draft template: {item.name}")
+        except PermissionError as e:
+            raise LibraryError(
+                f"Permission denied accessing module '{module_name}' in library '{self.name}': {e}"
+            )
+
+        # Sort if requested
+        if sort_results:
+            template_dirs.sort(key=lambda x: x[0].name.lower())
+
+        logger.debug(f"Found {len(template_dirs)} templates in module '{module_name}'")
+        return template_dirs
+
 
 class LibraryManager:
-  """Manages multiple libraries and provides methods to find templates."""
-  
-  def __init__(self) -> None:
-    """Initialize LibraryManager with git-based libraries from config."""
-    from .config import ConfigManager
-    
-    self.config = ConfigManager()
-    self.libraries = self._load_libraries_from_config()
-  
-  def _load_libraries_from_config(self) -> list[Library]:
-    """Load libraries from configuration.
-    
-    Returns:
-        List of Library instances
-    """
-    libraries = []
-    libraries_path = self.config.get_libraries_path()
-    
-    # Get library configurations from config
-    library_configs = self.config.get_libraries()
-    
-    for i, lib_config in enumerate(library_configs):
-      # Skip disabled libraries
-      if not lib_config.get("enabled", True):
-        logger.debug(f"Skipping disabled library: {lib_config.get('name')}")
-        continue
-      
-      name = lib_config.get("name")
-      directory = lib_config.get("directory", ".")
-      
-      # Build path to library: ~/.config/boilerplates/libraries/{name}/{directory}/
-      # For sparse-checkout, files remain in the specified directory
-      library_base = libraries_path / name
-      if directory and directory != ".":
-        library_path = library_base / directory
-      else:
-        library_path = library_base
-      
-      # Check if library path exists
-      if not library_path.exists():
-        logger.warning(
-          f"Library '{name}' not found at {library_path}. "
-          f"Run 'repo update' to sync libraries."
+    """Manages multiple libraries and provides methods to find templates."""
+
+    def __init__(self) -> None:
+        """Initialize LibraryManager with git-based libraries from config."""
+        from .config import ConfigManager
+
+        self.config = ConfigManager()
+        self.libraries = self._load_libraries_from_config()
+
+    def _load_libraries_from_config(self) -> list[Library]:
+        """Load libraries from configuration.
+
+        Returns:
+            List of Library instances
+        """
+        libraries = []
+        libraries_path = self.config.get_libraries_path()
+
+        # Get library configurations from config
+        library_configs = self.config.get_libraries()
+
+        for i, lib_config in enumerate(library_configs):
+            # Skip disabled libraries
+            if not lib_config.get("enabled", True):
+                logger.debug(f"Skipping disabled library: {lib_config.get('name')}")
+                continue
+
+            name = lib_config.get("name")
+            lib_type = lib_config.get(
+                "type", "git"
+            )  # Default to "git" for backward compat
+
+            # Handle library type-specific path resolution
+            if lib_type == "git":
+                # Existing git logic
+                directory = lib_config.get("directory", ".")
+
+                # Build path to library: ~/.config/boilerplates/libraries/{name}/{directory}/
+                # For sparse-checkout, files remain in the specified directory
+                library_base = libraries_path / name
+                if directory and directory != ".":
+                    library_path = library_base / directory
+                else:
+                    library_path = library_base
+
+            elif lib_type == "static":
+                # New static logic - use path directly
+                path_str = lib_config.get("path")
+                if not path_str:
+                    logger.warning(f"Static library '{name}' has no path configured")
+                    continue
+
+                # Expand ~ and resolve relative paths
+                library_path = Path(path_str).expanduser()
+                if not library_path.is_absolute():
+                    # Resolve relative to config directory
+                    library_path = (
+                        self.config.config_path.parent / library_path
+                    ).resolve()
+
+            else:
+                logger.warning(
+                    f"Unknown library type '{lib_type}' for library '{name}'"
+                )
+                continue
+
+            # Check if library path exists
+            if not library_path.exists():
+                if lib_type == "git":
+                    logger.warning(
+                        f"Library '{name}' not found at {library_path}. "
+                        f"Run 'repo update' to sync libraries."
+                    )
+                else:
+                    logger.warning(
+                        f"Static library '{name}' not found at {library_path}"
+                    )
+                continue
+
+            # Create Library instance with type and priority based on order (first = highest priority)
+            priority = len(library_configs) - i
+            libraries.append(
+                Library(
+                    name=name,
+                    path=library_path,
+                    priority=priority,
+                    library_type=lib_type,
+                )
+            )
+            logger.debug(
+                f"Loaded {lib_type} library '{name}' from {library_path} with priority {priority}"
+            )
+
+        if not libraries:
+            logger.warning("No libraries loaded. Run 'repo update' to sync libraries.")
+
+        return libraries
+
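
Static-library path resolution follows the rules above: ~ is expanded first, then a still-relative path is anchored at the config file's directory. A standalone sketch (config_dir stands in for self.config.config_path.parent):

    from pathlib import Path

    def resolve_static_path(path_str: str, config_dir: Path) -> Path:
        p = Path(path_str).expanduser()
        if not p.is_absolute():
            p = (config_dir / p).resolve()
        return p

    # resolve_static_path("../tpl", Path("/home/u/.config/boilerplates"))
    #   -> Path("/home/u/.config/tpl")
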
+    def find_by_id(
+        self, module_name: str, template_id: str
+    ) -> Optional[tuple[Path, str]]:
+        """Find a template by its ID across all libraries.
+
+        Supports both simple IDs and qualified IDs (template.library format).
+
+        Args:
+            module_name: The module name (e.g., 'compose', 'terraform')
+            template_id: The template ID to find (simple or qualified)
+
+        Returns:
+            Tuple of (template_path, library_name) if found, None otherwise
+        """
+        logger.debug(
+            f"Searching for template '{template_id}' in module '{module_name}' across all libraries"
         )
-        continue
-      
-      # Create Library instance with priority based on order (first = highest priority)
-      priority = len(library_configs) - i
-      libraries.append(Library(name=name, path=library_path, priority=priority))
-      logger.debug(f"Loaded library '{name}' from {library_path} with priority {priority}")
-    
-    if not libraries:
-      logger.warning("No libraries loaded. Run 'repo update' to sync libraries.")
-    
-    return libraries
-
-  def find_by_id(self, module_name: str, template_id: str) -> Optional[tuple[Path, str]]:
-    """Find a template by its ID across all libraries.
-    
-    Args:
-        module_name: The module name (e.g., 'compose', 'terraform')
-        template_id: The template ID to find
-    
-    Returns:
-        Path to the template directory if found, None otherwise
-    """
-    logger.debug(f"Searching for template '{template_id}' in module '{module_name}' across all libraries")
-    
-    for library in sorted(self.libraries, key=lambda x: x.priority, reverse=True):
-      try:
-        template_path, lib_name = library.find_by_id(module_name, template_id)
-        logger.debug(f"Found template '{template_id}' in library '{library.name}'")
-        return template_path, lib_name
-      except TemplateNotFoundError:
-        # Continue searching in next library
-        continue
-    
-    logger.debug(f"Template '{template_id}' not found in any library")
-    return None
-  
-  def find(self, module_name: str, sort_results: bool = False) -> list[tuple[Path, str]]:
-    """Find templates across all libraries for a specific module.
-    
-    Args:
-        module_name: The module name (e.g., 'compose', 'terraform')
-        sort_results: Whether to return results sorted alphabetically
-    
-    Returns:
-        List of Path objects representing template directories from all libraries
-    """
-    logger.debug(f"Searching for templates in module '{module_name}' across all libraries")
-    
-    all_templates = []
-    
-    for library in sorted(self.libraries, key=lambda x: x.priority, reverse=True):
-      try:
-        templates = library.find(module_name, sort_results=False)  # Sort at the end
-        all_templates.extend(templates)
-        logger.debug(f"Found {len(templates)} templates in library '{library.name}'")
-      except LibraryError:
-        # Module not found in this library, continue with next
-        logger.debug(f"Module '{module_name}' not found in library '{library.name}'")
-        continue
-    
-    # Remove duplicates based on template name (directory name)
-    seen_names = set()
-    unique_templates = []
-    for template in all_templates:
-      name, library_name = template
-      if name.name not in seen_names:
-        unique_templates.append((name, library_name))
-        seen_names.add(name.name)
-    
-    # Sort if requested
-    if sort_results:
-      unique_templates.sort(key=lambda x: x[0].name.lower())
-    
-    logger.debug(f"Found {len(unique_templates)} unique templates total")
-    return unique_templates
+
+        # Check if this is a qualified ID (contains '.')
+        if "." in template_id:
+            # rsplit with maxsplit=1 always yields exactly two parts here
+            base_id, requested_lib = template_id.rsplit(".", 1)
+            logger.debug(
+                f"Parsing qualified ID: base='{base_id}', library='{requested_lib}'"
+            )
+
+            # Try to find in the specific library
+            for library in self.libraries:
+                if library.name == requested_lib:
+                    try:
+                        template_path, lib_name = library.find_by_id(
+                            module_name, base_id
+                        )
+                        logger.debug(
+                            f"Found template '{base_id}' in library '{requested_lib}'"
+                        )
+                        return template_path, lib_name
+                    except TemplateNotFoundError:
+                        logger.debug(
+                            f"Template '{base_id}' not found in library '{requested_lib}'"
+                        )
+                        return None
+
+            logger.debug(f"Library '{requested_lib}' not found")
+            return None
+
+        # Simple ID - search by priority
+        for library in sorted(self.libraries, key=lambda x: x.priority, reverse=True):
+            try:
+                template_path, lib_name = library.find_by_id(module_name, template_id)
+                logger.debug(
+                    f"Found template '{template_id}' in library '{library.name}'"
+                )
+                return template_path, lib_name
+            except TemplateNotFoundError:
+                # Continue searching in next library
+                continue
+
+        logger.debug(f"Template '{template_id}' not found in any library")
+        return None
+
+    def find(
+        self, module_name: str, sort_results: bool = False
+    ) -> list[tuple[Path, str, bool]]:
+        """Find templates across all libraries for a specific module.
+
+        Handles duplicates by qualifying IDs with library names when needed.
+
+        Args:
+            module_name: The module name (e.g., 'compose', 'terraform')
+            sort_results: Whether to return results sorted alphabetically
+
+        Returns:
+            List of tuples (template_path, library_name, needs_qualification)
+            where needs_qualification is True if the template ID appears in multiple libraries
+        """
+        logger.debug(
+            f"Searching for templates in module '{module_name}' across all libraries"
+        )
+
+        all_templates = []
+
+        # Collect templates from all libraries
+        for library in sorted(self.libraries, key=lambda x: x.priority, reverse=True):
+            try:
+                templates = library.find(module_name, sort_results=False)
+                all_templates.extend(templates)
+                logger.debug(
+                    f"Found {len(templates)} templates in library '{library.name}'"
+                )
+            except DuplicateTemplateError:
+                # Duplicate IDs within a single library are fatal - propagate up
+                raise
+            except LibraryError:
+                logger.debug(
+                    f"Module '{module_name}' not found in library '{library.name}'"
+                )
+                continue
+
+        # Track template IDs and their libraries to detect cross-library duplicates
+        id_to_occurrences: dict[str, list[tuple[Path, str]]] = {}
+        for template_path, library_name in all_templates:
+            template_id = template_path.name
+            id_to_occurrences.setdefault(template_id, []).append(
+                (template_path, library_name)
+            )
+
+        # Build result with qualification markers for duplicates
+        result = []
+        for template_id, occurrences in id_to_occurrences.items():
+            if len(occurrences) > 1:
+                # Duplicate across libraries - mark for qualified IDs
+                lib_names = ", ".join(lib for _, lib in occurrences)
+                logger.info(
+                    f"Template '{template_id}' found in multiple libraries: {lib_names}. "
+                    f"Using qualified IDs."
+                )
+                for template_path, library_name in occurrences:
+                    # Mark that this ID needs qualification
+                    result.append((template_path, library_name, True))
+            else:
+                # Unique template - no qualification needed
+                template_path, library_name = occurrences[0]
+                result.append((template_path, library_name, False))
+
+        # Sort if requested
+        if sort_results:
+            result.sort(key=lambda x: x[0].name.lower())
+
+        logger.debug(f"Found {len(result)} templates total")
+        return result
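For a sense of how the reworked lookup API behaves, here is a minimal usage sketch (hedged: the module and library names are invented, and it assumes a `LibraryManager` with at least one configured library):

    # Hypothetical usage - 'compose' templates, made-up library names.
    mgr = LibraryManager()

    # Simple ID: libraries are searched in descending priority; first match wins.
    hit = mgr.find_by_id("compose", "traefik")

    # Qualified ID ('template.library'): the lookup is pinned to that library
    # and returns None rather than falling back to the others.
    hit = mgr.find_by_id("compose", "traefik.custom")

    # find() now yields 3-tuples; the flag marks IDs that occur in several
    # libraries and therefore need the qualified 'template.library' form.
    for path, lib_name, needs_qualification in mgr.find("compose", sort_results=True):
        print(path.name, lib_name, needs_qualification)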

+ 1261 - 937
cli/core/module.py

@@ -1,21 +1,20 @@
 from __future__ import annotations
 
 import logging
-import sys
 from abc import ABC
 from pathlib import Path
-from typing import Any, Optional, List, Dict, Tuple
+from typing import Any, Optional, List, Dict
 
 from rich.console import Console
 from rich.panel import Panel
 from rich.prompt import Confirm
-from typer import Argument, Context, Option, Typer, Exit
+from typer import Argument, Option, Typer, Exit
 
 from .display import DisplayManager
 from .exceptions import (
     TemplateRenderError,
     TemplateSyntaxError,
-    TemplateValidationError
+    TemplateValidationError,
 )
 from .library import LibraryManager
 from .prompt import PromptHandler
@@ -27,946 +26,1271 @@ console_err = Console(stderr=True)
 
 
 def parse_var_inputs(var_options: List[str], extra_args: List[str]) -> Dict[str, Any]:
-  """Parse variable inputs from --var options and extra args.
-  
-  Supports formats:
-    --var KEY=VALUE
-    --var KEY VALUE
-    
-  Args:
-    var_options: List of variable options from CLI
-    extra_args: Additional arguments that may contain values
-    
-  Returns:
-    Dictionary of parsed variables
-  """
-  variables = {}
-  
-  # Parse --var KEY=VALUE format
-  for var_option in var_options:
-    if '=' in var_option:
-      key, value = var_option.split('=', 1)
-      variables[key] = value
-    else:
-      # --var KEY VALUE format - value should be in extra_args
-      if extra_args:
-        variables[var_option] = extra_args.pop(0)
-      else:
-        logger.warning(f"No value provided for variable '{var_option}'")
-  
-  return variables
+    """Parse variable inputs from --var options and extra args.
+
+    Supports formats:
+      --var KEY=VALUE
+      --var KEY VALUE
 
-class Module(ABC):
-  """Streamlined base module that auto-detects variables from templates."""
-
-  def __init__(self) -> None:
-    if not all([self.name, self.description]):
-      raise ValueError(
-        f"Module {self.__class__.__name__} must define name and description"
-      )
-    
-    logger.info(f"Initializing module '{self.name}'")
-    logger.debug(f"Module '{self.name}' configuration: description='{self.description}'")
-    self.libraries = LibraryManager()
-    self.display = DisplayManager()
-
-  def list(
-    self,
-    raw: bool = Option(False, "--raw", help="Output raw list format instead of rich table")
-  ) -> list[Template]:
-    """List all templates."""
-    logger.debug(f"Listing templates for module '{self.name}'")
-    templates = []
-
-    entries = self.libraries.find(self.name, sort_results=True)
-    for template_dir, library_name in entries:
-      try:
-        template = Template(template_dir, library_name=library_name)
-        templates.append(template)
-      except Exception as exc:
-        logger.error(f"Failed to load template from {template_dir}: {exc}")
-        continue
-    
-    filtered_templates = templates
-    
-    if filtered_templates:
-      if raw:
-        # Output raw format (tab-separated values for easy filtering with awk/sed/cut)
-        # Format: ID\tNAME\tTAGS\tVERSION\tLIBRARY
-        for template in filtered_templates:
-          name = template.metadata.name or "Unnamed Template"
-          tags_list = template.metadata.tags or []
-          tags = ",".join(tags_list) if tags_list else "-"
-          version = str(template.metadata.version) if template.metadata.version else "-"
-          library = template.metadata.library or "-"
-          print(f"{template.id}\t{name}\t{tags}\t{version}\t{library}")
-      else:
-        # Output rich table format
-        self.display.display_templates_table(
-          filtered_templates,
-          self.name,
-          f"{self.name.capitalize()} templates"
-        )
-    else:
-      logger.info(f"No templates found for module '{self.name}'")
-
-    return filtered_templates
-
-  def search(
-    self,
-    query: str = Argument(..., help="Search string to filter templates by ID")
-  ) -> list[Template]:
-    """Search for templates by ID containing the search string."""
-    logger.debug(f"Searching templates for module '{self.name}' with query='{query}'")
-    templates = []
-
-    entries = self.libraries.find(self.name, sort_results=True)
-    for template_dir, library_name in entries:
-      try:
-        template = Template(template_dir, library_name=library_name)
-        templates.append(template)
-      except Exception as exc:
-        logger.error(f"Failed to load template from {template_dir}: {exc}")
-        continue
-    
-    # Apply search filtering
-    filtered_templates = [t for t in templates if query.lower() in t.id.lower()]
-    
-    if filtered_templates:
-      logger.info(f"Found {len(filtered_templates)} templates matching '{query}' for module '{self.name}'")
-      self.display.display_templates_table(
-        filtered_templates,
-        self.name,
-        f"{self.name.capitalize()} templates matching '{query}'"
-      )
-    else:
-      logger.info(f"No templates found matching '{query}' for module '{self.name}'")
-      self.display.display_warning(f"No templates found matching '{query}'", context=f"module '{self.name}'")
-
-    return filtered_templates
-
-
-  def show(
-    self,
-    id: str,
-  ) -> None:
-    """Show template details."""
-    logger.debug(f"Showing template '{id}' from module '{self.name}'")
-    template = self._load_template_by_id(id)
-
-    if not template:
-      self.display.display_error(f"Template '{id}' not found", context=f"module '{self.name}'")
-      return
-    
-    # Apply config defaults (same as in generate)
-    # This ensures the display shows the actual defaults that will be used
-    if template.variables:
-      from .config import ConfigManager
-      config = ConfigManager()
-      config_defaults = config.get_defaults(self.name)
-      
-      if config_defaults:
-        logger.debug(f"Loading config defaults for module '{self.name}'")
-        # Apply config defaults (this respects the variable types and validation)
-        successful = template.variables.apply_defaults(config_defaults, "config")
-        if successful:
-          logger.debug(f"Applied config defaults for: {', '.join(successful)}")
-      
-      # Re-sort sections after applying config (toggle values may have changed)
-      template.variables.sort_sections()
-    
-    self._display_template_details(template, id)
-
-  def _apply_variable_defaults(self, template: Template) -> None:
-    """Apply config defaults and CLI overrides to template variables.
-    
-    Args:
-        template: Template instance with variables to configure
-    """
-    if not template.variables:
-      return
-    
-    from .config import ConfigManager
-    config = ConfigManager()
-    config_defaults = config.get_defaults(self.name)
-    
-    if config_defaults:
-      logger.info(f"Loading config defaults for module '{self.name}'")
-      successful = template.variables.apply_defaults(config_defaults, "config")
-      if successful:
-        logger.debug(f"Applied config defaults for: {', '.join(successful)}")
-
-  def _apply_cli_overrides(self, template: Template, var: Optional[List[str]], ctx=None) -> None:
-    """Apply CLI variable overrides to template.
-    
-    Args:
-        template: Template instance to apply overrides to
-        var: List of variable override strings from --var flags
-        ctx: Context object containing extra args (optional, will get current context if None)
-    """
-    if not template.variables:
-      return
-    
-    # Get context if not provided (compatible with all Typer versions)
-    if ctx is None:
-      import click
-      try:
-        ctx = click.get_current_context()
-      except RuntimeError:
-        ctx = None
-    
-    extra_args = list(ctx.args) if ctx and hasattr(ctx, "args") else []
-    cli_overrides = parse_var_inputs(var or [], extra_args)
-    
-    if cli_overrides:
-      logger.info(f"Received {len(cli_overrides)} variable overrides from CLI")
-      successful_overrides = template.variables.apply_defaults(cli_overrides, "cli")
-      if successful_overrides:
-        logger.debug(f"Applied CLI overrides for: {', '.join(successful_overrides)}")
-
-  def _collect_variable_values(self, template: Template, interactive: bool) -> Dict[str, Any]:
-    """Collect variable values from user prompts and template defaults.
-    
-    Args:
-        template: Template instance with variables
-        interactive: Whether to prompt user for values interactively
-        
-    Returns:
-        Dictionary of variable names to values
-    """
-    variable_values = {}
-    
-    # Collect values interactively if enabled
-    if interactive and template.variables:
-      prompt_handler = PromptHandler()
-      collected_values = prompt_handler.collect_variables(template.variables)
-      if collected_values:
-        variable_values.update(collected_values)
-        logger.info(f"Collected {len(collected_values)} variable values from user input")
-    
-    # Add satisfied variable values (respects dependencies and toggles)
-    if template.variables:
-      variable_values.update(template.variables.get_satisfied_values())
-    
-    return variable_values
-  def _check_output_directory(self, output_dir: Path, rendered_files: Dict[str, str], 
-                              interactive: bool) -> Optional[List[Path]]:
-    """Check output directory for conflicts and get user confirmation if needed.
-    
-    Args:
-        output_dir: Directory where files will be written
-        rendered_files: Dictionary of file paths to rendered content
-        interactive: Whether to prompt user for confirmation
-        
-    Returns:
-        List of existing files that will be overwritten, or None to cancel
-    """
-    dir_exists = output_dir.exists()
-    dir_not_empty = dir_exists and any(output_dir.iterdir())
-    
-    # Check which files already exist
-    existing_files = []
-    if dir_exists:
-      for file_path in rendered_files.keys():
-        full_path = output_dir / file_path
-        if full_path.exists():
-          existing_files.append(full_path)
-    
-    # Warn if directory is not empty
-    if dir_not_empty:
-      if interactive:
-        details = []
-        if existing_files:
-          details.append(f"{len(existing_files)} file(s) will be overwritten.")
-        
-        if not self.display.display_warning_with_confirmation(
-          f"Directory '{output_dir}' is not empty.",
-          details if details else None,
-          default=False
-        ):
-          self.display.display_info("Generation cancelled")
-          return None
-      else:
-        # Non-interactive mode: show warning but continue
-        logger.warning(f"Directory '{output_dir}' is not empty")
-        if existing_files:
-          logger.warning(f"{len(existing_files)} file(s) will be overwritten")
-    
-    return existing_files
-
-  def _get_generation_confirmation(self, output_dir: Path, rendered_files: Dict[str, str], 
-                                    existing_files: Optional[List[Path]], dir_not_empty: bool, 
-                                    dry_run: bool, interactive: bool) -> bool:
-    """Display file generation confirmation and get user approval.
-    
     Args:
-        output_dir: Output directory path
-        rendered_files: Dictionary of file paths to content
-        existing_files: List of existing files that will be overwritten
-        dir_not_empty: Whether output directory already contains files
-        dry_run: Whether this is a dry run
-        interactive: Whether to prompt for confirmation
-        
+      var_options: List of variable options from CLI
+      extra_args: Additional arguments that may contain values
+
     Returns:
-        True if user confirms generation, False to cancel
-    """
-    if not interactive:
-      return True
-    
-    self.display.display_file_generation_confirmation(
-      output_dir, 
-      rendered_files, 
-      existing_files if existing_files else None
-    )
-    
-    # Final confirmation (only if we didn't already ask about overwriting)
-    if not dir_not_empty and not dry_run:
-      if not Confirm.ask("Generate these files?", default=True):
-        self.display.display_info("Generation cancelled")
-        return False
-    
-    return True
-
-  def _execute_dry_run(self, id: str, output_dir: Path, rendered_files: Dict[str, str], show_files: bool) -> None:
-    """Execute dry run mode with comprehensive simulation.
-    
-    Simulates all filesystem operations that would occur during actual generation,
-    including directory creation, file writing, and permission checks.
-    
-    Args:
-        id: Template ID
-        output_dir: Directory where files would be written
-        rendered_files: Dictionary of file paths to rendered content
-        show_files: Whether to display file contents
+      Dictionary of parsed variables
     """
-    import os
-    
-    console.print()
-    console.print("[bold cyan]Dry Run Mode - Simulating File Generation[/bold cyan]")
-    console.print()
-    
-    # Simulate directory creation
-    self.display.display_heading("Directory Operations", icon_type="folder")
-    
-    # Check if output directory exists
-    if output_dir.exists():
-      self.display.display_success(f"Output directory exists: [cyan]{output_dir}[/cyan]")
-      # Check if we have write permissions
-      if os.access(output_dir, os.W_OK):
-        self.display.display_success("Write permission verified")
-      else:
-        self.display.display_warning("Write permission may be denied")
-    else:
-      console.print(f"  [dim]→[/dim] Would create output directory: [cyan]{output_dir}[/cyan]")
-      # Check if parent directory exists and is writable
-      parent = output_dir.parent
-      if parent.exists() and os.access(parent, os.W_OK):
-        self.display.display_success("Parent directory writable")
-      else:
-        self.display.display_warning("Parent directory may not be writable")
-    
-    # Collect unique subdirectories that would be created
-    subdirs = set()
-    for file_path in rendered_files.keys():
-      parts = Path(file_path).parts
-      for i in range(1, len(parts)):
-        subdirs.add(Path(*parts[:i]))
-    
-    if subdirs:
-      console.print(f"  [dim]→[/dim] Would create {len(subdirs)} subdirectory(ies)")
-      for subdir in sorted(subdirs):
-        console.print(f"    [dim]📁[/dim] {subdir}/")
-    
-    console.print()
-    
-    # Display file operations in a table
-    self.display.display_heading("File Operations", icon_type="file")
-    
-    total_size = 0
-    new_files = 0
-    overwrite_files = 0
-    file_operations = []
-    
-    for file_path, content in sorted(rendered_files.items()):
-      full_path = output_dir / file_path
-      file_size = len(content.encode('utf-8'))
-      total_size += file_size
-      
-      # Determine status
-      if full_path.exists():
-        status = "Overwrite"
-        overwrite_files += 1
-      else:
-        status = "Create"
-        new_files += 1
-      
-      file_operations.append((file_path, file_size, status))
-    
-    self.display.display_file_operation_table(file_operations)
-    console.print()
-    
-    # Summary statistics
-    if total_size < 1024:
-      size_str = f"{total_size}B"
-    elif total_size < 1024 * 1024:
-      size_str = f"{total_size / 1024:.1f}KB"
-    else:
-      size_str = f"{total_size / (1024 * 1024):.1f}MB"
-    
-    summary_items = {
-      "Total files:": str(len(rendered_files)),
-      "New files:": str(new_files),
-      "Files to overwrite:": str(overwrite_files),
-      "Total size:": size_str
-    }
-    self.display.display_summary_table("Summary", summary_items)
-    console.print()
-    
-    # Show file contents if requested
-    if show_files:
-      console.print("[bold cyan]Generated File Contents:[/bold cyan]")
-      console.print()
-      for file_path, content in sorted(rendered_files.items()):
-        console.print(f"[cyan]File:[/cyan] {file_path}")
-        print(f"{'─'*80}")
-        print(content)
-        print()  # Add blank line after content
-      console.print()
-    
-    self.display.display_success("Dry run complete - no files were written")
-    console.print(f"[dim]Files would have been generated in '{output_dir}'[/dim]")
-    logger.info(f"Dry run completed for template '{id}' - {len(rendered_files)} files, {total_size} bytes")
-
-  def _write_generated_files(self, output_dir: Path, rendered_files: Dict[str, str], quiet: bool = False) -> None:
-    """Write rendered files to the output directory.
-    
-    Args:
-        output_dir: Directory to write files to
-        rendered_files: Dictionary of file paths to rendered content
-        quiet: Suppress output messages
-    """
-    output_dir.mkdir(parents=True, exist_ok=True)
-    
-    for file_path, content in rendered_files.items():
-      full_path = output_dir / file_path
-      full_path.parent.mkdir(parents=True, exist_ok=True)
-      with open(full_path, 'w', encoding='utf-8') as f:
-        f.write(content)
-      if not quiet:
-        console.print(f"[green]Generated file: {file_path}[/green]")  # Keep simple per-file output
-    
-    if not quiet:
-      self.display.display_success(f"Template generated successfully in '{output_dir}'")
-    logger.info(f"Template written to directory: {output_dir}")
-
-  def generate(
-    self,
-    id: str = Argument(..., help="Template ID"),
-    directory: Optional[str] = Argument(None, help="Output directory (defaults to template ID)"),
-    interactive: bool = Option(True, "--interactive/--no-interactive", "-i/-n", help="Enable interactive prompting for variables"),
-    var: Optional[list[str]] = Option(None, "--var", "-v", help="Variable override (repeatable). Supports: KEY=VALUE or KEY VALUE"),
-    dry_run: bool = Option(False, "--dry-run", help="Preview template generation without writing files"),
-    show_files: bool = Option(False, "--show-files", help="Display generated file contents in plain text (use with --dry-run)"),
-    quiet: bool = Option(False, "--quiet", "-q", help="Suppress all non-error output"),
-  ) -> None:
-    """Generate from template.
-    
-    Variable precedence chain (lowest to highest):
-    1. Module spec (defined in cli/modules/*.py)
-    2. Template spec (from template.yaml)
-    3. Config defaults (from ~/.config/boilerplates/config.yaml)
-    4. CLI overrides (--var flags)
-    
-    Examples:
-        # Generate to directory named after template
-        cli compose generate traefik
-        
-        # Generate to custom directory
-        cli compose generate traefik my-proxy
-        
-        # Generate with variables
-        cli compose generate traefik --var traefik_enabled=false
-        
-        # Preview without writing files (dry run)
-        cli compose generate traefik --dry-run
-        
-        # Preview and show generated file contents
-        cli compose generate traefik --dry-run --show-files
-    """
-    logger.info(f"Starting generation for template '{id}' from module '{self.name}'")
-    
-    # Create a display manager with quiet mode if needed
-    display = DisplayManager(quiet=quiet) if quiet else self.display
-    
-    template = self._load_template_by_id(id)
-
-    # Apply defaults and overrides
-    self._apply_variable_defaults(template)
-    self._apply_cli_overrides(template, var)
-    
-    # Re-sort sections after all overrides (toggle values may have changed)
-    if template.variables:
-      template.variables.sort_sections()
-
-    if not quiet:
-      self._display_template_details(template, id)
-      console.print()
-
-    # Collect variable values
-    variable_values = self._collect_variable_values(template, interactive)
-
-    try:
-      # Validate and render template
-      if template.variables:
-        template.variables.validate_all()
-      
-      # Check if we're in debug mode (logger level is DEBUG)
-      debug_mode = logger.isEnabledFor(logging.DEBUG)
-      
-      rendered_files, variable_values = template.render(template.variables, debug=debug_mode)
-      
-      if not rendered_files:
-        display.display_error("Template rendering returned no files", context="template generation")
-        raise Exit(code=1)
-      
-      logger.info(f"Successfully rendered template '{id}'")
-      
-      # Determine output directory
-      output_dir = Path(directory) if directory else Path(id)
-      
-      # Check for conflicts and get confirmation (skip in quiet mode)
-      if not quiet:
-        existing_files = self._check_output_directory(output_dir, rendered_files, interactive)
-        if existing_files is None:
-          return  # User cancelled
-        
-        # Get final confirmation for generation
-        dir_not_empty = output_dir.exists() and any(output_dir.iterdir())
-        if not self._get_generation_confirmation(output_dir, rendered_files, existing_files, 
-                                                 dir_not_empty, dry_run, interactive):
-          return  # User cancelled
-      else:
-        # In quiet mode, just check for existing files without prompts
+    variables = {}
+
+    # Parse --var KEY=VALUE format
+    for var_option in var_options:
+        if "=" in var_option:
+            key, value = var_option.split("=", 1)
+            variables[key] = value
+        else:
+            # --var KEY VALUE format - value should be in extra_args
+            if extra_args:
+                variables[var_option] = extra_args.pop(0)
+            else:
+                logger.warning(f"No value provided for variable '{var_option}'")
+
+    return variables
+
+
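As a quick illustration of the two `--var` formats the parser accepts (the keys and values below are invented):

    # KEY=VALUE is split on the first '='; a bare KEY consumes the next extra arg.
    parse_var_inputs(["port=8080", "host"], ["example.local"])
    # -> {"port": "8080", "host": "example.local"}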
+class Module(ABC):
+    """Streamlined base module that auto-detects variables from templates."""
+
+    # Schema version supported by this module (override in subclasses)
+    schema_version: str = "1.0"
+
+    def __init__(self) -> None:
+        if not all([self.name, self.description]):
+            raise ValueError(
+                f"Module {self.__class__.__name__} must define name and description"
+            )
+
+        logger.info(f"Initializing module '{self.name}'")
+        logger.debug(
+            f"Module '{self.name}' configuration: description='{self.description}'"
+        )
+        self.libraries = LibraryManager()
+        self.display = DisplayManager()
+
+    def list(
+        self,
+        raw: bool = Option(
+            False, "--raw", help="Output raw list format instead of rich table"
+        ),
+    ) -> list[Template]:
+        """List all templates."""
+        logger.debug(f"Listing templates for module '{self.name}'")
+        templates = []
+
+        entries = self.libraries.find(self.name, sort_results=True)
+        for entry in entries:
+            # Unpack entry - now returns (path, library_name, needs_qualification)
+            template_dir = entry[0]
+            library_name = entry[1]
+            needs_qualification = entry[2] if len(entry) > 2 else False
+
+            try:
+                # Get library object to determine type
+                library = next(
+                    (
+                        lib
+                        for lib in self.libraries.libraries
+                        if lib.name == library_name
+                    ),
+                    None,
+                )
+                library_type = library.library_type if library else "git"
+
+                template = Template(
+                    template_dir, library_name=library_name, library_type=library_type
+                )
+
+                # Validate schema version compatibility
+                template._validate_schema_version(self.schema_version, self.name)
+
+                # If template ID needs qualification, set qualified ID
+                if needs_qualification:
+                    template.set_qualified_id()
+
+                templates.append(template)
+            except Exception as exc:
+                logger.error(f"Failed to load template from {template_dir}: {exc}")
+                continue
+
+        filtered_templates = templates
+
+        if filtered_templates:
+            if raw:
+                # Output raw format (tab-separated values for easy filtering with awk/sed/cut)
+                # Format: ID\tNAME\tTAGS\tVERSION\tLIBRARY
+                for template in filtered_templates:
+                    name = template.metadata.name or "Unnamed Template"
+                    tags_list = template.metadata.tags or []
+                    tags = ",".join(tags_list) if tags_list else "-"
+                    version = (
+                        str(template.metadata.version)
+                        if template.metadata.version
+                        else "-"
+                    )
+                    library = template.metadata.library or "-"
+                    print(f"{template.id}\t{name}\t{tags}\t{version}\t{library}")
+            else:
+                # Output rich table format
+                self.display.display_templates_table(
+                    filtered_templates, self.name, f"{self.name.capitalize()} templates"
+                )
+        else:
+            logger.info(f"No templates found for module '{self.name}'")
+
+        return filtered_templates
+
+    def search(
+        self, query: str = Argument(..., help="Search string to filter templates by ID")
+    ) -> list[Template]:
+        """Search for templates by ID containing the search string."""
+        logger.debug(
+            f"Searching templates for module '{self.name}' with query='{query}'"
+        )
+        templates = []
+
+        entries = self.libraries.find(self.name, sort_results=True)
+        for entry in entries:
+            # Unpack entry - now returns (path, library_name, needs_qualification)
+            template_dir = entry[0]
+            library_name = entry[1]
+            needs_qualification = entry[2] if len(entry) > 2 else False
+
+            try:
+                # Get library object to determine type
+                library = next(
+                    (
+                        lib
+                        for lib in self.libraries.libraries
+                        if lib.name == library_name
+                    ),
+                    None,
+                )
+                library_type = library.library_type if library else "git"
+
+                template = Template(
+                    template_dir, library_name=library_name, library_type=library_type
+                )
+
+                # Validate schema version compatibility
+                template._validate_schema_version(self.schema_version, self.name)
+
+                # If template ID needs qualification, set qualified ID
+                if needs_qualification:
+                    template.set_qualified_id()
+
+                templates.append(template)
+            except Exception as exc:
+                logger.error(f"Failed to load template from {template_dir}: {exc}")
+                continue
+
+        # Apply search filtering
+        filtered_templates = [t for t in templates if query.lower() in t.id.lower()]
+
+        if filtered_templates:
+            logger.info(
+                f"Found {len(filtered_templates)} templates matching '{query}' for module '{self.name}'"
+            )
+            self.display.display_templates_table(
+                filtered_templates,
+                self.name,
+                f"{self.name.capitalize()} templates matching '{query}'",
+            )
+        else:
+            logger.info(
+                f"No templates found matching '{query}' for module '{self.name}'"
+            )
+            self.display.display_warning(
+                f"No templates found matching '{query}'",
+                context=f"module '{self.name}'",
+            )
+
+        return filtered_templates
+
+    def show(
+        self,
+        id: str,
+    ) -> None:
+        """Show template details."""
+        logger.debug(f"Showing template '{id}' from module '{self.name}'")
+        template = self._load_template_by_id(id)
+
+        if not template:
+            self.display.display_error(
+                f"Template '{id}' not found", context=f"module '{self.name}'"
+            )
+            return
+
+        # Apply config defaults (same as in generate)
+        # This ensures the display shows the actual defaults that will be used
+        if template.variables:
+            from .config import ConfigManager
+
+            config = ConfigManager()
+            config_defaults = config.get_defaults(self.name)
+
+            if config_defaults:
+                logger.debug(f"Loading config defaults for module '{self.name}'")
+                # Apply config defaults (this respects the variable types and validation)
+                successful = template.variables.apply_defaults(
+                    config_defaults, "config"
+                )
+                if successful:
+                    logger.debug(
+                        f"Applied config defaults for: {', '.join(successful)}"
+                    )
+
+            # Re-sort sections after applying config (toggle values may have changed)
+            template.variables.sort_sections()
+
+            # Reset disabled bool variables to False to prevent confusion
+            reset_vars = template.variables.reset_disabled_bool_variables()
+            if reset_vars:
+                logger.debug(f"Reset {len(reset_vars)} disabled bool variables to False")
+
+        self._display_template_details(template, id)
+
+    def _apply_variable_defaults(self, template: Template) -> None:
+        """Apply config defaults and CLI overrides to template variables.
+
+        Args:
+            template: Template instance with variables to configure
+        """
+        if not template.variables:
+            return
+
+        from .config import ConfigManager
+
+        config = ConfigManager()
+        config_defaults = config.get_defaults(self.name)
+
+        if config_defaults:
+            logger.info(f"Loading config defaults for module '{self.name}'")
+            successful = template.variables.apply_defaults(config_defaults, "config")
+            if successful:
+                logger.debug(f"Applied config defaults for: {', '.join(successful)}")
+
+    def _apply_cli_overrides(
+        self, template: Template, var: Optional[List[str]], ctx=None
+    ) -> None:
+        """Apply CLI variable overrides to template.
+
+        Args:
+            template: Template instance to apply overrides to
+            var: List of variable override strings from --var flags
+            ctx: Context object containing extra args (optional; falls back to the current Click context if None)
+        """
+        if not template.variables:
+            return
+
+        # Get context if not provided (compatible with all Typer versions)
+        if ctx is None:
+            import click
+
+            try:
+                ctx = click.get_current_context()
+            except RuntimeError:
+                ctx = None
+
+        extra_args = list(ctx.args) if ctx and hasattr(ctx, "args") else []
+        cli_overrides = parse_var_inputs(var or [], extra_args)
+
+        if cli_overrides:
+            logger.info(f"Received {len(cli_overrides)} variable overrides from CLI")
+            successful_overrides = template.variables.apply_defaults(
+                cli_overrides, "cli"
+            )
+            if successful_overrides:
+                logger.debug(
+                    f"Applied CLI overrides for: {', '.join(successful_overrides)}"
+                )
+
+    def _collect_variable_values(
+        self, template: Template, interactive: bool
+    ) -> Dict[str, Any]:
+        """Collect variable values from user prompts and template defaults.
+
+        Args:
+            template: Template instance with variables
+            interactive: Whether to prompt user for values interactively
+
+        Returns:
+            Dictionary of variable names to values
+        """
+        variable_values = {}
+
+        # Collect values interactively if enabled
+        if interactive and template.variables:
+            prompt_handler = PromptHandler()
+            collected_values = prompt_handler.collect_variables(template.variables)
+            if collected_values:
+                variable_values.update(collected_values)
+                logger.info(
+                    f"Collected {len(collected_values)} variable values from user input"
+                )
+
+        # Add satisfied variable values (respects dependencies and toggles)
+        if template.variables:
+            variable_values.update(template.variables.get_satisfied_values())
+
+        return variable_values
+
+    def _check_output_directory(
+        self, output_dir: Path, rendered_files: Dict[str, str], interactive: bool
+    ) -> Optional[List[Path]]:
+        """Check output directory for conflicts and get user confirmation if needed.
+
+        Args:
+            output_dir: Directory where files will be written
+            rendered_files: Dictionary of file paths to rendered content
+            interactive: Whether to prompt user for confirmation
+
+        Returns:
+            List of existing files that will be overwritten, or None to cancel
+        """
+        dir_exists = output_dir.exists()
+        dir_not_empty = dir_exists and any(output_dir.iterdir())
+
+        # Check which files already exist
         existing_files = []
-      
-      # Execute generation (dry run or actual)
-      if dry_run:
-        if not quiet:
-          self._execute_dry_run(id, output_dir, rendered_files, show_files)
-      else:
-        self._write_generated_files(output_dir, rendered_files, quiet=quiet)
-      
-      # Display next steps (not in quiet mode)
-      if template.metadata.next_steps and not quiet:
-        display.display_next_steps(template.metadata.next_steps, variable_values)
-
-    except TemplateRenderError as e:
-      # Display enhanced error information for template rendering errors (always show errors)
-      display.display_template_render_error(e, context=f"template '{id}'")
-      raise Exit(code=1)
-    except Exception as e:
-      display.display_error(str(e), context=f"generating template '{id}'")
-      raise Exit(code=1)
-
-  def config_get(
-    self,
-    var_name: Optional[str] = Argument(None, help="Variable name to get (omit to show all defaults)"),
-  ) -> None:
-    """Get default value(s) for this module.
-    
-    Examples:
-        # Get all defaults for module
-        cli compose defaults get
-        
-        # Get specific variable default
-        cli compose defaults get service_name
-    """
-    from .config import ConfigManager
-    config = ConfigManager()
-    
-    if var_name:
-      # Get specific variable default
-      value = config.get_default_value(self.name, var_name)
-      if value is not None:
-        console.print(f"[green]{var_name}[/green] = [yellow]{value}[/yellow]")
-      else:
-        self.display.display_warning(f"No default set for variable '{var_name}'", context=f"module '{self.name}'")
-    else:
-      # Show all defaults (flat list)
-      defaults = config.get_defaults(self.name)
-      if defaults:
-        console.print(f"[bold]Config defaults for module '{self.name}':[/bold]\n")
-        for var_name, var_value in defaults.items():
-          console.print(f"  [green]{var_name}[/green] = [yellow]{var_value}[/yellow]")
-      else:
-        console.print(f"[yellow]No defaults configured for module '{self.name}'[/yellow]")
-
-  def config_set(
-    self,
-    var_name: str = Argument(..., help="Variable name or var=value format"),
-    value: Optional[str] = Argument(None, help="Default value (not needed if using var=value format)"),
-  ) -> None:
-    """Set a default value for a variable.
-    
-    This only sets the DEFAULT VALUE, not the variable spec.
-    The variable must be defined in the module or template spec.
-    
-    Supports both formats:
-      - var_name value
-      - var_name=value
-    
-    Examples:
-        # Set default value (format 1)
-        cli compose defaults set service_name my-awesome-app
-        
-        # Set default value (format 2)
-        cli compose defaults set service_name=my-awesome-app
-        
-        # Set author for all compose templates
-        cli compose defaults set author "Christian Lempa"
-    """
-    from .config import ConfigManager
-    config = ConfigManager()
-    
-    # Parse var_name and value - support both "var value" and "var=value" formats
-    if '=' in var_name and value is None:
-      # Format: var_name=value
-      parts = var_name.split('=', 1)
-      actual_var_name = parts[0]
-      actual_value = parts[1]
-    elif value is not None:
-      # Format: var_name value
-      actual_var_name = var_name
-      actual_value = value
-    else:
-      self.display.display_error(f"Missing value for variable '{var_name}'", context="config set")
-      console.print(f"[dim]Usage: defaults set VAR_NAME VALUE or defaults set VAR_NAME=VALUE[/dim]")
-      raise Exit(code=1)
-    
-    # Set the default value
-    config.set_default_value(self.name, actual_var_name, actual_value)
-    self.display.display_success(f"Set default: [cyan]{actual_var_name}[/cyan] = [yellow]{actual_value}[/yellow]")
-    console.print(f"\n[dim]This will be used as the default value when generating templates with this module.[/dim]")
-
-  def config_remove(
-    self,
-    var_name: str = Argument(..., help="Variable name to remove"),
-  ) -> None:
-    """Remove a specific default variable value.
-    
-    Examples:
-        # Remove a default value
-        cli compose defaults rm service_name
-    """
-    from .config import ConfigManager
-    config = ConfigManager()
-    defaults = config.get_defaults(self.name)
-    
-    if not defaults:
-      console.print(f"[yellow]No defaults configured for module '{self.name}'[/yellow]")
-      return
-    
-    if var_name in defaults:
-      del defaults[var_name]
-      config.set_defaults(self.name, defaults)
-      self.display.display_success(f"Removed default for '{var_name}'")
-    else:
-      self.display.display_error(f"No default found for variable '{var_name}'")
-
-  def config_clear(
-    self,
-    var_name: Optional[str] = Argument(None, help="Variable name to clear (omit to clear all defaults)"),
-    force: bool = Option(False, "--force", "-f", help="Skip confirmation prompt"),
-  ) -> None:
-    """Clear default value(s) for this module.
-    
-    Examples:
-        # Clear specific variable default
-        cli compose defaults clear service_name
-        
-        # Clear all defaults for module
-        cli compose defaults clear --force
-    """
-    from .config import ConfigManager
-    config = ConfigManager()
-    defaults = config.get_defaults(self.name)
-    
-    if not defaults:
-      console.print(f"[yellow]No defaults configured for module '{self.name}'[/yellow]")
-      return
-    
-    if var_name:
-      # Clear specific variable
-      if var_name in defaults:
-        del defaults[var_name]
-        config.set_defaults(self.name, defaults)
-        self.display.display_success(f"Cleared default for '{var_name}'")
-      else:
-        self.display.display_error(f"No default found for variable '{var_name}'")
-    else:
-      # Clear all defaults
-      if not force:
-        detail_lines = [f"This will clear ALL defaults for module '{self.name}':", ""]
-        for var_name, var_value in defaults.items():
-          detail_lines.append(f"  [green]{var_name}[/green] = [yellow]{var_value}[/yellow]")
-        
-        self.display.display_warning("Warning: This will clear ALL defaults")
+        if dir_exists:
+            for file_path in rendered_files.keys():
+                full_path = output_dir / file_path
+                if full_path.exists():
+                    existing_files.append(full_path)
+
+        # Warn if directory is not empty
+        if dir_not_empty:
+            if interactive:
+                details = []
+                if existing_files:
+                    details.append(
+                        f"{len(existing_files)} file(s) will be overwritten."
+                    )
+
+                if not self.display.display_warning_with_confirmation(
+                    f"Directory '{output_dir}' is not empty.",
+                    details if details else None,
+                    default=False,
+                ):
+                    self.display.display_info("Generation cancelled")
+                    return None
+            else:
+                # Non-interactive mode: show warning but continue
+                logger.warning(f"Directory '{output_dir}' is not empty")
+                if existing_files:
+                    logger.warning(f"{len(existing_files)} file(s) will be overwritten")
+
+        return existing_files
+
+    def _get_generation_confirmation(
+        self,
+        output_dir: Path,
+        rendered_files: Dict[str, str],
+        existing_files: Optional[List[Path]],
+        dir_not_empty: bool,
+        dry_run: bool,
+        interactive: bool,
+    ) -> bool:
+        """Display file generation confirmation and get user approval.
+
+        Args:
+            output_dir: Output directory path
+            rendered_files: Dictionary of file paths to content
+            existing_files: List of existing files that will be overwritten
+            dir_not_empty: Whether output directory already contains files
+            dry_run: Whether this is a dry run
+            interactive: Whether to prompt for confirmation
+
+        Returns:
+            True if user confirms generation, False to cancel
+        """
+        if not interactive:
+            return True
+
+        self.display.display_file_generation_confirmation(
+            output_dir, rendered_files, existing_files if existing_files else None
+        )
+
+        # Final confirmation (only if we didn't already ask about overwriting)
+        if not dir_not_empty and not dry_run:
+            if not Confirm.ask("Generate these files?", default=True):
+                self.display.display_info("Generation cancelled")
+                return False
+
+        return True
+
+    def _execute_dry_run(
+        self,
+        id: str,
+        output_dir: Path,
+        rendered_files: Dict[str, str],
+        show_files: bool,
+    ) -> None:
+        """Execute dry run mode with comprehensive simulation.
+
+        Simulates all filesystem operations that would occur during actual generation,
+        including directory creation, file writing, and permission checks.
+
+        Args:
+            id: Template ID
+            output_dir: Directory where files would be written
+            rendered_files: Dictionary of file paths to rendered content
+            show_files: Whether to display file contents
+        """
+        import os
+
         console.print()
-        for line in detail_lines:
-          console.print(line)
+        console.print(
+            "[bold cyan]Dry Run Mode - Simulating File Generation[/bold cyan]"
+        )
         console.print()
-        if not Confirm.ask(f"[bold red]Are you sure?[/bold red]", default=False):
-          console.print("[green]Operation cancelled.[/green]")
-          return
-      
-      config.clear_defaults(self.name)
-      self.display.display_success(f"Cleared all defaults for module '{self.name}'")
-
-  def config_list(self) -> None:
-    """Display the defaults for this specific module in YAML format.
-    
-    Examples:
-        # Show the defaults for the current module
-        cli compose defaults list
-    """
-    from .config import ConfigManager
-    import yaml
-    
-    config = ConfigManager()
-    
-    # Get only the defaults for this module
-    defaults = config.get_defaults(self.name)
-    
-    if not defaults:
-      console.print(f"[yellow]No configuration found for module '{self.name}'[/yellow]")
-      console.print(f"\n[dim]Config file location: {config.get_config_path()}[/dim]")
-      return
-    
-    # Create a minimal config structure with only this module's defaults
-    module_config = {
-      "defaults": {
-        self.name: defaults
-      }
-    }
-    
-    # Convert config to YAML string
-    yaml_output = yaml.dump(module_config, default_flow_style=False, sort_keys=False)
-    
-    console.print(f"[bold]Configuration for module:[/bold] [cyan]{self.name}[/cyan]")
-    console.print(f"[dim]Config file: {config.get_config_path()}[/dim]\n")
-    console.print(Panel(yaml_output, title=f"{self.name.capitalize()} Config", border_style="blue"))
-
-  def validate(
-    self,
-    template_id: str = Argument(None, help="Template ID to validate (if omitted, validates all templates)"),
-    path: Optional[str] = Option(None, "--path", "-p", help="Validate a template from a specific directory path"),
-    verbose: bool = Option(False, "--verbose", "-v", help="Show detailed validation information"),
-    semantic: bool = Option(True, "--semantic/--no-semantic", help="Enable semantic validation (Docker Compose schema, etc.)")
-  ) -> None:
-    """Validate templates for Jinja2 syntax, undefined variables, and semantic correctness.
-    
-    Validation includes:
-    - Jinja2 syntax checking
-    - Variable definition checking
-    - Semantic validation (when --semantic is enabled):
-      - Docker Compose file structure
-      - YAML syntax
-      - Configuration best practices
-    
-    Examples:
-        # Validate all templates in this module
-        cli compose validate
-        
-        # Validate a specific template
-        cli compose validate gitlab
-        
-        # Validate a template from a specific path
-        cli compose validate --path /path/to/template
-        
-        # Validate with verbose output
-        cli compose validate --verbose
-        
-        # Skip semantic validation (only Jinja2)
-        cli compose validate --no-semantic
-    """
-    from rich.table import Table
-    from .validators import get_validator_registry
-    
-    # Validate from path takes precedence
-    if path:
-      try:
-        template_path = Path(path).resolve()
-        if not template_path.exists():
-          self.display.display_error(f"Path does not exist: {path}")
-          raise Exit(code=1)
-        if not template_path.is_dir():
-          self.display.display_error(f"Path is not a directory: {path}")
-          raise Exit(code=1)
-        
-        console.print(f"[bold]Validating template from path:[/bold] [cyan]{template_path}[/cyan]\n")
-        template = Template(template_path, library_name="local")
-        template_id = template.id
-      except Exception as e:
-        self.display.display_error(f"Failed to load template from path '{path}': {e}")
-        raise Exit(code=1)
-    elif template_id:
-      # Validate a specific template by ID
-      try:
-        template = self._load_template_by_id(template_id)
-        console.print(f"[bold]Validating template:[/bold] [cyan]{template_id}[/cyan]\n")
-      except Exception as e:
-        self.display.display_error(f"Failed to load template '{template_id}': {e}")
-        raise Exit(code=1)
-    else:
-      # Validate all templates - handled separately below
-      template = None
-    
-    # Single template validation
-    if template:
-      try:
-        # Trigger validation by accessing used_variables
-        _ = template.used_variables
-        # Trigger variable definition validation by accessing variables
-        _ = template.variables
-        self.display.display_success("Jinja2 validation passed")
-        
-        # Semantic validation
-        if semantic:
-          console.print(f"\n[bold cyan]Running semantic validation...[/bold cyan]")
-          registry = get_validator_registry()
-          has_semantic_errors = False
-          
-          # Render template with default values for validation
-          debug_mode = logger.isEnabledFor(logging.DEBUG)
-          rendered_files, _ = template.render(template.variables, debug=debug_mode)
-          
-          for file_path, content in rendered_files.items():
-            result = registry.validate_file(content, file_path)
+
+        # Simulate directory creation
+        self.display.display_heading("Directory Operations", icon_type="folder")
+
+        # Check if output directory exists
+        if output_dir.exists():
+            self.display.display_success(
+                f"Output directory exists: [cyan]{output_dir}[/cyan]"
+            )
+            # Check if we have write permissions
+            if os.access(output_dir, os.W_OK):
+                self.display.display_success("Write permission verified")
+            else:
+                self.display.display_warning("Write permission may be denied")
+        else:
+            console.print(
+                f"  [dim]→[/dim] Would create output directory: [cyan]{output_dir}[/cyan]"
+            )
+            # Check if parent directory exists and is writable
+            parent = output_dir.parent
+            if parent.exists() and os.access(parent, os.W_OK):
+                self.display.display_success("Parent directory writable")
+            else:
+                self.display.display_warning("Parent directory may not be writable")
+
+        # Collect unique subdirectories that would be created
+        subdirs = set()
+        for file_path in rendered_files.keys():
+            parts = Path(file_path).parts
+            for i in range(1, len(parts)):
+                subdirs.add(Path(*parts[:i]))
+
+        if subdirs:
+            console.print(
+                f"  [dim]→[/dim] Would create {len(subdirs)} subdirectory(ies)"
+            )
+            for subdir in sorted(subdirs):
+                console.print(f"    [dim]📁[/dim] {subdir}/")
+
+        console.print()
+
+        # Display file operations in a table
+        self.display.display_heading("File Operations", icon_type="file")
+
+        total_size = 0
+        new_files = 0
+        overwrite_files = 0
+        file_operations = []
+
+        for file_path, content in sorted(rendered_files.items()):
+            full_path = output_dir / file_path
+            file_size = len(content.encode("utf-8"))
+            total_size += file_size
+
+            # Determine status
+            if full_path.exists():
+                status = "Overwrite"
+                overwrite_files += 1
+            else:
+                status = "Create"
+                new_files += 1
+
+            file_operations.append((file_path, file_size, status))
+
+        self.display.display_file_operation_table(file_operations)
+        console.print()
+
+        # Summary statistics
+        if total_size < 1024:
+            size_str = f"{total_size}B"
+        elif total_size < 1024 * 1024:
+            size_str = f"{total_size / 1024:.1f}KB"
+        else:
+            size_str = f"{total_size / (1024 * 1024):.1f}MB"
+
+        summary_items = {
+            "Total files:": str(len(rendered_files)),
+            "New files:": str(new_files),
+            "Files to overwrite:": str(overwrite_files),
+            "Total size:": size_str,
+        }
+        self.display.display_summary_table("Summary", summary_items)
+        console.print()
+
+        # Show file contents if requested
+        if show_files:
+            console.print("[bold cyan]Generated File Contents:[/bold cyan]")
+            console.print()
+            for file_path, content in sorted(rendered_files.items()):
+                console.print(f"[cyan]File:[/cyan] {file_path}")
+                print(f"{'─' * 80}")
+                print(content)
+                print()  # Add blank line after content
+            console.print()
+
+        self.display.display_success("Dry run complete - no files were written")
+        console.print(f"[dim]Files would have been generated in '{output_dir}'[/dim]")
+        logger.info(
+            f"Dry run completed for template '{id}' - {len(rendered_files)} files, {total_size} bytes"
+        )
+
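For reference, a minimal standalone sketch of the prefix-accumulation step _execute_dry_run uses to derive the subdirectories it would create; the rendered paths here are illustrative:

from pathlib import Path

# Hypothetical rendered file paths, as template.render() would return them
rendered = ["compose.yaml", "config/traefik.yaml", "config/dynamic/tls.yaml"]

subdirs = set()
for file_path in rendered:
    parts = Path(file_path).parts
    # Collect every proper prefix of the path: config/, config/dynamic/, ...
    for i in range(1, len(parts)):
        subdirs.add(Path(*parts[:i]))

print(sorted(subdirs))  # [PosixPath('config'), PosixPath('config/dynamic')]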
+    def _write_generated_files(
+        self, output_dir: Path, rendered_files: Dict[str, str], quiet: bool = False
+    ) -> None:
+        """Write rendered files to the output directory.
+
+        Args:
+            output_dir: Directory to write files to
+            rendered_files: Dictionary of file paths to rendered content
+            quiet: Suppress output messages
+        """
+        output_dir.mkdir(parents=True, exist_ok=True)
+
+        for file_path, content in rendered_files.items():
+            full_path = output_dir / file_path
+            full_path.parent.mkdir(parents=True, exist_ok=True)
+            with open(full_path, "w", encoding="utf-8") as f:
+                f.write(content)
+            if not quiet:
+                console.print(
+                    f"[green]Generated file: {file_path}[/green]"
+                )  # Keep simple per-file output
+
+        if not quiet:
+            self.display.display_success(
+                f"Template generated successfully in '{output_dir}'"
+            )
+        logger.info(f"Template written to directory: {output_dir}")
+
+    def generate(
+        self,
+        id: str = Argument(..., help="Template ID"),
+        directory: Optional[str] = Argument(
+            None, help="Output directory (defaults to template ID)"
+        ),
+        interactive: bool = Option(
+            True,
+            "--interactive/--no-interactive",
+            "-i/-n",
+            help="Enable interactive prompting for variables",
+        ),
+        var: Optional[list[str]] = Option(
+            None,
+            "--var",
+            "-v",
+            help="Variable override (repeatable). Supports: KEY=VALUE or KEY VALUE",
+        ),
+        dry_run: bool = Option(
+            False, "--dry-run", help="Preview template generation without writing files"
+        ),
+        show_files: bool = Option(
+            False,
+            "--show-files",
+            help="Display generated file contents in plain text (use with --dry-run)",
+        ),
+        quiet: bool = Option(
+            False, "--quiet", "-q", help="Suppress all non-error output"
+        ),
+    ) -> None:
+        """Generate from template.
+
+        Variable precedence chain (lowest to highest):
+        1. Module spec (defined in cli/modules/*.py)
+        2. Template spec (from template.yaml)
+        3. Config defaults (from ~/.config/boilerplates/config.yaml)
+        4. CLI overrides (--var flags)
+
+        Examples:
+            # Generate to directory named after template
+            cli compose generate traefik
+
+            # Generate to custom directory
+            cli compose generate traefik my-proxy
+
+            # Generate with variables
+            cli compose generate traefik --var traefik_enabled=false
+
+            # Preview without writing files (dry run)
+            cli compose generate traefik --dry-run
+
+            # Preview and show generated file contents
+            cli compose generate traefik --dry-run --show-files
+        """
+        logger.info(
+            f"Starting generation for template '{id}' from module '{self.name}'"
+        )
+
+        # Create a display manager with quiet mode if needed
+        display = DisplayManager(quiet=quiet) if quiet else self.display
+
+        template = self._load_template_by_id(id)
+
+        # Apply defaults and overrides
+        self._apply_variable_defaults(template)
+        self._apply_cli_overrides(template, var)
+
+        # Re-sort sections after all overrides (toggle values may have changed)
+        if template.variables:
+            template.variables.sort_sections()
             
-            if result.errors or result.warnings or (verbose and result.info):
-              console.print(f"\n[cyan]File:[/cyan] {file_path}")
-              result.display(f"{file_path}")
-              
-              if result.errors:
-                has_semantic_errors = True
-          
-          if not has_semantic_errors:
-            self.display.display_success("Semantic validation passed")
-          else:
-            self.display.display_error("Semantic validation found errors")
-            raise Exit(code=1)
-        
-        if verbose:
-          console.print(f"\n[dim]Template path: {template.template_dir}[/dim]")
-          console.print(f"[dim]Found {len(template.used_variables)} variables[/dim]")
-          if semantic:
-            console.print(f"[dim]Generated {len(rendered_files)} files[/dim]")
-      
-      except TemplateRenderError as e:
-        # Display enhanced error information for template rendering errors
-        self.display.display_template_render_error(e, context=f"template '{template_id}'")
-        raise Exit(code=1)
-      except (TemplateSyntaxError, TemplateValidationError, ValueError) as e:
-        self.display.display_error(f"Validation failed for '{template_id}':")
-        console.print(f"\n{e}")
-        raise Exit(code=1)
-      except Exception as e:
-        self.display.display_error(f"Unexpected error validating '{template_id}': {e}")
-        raise Exit(code=1)
-      
-      return
-    else:
-      # Validate all templates
-      console.print(f"[bold]Validating all {self.name} templates...[/bold]\n")
-      
-      entries = self.libraries.find(self.name, sort_results=True)
-      total = len(entries)
-      valid_count = 0
-      invalid_count = 0
-      errors = []
-      
-      for template_dir, library_name in entries:
-        template_id = template_dir.name
+            # Reset disabled bool variables to False to prevent confusion
+            reset_vars = template.variables.reset_disabled_bool_variables()
+            if reset_vars:
+                logger.debug(f"Reset {len(reset_vars)} disabled bool variables to False")
+
+        if not quiet:
+            self._display_template_details(template, id)
+            console.print()
+
+        # Collect variable values
+        variable_values = self._collect_variable_values(template, interactive)
+
         try:
-          template = Template(template_dir, library_name=library_name)
-          # Trigger validation
-          _ = template.used_variables
-          _ = template.variables
-          valid_count += 1
-          if verbose:
-            self.display.display_success(template_id)
-        except ValueError as e:
-          invalid_count += 1
-          errors.append((template_id, str(e)))
-          if verbose:
-            self.display.display_error(template_id)
+            # Validate and render template
+            if template.variables:
+                template.variables.validate_all()
+
+            # Check if we're in debug mode (logger level is DEBUG)
+            debug_mode = logger.isEnabledFor(logging.DEBUG)
+
+            rendered_files, variable_values = template.render(
+                template.variables, debug=debug_mode
+            )
+
+            if not rendered_files:
+                display.display_error(
+                    "Template rendering returned no files",
+                    context="template generation",
+                )
+                raise Exit(code=1)
+
+            logger.info(f"Successfully rendered template '{id}'")
+
+            # Determine output directory
+            if directory:
+                output_dir = Path(directory)
+                # Check if path looks like an absolute path but is missing the leading slash
+                # This handles cases like "Users/username/path" which should be "/Users/username/path"
+                if not output_dir.is_absolute() and str(output_dir).startswith(
+                    ("Users/", "home/", "usr/", "opt/", "var/", "tmp/")
+                ):
+                    output_dir = Path("/") / output_dir
+                    logger.debug(
+                        f"Normalized relative-looking absolute path to: {output_dir}"
+                    )
+            else:
+                output_dir = Path(id)
+
+            # Check for conflicts and get confirmation (skip in quiet mode)
+            if not quiet:
+                existing_files = self._check_output_directory(
+                    output_dir, rendered_files, interactive
+                )
+                if existing_files is None:
+                    return  # User cancelled
+
+                # Get final confirmation for generation
+                dir_not_empty = output_dir.exists() and any(output_dir.iterdir())
+                if not self._get_generation_confirmation(
+                    output_dir,
+                    rendered_files,
+                    existing_files,
+                    dir_not_empty,
+                    dry_run,
+                    interactive,
+                ):
+                    return  # User cancelled
+            else:
+                # In quiet mode, skip conflict checks and confirmation prompts
+                existing_files = []
+
+            # Execute generation (dry run or actual)
+            if dry_run:
+                if not quiet:
+                    self._execute_dry_run(id, output_dir, rendered_files, show_files)
+            else:
+                self._write_generated_files(output_dir, rendered_files, quiet=quiet)
+
+            # Display next steps (not in quiet mode)
+            if template.metadata.next_steps and not quiet:
+                display.display_next_steps(
+                    template.metadata.next_steps, variable_values
+                )
+
+        except TemplateRenderError as e:
+            # Display enhanced error information for template rendering errors (always show errors)
+            display.display_template_render_error(e, context=f"template '{id}'")
+            raise Exit(code=1)
         except Exception as e:
-          invalid_count += 1
-          errors.append((template_id, f"Load error: {e}"))
-          if verbose:
-            self.display.display_warning(template_id)
-      
-      # Summary
-      summary_items = {
-        "Total templates:": str(total),
-        "[green]Valid:[/green]": str(valid_count),
-        "[red]Invalid:[/red]": str(invalid_count)
-      }
-      self.display.display_summary_table("Validation Summary", summary_items)
-      
-      # Show errors if any
-      if errors:
-        console.print(f"\n[bold red]Validation Errors:[/bold red]")
-        for template_id, error_msg in errors:
-          console.print(f"\n[yellow]Template:[/yellow] [cyan]{template_id}[/cyan]")
-          console.print(f"[dim]{error_msg}[/dim]")
-        raise Exit(code=1)
-      else:
-        self.display.display_success("All templates are valid!")
-
-  @classmethod
-  def register_cli(cls, app: Typer) -> None:
-    """Register module commands with the main app."""
-    logger.debug(f"Registering CLI commands for module '{cls.name}'")
-    
-    module_instance = cls()
-    
-    module_app = Typer(help=cls.description)
-    
-    module_app.command("list")(module_instance.list)
-    module_app.command("search")(module_instance.search)
-    module_app.command("show")(module_instance.show)
-    module_app.command("validate")(module_instance.validate)
-    
-    module_app.command(
-      "generate", 
-      context_settings={"allow_extra_args": True, "ignore_unknown_options": True}
-    )(module_instance.generate)
-    
-    # Add defaults commands (simplified - only manage default values)
-    defaults_app = Typer(help="Manage default values for template variables")
-    defaults_app.command("get", help="Get default value(s)")(module_instance.config_get)
-    defaults_app.command("set", help="Set a default value")(module_instance.config_set)
-    defaults_app.command("rm", help="Remove a specific default value")(module_instance.config_remove)
-    defaults_app.command("clear", help="Clear default value(s)")(module_instance.config_clear)
-    defaults_app.command("list", help="Display the config for this module in YAML format")(module_instance.config_list)
-    module_app.add_typer(defaults_app, name="defaults")
-    
-    app.add_typer(module_app, name=cls.name, help=cls.description)
-    logger.info(f"Module '{cls.name}' CLI commands registered")
-
-  def _load_template_by_id(self, id: str) -> Template:
-    result = self.libraries.find_by_id(self.name, id)
-    if not result:
-      raise FileNotFoundError(f"Template '{id}' not found in module '{self.name}'")
-    
-    template_dir, library_name = result
-    try:
-      return Template(template_dir, library_name=library_name)
-    except Exception as exc:
-      logger.error(f"Failed to load template '{id}': {exc}")
-      raise FileNotFoundError(f"Template '{id}' could not be loaded: {exc}") from exc
-
-  def _display_template_details(self, template: Template, id: str) -> None:
-    """Display template information panel and variables table."""
-    self.display.display_template_details(template, id)
+            display.display_error(str(e), context=f"generating template '{id}'")
+            raise Exit(code=1)
+
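As a hedged illustration of the precedence chain documented in generate() above, with each layer reduced to a plain dict (the real implementation resolves this through VariableCollection):

module_spec = {"service_name": "app", "author": ""}     # 1. module spec
template_spec = {"service_name": "traefik"}             # 2. template spec
config_defaults = {"author": "Christian Lempa"}         # 3. config defaults
cli_overrides = {"service_name": "my-proxy"}            # 4. --var flags

# Later layers win: dict unpacking applies them left to right
resolved = {**module_spec, **template_spec, **config_defaults, **cli_overrides}
print(resolved)  # {'service_name': 'my-proxy', 'author': 'Christian Lempa'}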
+    def config_get(
+        self,
+        var_name: Optional[str] = Argument(
+            None, help="Variable name to get (omit to show all defaults)"
+        ),
+    ) -> None:
+        """Get default value(s) for this module.
+
+        Examples:
+            # Get all defaults for module
+            cli compose defaults get
+
+            # Get specific variable default
+            cli compose defaults get service_name
+        """
+        from .config import ConfigManager
+
+        config = ConfigManager()
+
+        if var_name:
+            # Get specific variable default
+            value = config.get_default_value(self.name, var_name)
+            if value is not None:
+                console.print(f"[green]{var_name}[/green] = [yellow]{value}[/yellow]")
+            else:
+                self.display.display_warning(
+                    f"No default set for variable '{var_name}'",
+                    context=f"module '{self.name}'",
+                )
+        else:
+            # Show all defaults (flat list)
+            defaults = config.get_defaults(self.name)
+            if defaults:
+                console.print(
+                    f"[bold]Config defaults for module '{self.name}':[/bold]\n"
+                )
+                for name, value in defaults.items():
+                    console.print(
+                        f"  [green]{name}[/green] = [yellow]{value}[/yellow]"
+                    )
+            else:
+                console.print(
+                    f"[yellow]No defaults configured for module '{self.name}'[/yellow]"
+                )
+
+    def config_set(
+        self,
+        var_name: str = Argument(..., help="Variable name or var=value format"),
+        value: Optional[str] = Argument(
+            None, help="Default value (not needed if using var=value format)"
+        ),
+    ) -> None:
+        """Set a default value for a variable.
+
+        This only sets the DEFAULT VALUE, not the variable spec.
+        The variable must be defined in the module or template spec.
+
+        Supports both formats:
+          - var_name value
+          - var_name=value
+
+        Examples:
+            # Set default value (format 1)
+            cli compose defaults set service_name my-awesome-app
+
+            # Set default value (format 2)
+            cli compose defaults set service_name=my-awesome-app
+
+            # Set author for all compose templates
+            cli compose defaults set author "Christian Lempa"
+        """
+        from .config import ConfigManager
+
+        config = ConfigManager()
+
+        # Parse var_name and value - support both "var value" and "var=value" formats
+        if "=" in var_name and value is None:
+            # Format: var_name=value
+            parts = var_name.split("=", 1)
+            actual_var_name = parts[0]
+            actual_value = parts[1]
+        elif value is not None:
+            # Format: var_name value
+            actual_var_name = var_name
+            actual_value = value
+        else:
+            self.display.display_error(
+                f"Missing value for variable '{var_name}'", context="config set"
+            )
+            console.print(
+                "[dim]Usage: defaults set VAR_NAME VALUE or defaults set VAR_NAME=VALUE[/dim]"
+            )
+            raise Exit(code=1)
+
+        # Set the default value
+        config.set_default_value(self.name, actual_var_name, actual_value)
+        self.display.display_success(
+            f"Set default: [cyan]{actual_var_name}[/cyan] = [yellow]{actual_value}[/yellow]"
+        )
+        console.print(
+            "\n[dim]This will be used as the default value when generating templates with this module.[/dim]"
+        )
+
+    def config_remove(
+        self,
+        var_name: str = Argument(..., help="Variable name to remove"),
+    ) -> None:
+        """Remove a specific default variable value.
+
+        Examples:
+            # Remove a default value
+            cli compose defaults rm service_name
+        """
+        from .config import ConfigManager
+
+        config = ConfigManager()
+        defaults = config.get_defaults(self.name)
+
+        if not defaults:
+            console.print(
+                f"[yellow]No defaults configured for module '{self.name}'[/yellow]"
+            )
+            return
+
+        if var_name in defaults:
+            del defaults[var_name]
+            config.set_defaults(self.name, defaults)
+            self.display.display_success(f"Removed default for '{var_name}'")
+        else:
+            self.display.display_error(f"No default found for variable '{var_name}'")
+
+    def config_clear(
+        self,
+        var_name: Optional[str] = Argument(
+            None, help="Variable name to clear (omit to clear all defaults)"
+        ),
+        force: bool = Option(False, "--force", "-f", help="Skip confirmation prompt"),
+    ) -> None:
+        """Clear default value(s) for this module.
+
+        Examples:
+            # Clear specific variable default
+            cli compose defaults clear service_name
+
+            # Clear all defaults for module
+            cli compose defaults clear --force
+        """
+        from .config import ConfigManager
+
+        config = ConfigManager()
+        defaults = config.get_defaults(self.name)
+
+        if not defaults:
+            console.print(
+                f"[yellow]No defaults configured for module '{self.name}'[/yellow]"
+            )
+            return
+
+        if var_name:
+            # Clear specific variable
+            if var_name in defaults:
+                del defaults[var_name]
+                config.set_defaults(self.name, defaults)
+                self.display.display_success(f"Cleared default for '{var_name}'")
+            else:
+                self.display.display_error(
+                    f"No default found for variable '{var_name}'"
+                )
+        else:
+            # Clear all defaults
+            if not force:
+                detail_lines = [
+                    f"This will clear ALL defaults for module '{self.name}':",
+                    "",
+                ]
+                for name, value in defaults.items():
+                    detail_lines.append(
+                        f"  [green]{name}[/green] = [yellow]{value}[/yellow]"
+                    )
+
+                self.display.display_warning("Warning: This will clear ALL defaults")
+                console.print()
+                for line in detail_lines:
+                    console.print(line)
+                console.print()
+                if not Confirm.ask("[bold red]Are you sure?[/bold red]", default=False):
+                    console.print("[green]Operation cancelled.[/green]")
+                    return
+
+            config.clear_defaults(self.name)
+            self.display.display_success(
+                f"Cleared all defaults for module '{self.name}'"
+            )
+
+    def config_list(self) -> None:
+        """Display the defaults for this specific module in YAML format.
+
+        Examples:
+            # Show the defaults for the current module
+            cli compose defaults list
+        """
+        from .config import ConfigManager
+        import yaml
+
+        config = ConfigManager()
+
+        # Get only the defaults for this module
+        defaults = config.get_defaults(self.name)
+
+        if not defaults:
+            console.print(
+                f"[yellow]No configuration found for module '{self.name}'[/yellow]"
+            )
+            console.print(
+                f"\n[dim]Config file location: {config.get_config_path()}[/dim]"
+            )
+            return
+
+        # Create a minimal config structure with only this module's defaults
+        module_config = {"defaults": {self.name: defaults}}
+
+        # Convert config to YAML string
+        yaml_output = yaml.dump(
+            module_config, default_flow_style=False, sort_keys=False
+        )
+
+        console.print(
+            f"[bold]Configuration for module:[/bold] [cyan]{self.name}[/cyan]"
+        )
+        console.print(f"[dim]Config file: {config.get_config_path()}[/dim]\n")
+        console.print(
+            Panel(
+                yaml_output,
+                title=f"{self.name.capitalize()} Config",
+                border_style="blue",
+            )
+        )
+
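For reference, the YAML shape config_list prints can be reproduced directly; the module name and values here are illustrative:

import yaml

module_config = {"defaults": {"compose": {"author": "Christian Lempa"}}}
print(yaml.dump(module_config, default_flow_style=False, sort_keys=False))
# defaults:
#   compose:
#     author: Christian Lempa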
+    def validate(
+        self,
+        template_id: Optional[str] = Argument(
+            None, help="Template ID to validate (if omitted, validates all templates)"
+        ),
+        path: Optional[str] = Option(
+            None,
+            "--path",
+            "-p",
+            help="Validate a template from a specific directory path",
+        ),
+        verbose: bool = Option(
+            False, "--verbose", "-v", help="Show detailed validation information"
+        ),
+        semantic: bool = Option(
+            True,
+            "--semantic/--no-semantic",
+            help="Enable semantic validation (Docker Compose schema, etc.)",
+        ),
+    ) -> None:
+        """Validate templates for Jinja2 syntax, undefined variables, and semantic correctness.
+
+        Validation includes:
+        - Jinja2 syntax checking
+        - Variable definition checking
+        - Semantic validation (when --semantic is enabled):
+          - Docker Compose file structure
+          - YAML syntax
+          - Configuration best practices
+
+        Examples:
+            # Validate all templates in this module
+            cli compose validate
+
+            # Validate a specific template
+            cli compose validate gitlab
+
+            # Validate a template from a specific path
+            cli compose validate --path /path/to/template
+
+            # Validate with verbose output
+            cli compose validate --verbose
+
+            # Skip semantic validation (only Jinja2)
+            cli compose validate --no-semantic
+        """
+        from .validators import get_validator_registry
+
+        # Validate from path takes precedence
+        if path:
+            try:
+                template_path = Path(path).resolve()
+                if not template_path.exists():
+                    self.display.display_error(f"Path does not exist: {path}")
+                    raise Exit(code=1)
+                if not template_path.is_dir():
+                    self.display.display_error(f"Path is not a directory: {path}")
+                    raise Exit(code=1)
+
+                console.print(
+                    f"[bold]Validating template from path:[/bold] [cyan]{template_path}[/cyan]\n"
+                )
+                template = Template(template_path, library_name="local")
+                template_id = template.id
+            except Exception as e:
+                self.display.display_error(
+                    f"Failed to load template from path '{path}': {e}"
+                )
+                raise Exit(code=1)
+        elif template_id:
+            # Validate a specific template by ID
+            try:
+                template = self._load_template_by_id(template_id)
+                console.print(
+                    f"[bold]Validating template:[/bold] [cyan]{template_id}[/cyan]\n"
+                )
+            except Exception as e:
+                self.display.display_error(
+                    f"Failed to load template '{template_id}': {e}"
+                )
+                raise Exit(code=1)
+        else:
+            # Validate all templates - handled separately below
+            template = None
+
+        # Single template validation
+        if template:
+            try:
+                # Trigger validation by accessing used_variables
+                _ = template.used_variables
+                # Trigger variable definition validation by accessing variables
+                _ = template.variables
+                self.display.display_success("Jinja2 validation passed")
+
+                # Semantic validation
+                if semantic:
+                    console.print(
+                        "\n[bold cyan]Running semantic validation...[/bold cyan]"
+                    )
+                    registry = get_validator_registry()
+                    has_semantic_errors = False
+
+                    # Render template with default values for validation
+                    debug_mode = logger.isEnabledFor(logging.DEBUG)
+                    rendered_files, _ = template.render(
+                        template.variables, debug=debug_mode
+                    )
+
+                    for file_path, content in rendered_files.items():
+                        result = registry.validate_file(content, file_path)
+
+                        if (
+                            result.errors
+                            or result.warnings
+                            or (verbose and result.info)
+                        ):
+                            console.print(f"\n[cyan]File:[/cyan] {file_path}")
+                            result.display(f"{file_path}")
+
+                            if result.errors:
+                                has_semantic_errors = True
+
+                    if not has_semantic_errors:
+                        self.display.display_success("Semantic validation passed")
+                    else:
+                        self.display.display_error("Semantic validation found errors")
+                        raise Exit(code=1)
+
+                if verbose:
+                    console.print(
+                        f"\n[dim]Template path: {template.template_dir}[/dim]"
+                    )
+                    console.print(
+                        f"[dim]Found {len(template.used_variables)} variables[/dim]"
+                    )
+                    if semantic:
+                        console.print(
+                            f"[dim]Generated {len(rendered_files)} files[/dim]"
+                        )
+
+            except TemplateRenderError as e:
+                # Display enhanced error information for template rendering errors
+                self.display.display_template_render_error(
+                    e, context=f"template '{template_id}'"
+                )
+                raise Exit(code=1)
+            except (TemplateSyntaxError, TemplateValidationError, ValueError) as e:
+                self.display.display_error(f"Validation failed for '{template_id}':")
+                console.print(f"\n{e}")
+                raise Exit(code=1)
+            except Exception as e:
+                self.display.display_error(
+                    f"Unexpected error validating '{template_id}': {e}"
+                )
+                raise Exit(code=1)
+
+            return
+        else:
+            # Validate all templates
+            console.print(f"[bold]Validating all {self.name} templates...[/bold]\n")
+
+            entries = self.libraries.find(self.name, sort_results=True)
+            total = len(entries)
+            valid_count = 0
+            invalid_count = 0
+            errors = []
+
+            for template_dir, library_name in entries:
+                template_id = template_dir.name
+                try:
+                    template = Template(template_dir, library_name=library_name)
+                    # Trigger validation
+                    _ = template.used_variables
+                    _ = template.variables
+                    valid_count += 1
+                    if verbose:
+                        self.display.display_success(template_id)
+                except ValueError as e:
+                    invalid_count += 1
+                    errors.append((template_id, str(e)))
+                    if verbose:
+                        self.display.display_error(template_id)
+                except Exception as e:
+                    invalid_count += 1
+                    errors.append((template_id, f"Load error: {e}"))
+                    if verbose:
+                        self.display.display_warning(template_id)
+
+            # Summary
+            summary_items = {
+                "Total templates:": str(total),
+                "[green]Valid:[/green]": str(valid_count),
+                "[red]Invalid:[/red]": str(invalid_count),
+            }
+            self.display.display_summary_table("Validation Summary", summary_items)
+
+            # Show errors if any
+            if errors:
+                console.print("\n[bold red]Validation Errors:[/bold red]")
+                for template_id, error_msg in errors:
+                    console.print(
+                        f"\n[yellow]Template:[/yellow] [cyan]{template_id}[/cyan]"
+                    )
+                    console.print(f"[dim]{error_msg}[/dim]")
+                raise Exit(code=1)
+            else:
+                self.display.display_success("All templates are valid!")
+
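The semantic step above assumes only a small surface from the validator registry: validate_file(content, path) returning a result with errors/warnings/info lists and a display() method. A minimal sketch of that contract, with a made-up YAML check standing in for the real Docker Compose validators:

from dataclasses import dataclass, field

@dataclass
class ValidationResult:
    errors: list[str] = field(default_factory=list)
    warnings: list[str] = field(default_factory=list)
    info: list[str] = field(default_factory=list)

    def display(self, label: str) -> None:
        for msg in self.errors + self.warnings:
            print(f"{label}: {msg}")

def validate_file(content: str, file_path: str) -> ValidationResult:
    result = ValidationResult()
    # Illustrative rule only: YAML files must not be indented with tabs
    if file_path.endswith((".yaml", ".yml")) and "\t" in content:
        result.errors.append("tab indentation is not valid YAML")
    return result

result = validate_file("services:\n\tweb: {}\n", "compose.yaml")
result.display("compose.yaml")  # compose.yaml: tab indentation is not valid YAML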
+    @classmethod
+    def register_cli(cls, app: Typer) -> None:
+        """Register module commands with the main app."""
+        logger.debug(f"Registering CLI commands for module '{cls.name}'")
+
+        module_instance = cls()
+
+        module_app = Typer(help=cls.description)
+
+        module_app.command("list")(module_instance.list)
+        module_app.command("search")(module_instance.search)
+        module_app.command("show")(module_instance.show)
+        module_app.command("validate")(module_instance.validate)
+
+        module_app.command(
+            "generate",
+            context_settings={"allow_extra_args": True, "ignore_unknown_options": True},
+        )(module_instance.generate)
+
+        # Add defaults commands (simplified - only manage default values)
+        defaults_app = Typer(help="Manage default values for template variables")
+        defaults_app.command("get", help="Get default value(s)")(
+            module_instance.config_get
+        )
+        defaults_app.command("set", help="Set a default value")(
+            module_instance.config_set
+        )
+        defaults_app.command("rm", help="Remove a specific default value")(
+            module_instance.config_remove
+        )
+        defaults_app.command("clear", help="Clear default value(s)")(
+            module_instance.config_clear
+        )
+        defaults_app.command(
+            "list", help="Display the config for this module in YAML format"
+        )(module_instance.config_list)
+        module_app.add_typer(defaults_app, name="defaults")
+
+        app.add_typer(module_app, name=cls.name, help=cls.description)
+        logger.info(f"Module '{cls.name}' CLI commands registered")
+
+    def _load_template_by_id(self, id: str) -> Template:
+        """Load a template by its ID, supporting qualified IDs.
+
+        Supports both formats:
+        - Simple: "alloy" (uses priority system)
+        - Qualified: "alloy.default" (loads from specific library)
+
+        Args:
+            id: Template ID (simple or qualified)
+
+        Returns:
+            Template instance
+
+        Raises:
+            FileNotFoundError: If template is not found
+        """
+        logger.debug(f"Loading template with ID '{id}' from module '{self.name}'")
+
+        # find_by_id now handles both simple and qualified IDs
+        result = self.libraries.find_by_id(self.name, id)
+
+        if not result:
+            raise FileNotFoundError(
+                f"Template '{id}' not found in module '{self.name}'"
+            )
+
+        template_dir, library_name = result
+
+        # Get library type
+        library = next(
+            (lib for lib in self.libraries.libraries if lib.name == library_name), None
+        )
+        library_type = library.library_type if library else "git"
+
+        try:
+            template = Template(
+                template_dir, library_name=library_name, library_type=library_type
+            )
+
+            # Validate schema version compatibility
+            template._validate_schema_version(self.schema_version, self.name)
+
+            # If the original ID was qualified, preserve it
+            if "." in id:
+                template.id = id
+
+            return template
+        except Exception as exc:
+            logger.error(f"Failed to load template '{id}': {exc}")
+            raise FileNotFoundError(
+                f"Template '{id}' could not be loaded: {exc}"
+            ) from exc
+
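A short sketch of the simple-vs-qualified split that _load_template_by_id leans on; find_by_id is assumed to accept both forms, so this helper is purely illustrative:

def split_template_id(id: str) -> tuple[str, str | None]:
    """Return (template, library); library is None for simple IDs."""
    if "." in id:
        template, library = id.split(".", 1)
        return template, library
    return id, None

print(split_template_id("alloy"))          # ('alloy', None)
print(split_template_id("alloy.default"))  # ('alloy', 'default')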
+    def _display_template_details(self, template: Template, id: str) -> None:
+        """Display template information panel and variables table.
+
+        Args:
+            template: Template instance to display
+            id: Template ID
+        """
+        self.display.display_template_details(template, id)

+ 269 - 206
cli/core/prompt.py

@@ -1,10 +1,9 @@
 from __future__ import annotations
 
-from typing import Dict, Any, List, Callable
+from typing import Dict, Any, Callable
 import logging
 from rich.console import Console
 from rich.prompt import Prompt, Confirm, IntPrompt
-from rich.table import Table
 
 from .display import DisplayManager
 from .variable import Variable
@@ -14,209 +13,273 @@ logger = logging.getLogger(__name__)
 
 
 class PromptHandler:
-  """Simple interactive prompt handler for collecting template variables."""
-
-  def __init__(self) -> None:
-    self.console = Console()
-    self.display = DisplayManager()
-
-  def collect_variables(self, variables: VariableCollection) -> dict[str, Any]:
-    """Collect values for variables by iterating through sections.
-    
-    Args:
-        variables: VariableCollection with organized sections and variables
-        
-    Returns:
-        Dict of variable names to collected values
-    """
-    if not Confirm.ask("Customize any settings?", default=False):
-      logger.info("User opted to keep all default values")
-      return {}
-
-    collected: Dict[str, Any] = {}
-
-    # Process each section
-    for section_key, section in variables.get_sections().items():
-      if not section.variables:
-        continue
-
-      # Check if dependencies are satisfied
-      if not variables.is_section_satisfied(section_key):
-        # Get list of unsatisfied dependencies for better user feedback
-        unsatisfied_keys = [dep for dep in section.needs if not variables.is_section_satisfied(dep)]
-        # Convert section keys to titles for user-friendly display
-        unsatisfied_titles = []
-        for dep_key in unsatisfied_keys:
-          dep_section = variables.get_section(dep_key)
-          if dep_section:
-            unsatisfied_titles.append(dep_section.title)
-          else:
-            unsatisfied_titles.append(dep_key)
-        dep_names = ", ".join(unsatisfied_titles) if unsatisfied_titles else "unknown"
-        self.display.display_skipped(section.title, f"requires {dep_names} to be enabled")
-        logger.debug(f"Skipping section '{section_key}' - dependencies not satisfied: {dep_names}")
-        continue
-
-      # Always show section header first
-      self.display.display_section_header(section.title, section.description)
-
-      # Handle section toggle - skip for required sections
-      if section.required:
-        # Required sections are always processed, no toggle prompt needed
-        logger.debug(f"Processing required section '{section.key}' without toggle prompt")
-      elif section.toggle:
-        toggle_var = section.variables.get(section.toggle)
-        if toggle_var:
-          # Use description for prompt if available, otherwise use title
-          prompt_text = section.description if section.description else f"Enable {section.title}?"
-          current_value = toggle_var.convert(toggle_var.value)
-          new_value = self._prompt_bool(prompt_text, current_value)
-          
-          if new_value != current_value:
-            collected[toggle_var.name] = new_value
-            toggle_var.value = new_value
-          
-          # Use section's native is_enabled() method
-          if not section.is_enabled():
-            continue
-
-      # Collect variables in this section
-      for var_name, variable in section.variables.items():
-        # Skip toggle variable (already handled)
-        if section.toggle and var_name == section.toggle:
-          continue
-          
-        current_value = variable.convert(variable.value)
-        # Pass section.required so _prompt_variable can enforce required inputs
-        new_value = self._prompt_variable(variable, required=section.required)
-        
-        # For autogenerated variables, always update even if None (signals autogeneration)
-        if variable.autogenerated and new_value is None:
-          collected[var_name] = None
-          variable.value = None
-        elif new_value != current_value:
-          collected[var_name] = new_value
-          variable.value = new_value
-
-    logger.info(f"Variable collection completed. Collected {len(collected)} values")
-    return collected
-
-  def _prompt_variable(self, variable: Variable, required: bool = False) -> Any:
-    """Prompt for a single variable value based on its type.
-    
-    Args:
-        variable: The variable to prompt for
-        required: Whether the containing section is required (for context/display)
-        
-    Returns:
-        The validated value entered by the user
-    """
-    logger.debug(f"Prompting for variable '{variable.name}' (type: {variable.type})")
-    
-    # Use variable's native methods for prompt text and default value
-    prompt_text = variable.get_prompt_text()
-    default_value = variable.get_normalized_default()
-
-    # Add lock icon before default value for sensitive or autogenerated variables
-    if variable.sensitive or variable.autogenerated:
-      # Format: "Prompt text 🔒 (default)"
-      # The lock icon goes between the text and the default value in parentheses
-      prompt_text = f"{prompt_text} {self.display.get_lock_icon()}"
-
-    # Check if this specific variable is required (has no default and not autogenerated)
-    var_is_required = variable.is_required()
-    
-    # If variable is required, mark it in the prompt
-    if var_is_required:
-      prompt_text = f"{prompt_text} [bold red]*required[/bold red]"
-
-    handler = self._get_prompt_handler(variable)
-
-    # Add validation hint (includes both extra text and enum options)
-    hint = variable.get_validation_hint()
-    if hint:
-      # Show options/extra inline inside parentheses, before the default
-      prompt_text = f"{prompt_text} [dim]({hint})[/dim]"
-
-    while True:
-      try:
-        raw = handler(prompt_text, default_value)
-        # Use Variable's centralized validation method that handles:
-        # - Type conversion
-        # - Autogenerated variable detection
-        # - Required field validation
-        converted = variable.validate_and_convert(raw, check_required=True)
-        
-        # Return the converted value (caller will update variable.value)
-        return converted
-      except ValueError as exc:
-        # Conversion/validation failed — show a consistent error message and retry
-        self._show_validation_error(str(exc))
-      except Exception as e:
-        # Unexpected error — log and retry using the stored (unconverted) value
-        logger.error(f"Error prompting for variable '{variable.name}': {str(e)}")
-        default_value = variable.value
+    """Simple interactive prompt handler for collecting template variables."""
+
+    def __init__(self) -> None:
+        self.console = Console()
+        self.display = DisplayManager()
+
+    def collect_variables(self, variables: VariableCollection) -> dict[str, Any]:
+        """Collect values for variables by iterating through sections.
+
+        Args:
+            variables: VariableCollection with organized sections and variables
+
+        Returns:
+            Dict of variable names to collected values
+        """
+        if not Confirm.ask("Customize any settings?", default=False):
+            logger.info("User opted to keep all default values")
+            return {}
+
+        collected: Dict[str, Any] = {}
+        # Track which variables we've already prompted for
+        prompted_variables: set[str] = set()
+
+        # Process each section
+        for section_key, section in variables.get_sections().items():
+            if not section.variables:
+                continue
+
+            # Check if dependencies are satisfied
+            if not variables.is_section_satisfied(section_key):
+                # Get list of unsatisfied dependencies for better user feedback
+                unsatisfied_keys = [
+                    dep
+                    for dep in section.needs
+                    if not variables.is_section_satisfied(dep)
+                ]
+                # Convert section keys to titles for user-friendly display
+                unsatisfied_titles = []
+                for dep_key in unsatisfied_keys:
+                    dep_section = variables.get_section(dep_key)
+                    if dep_section:
+                        unsatisfied_titles.append(dep_section.title)
+                    else:
+                        unsatisfied_titles.append(dep_key)
+                dep_names = (
+                    ", ".join(unsatisfied_titles) if unsatisfied_titles else "unknown"
+                )
+                self.display.display_skipped(
+                    section.title, f"requires {dep_names} to be enabled"
+                )
+                logger.debug(
+                    f"Skipping section '{section_key}' - dependencies not satisfied: {dep_names}"
+                )
+                continue
+
+            # Always show section header first
+            self.display.display_section_header(section.title, section.description)
+
+            # Track whether this section will be enabled
+            section_will_be_enabled = True
+
+            # Handle section toggle - skip for required sections
+            if section.required:
+                # Required sections are always processed, no toggle prompt needed
+                logger.debug(
+                    f"Processing required section '{section.key}' without toggle prompt"
+                )
+            elif section.toggle:
+                toggle_var = section.variables.get(section.toggle)
+                if toggle_var:
+                    # Prompt for toggle variable using standard variable prompting logic
+                    # This ensures consistent handling of description, extra text, validation hints, etc.
+                    current_value = toggle_var.convert(toggle_var.value)
+                    new_value = self._prompt_variable(
+                        toggle_var, required=section.required
+                    )
+
+                    if new_value != current_value:
+                        collected[toggle_var.name] = new_value
+                        toggle_var.value = new_value
+
+                    # Use section's native is_enabled() method
+                    if not section.is_enabled():
+                        section_will_be_enabled = False
+
+            # Collect variables in this section
+            for var_name, variable in section.variables.items():
+                # Skip toggle variable (already handled)
+                if section.toggle and var_name == section.toggle:
+                    continue
+
+                # Skip variables with unsatisfied needs (similar to display logic)
+                if not variables.is_variable_satisfied(var_name):
+                    logger.debug(
+                        f"Skipping variable '{var_name}' - needs not satisfied"
+                    )
+                    continue
+
+                # Skip all variables if section is disabled
+                if not section_will_be_enabled:
+                    logger.debug(
+                        f"Skipping variable '{var_name}' from disabled section '{section_key}'"
+                    )
+                    continue
+
+                # Prompt for the variable
+                current_value = variable.convert(variable.value)
+                # Pass section.required so _prompt_variable can enforce required inputs
+                new_value = self._prompt_variable(variable, required=section.required)
+
+                # Track that we've prompted for this variable
+                prompted_variables.add(var_name)
+
+                # For autogenerated variables, always update even if None (signals autogeneration)
+                if variable.autogenerated and new_value is None:
+                    collected[var_name] = None
+                    variable.value = None
+                elif new_value != current_value:
+                    collected[var_name] = new_value
+                    variable.value = new_value
+
+        logger.info(f"Variable collection completed. Collected {len(collected)} values")
+        return collected
+
+    def _prompt_variable(self, variable: Variable, required: bool = False) -> Any:
+        """Prompt for a single variable value based on its type.
+
+        Args:
+            variable: The variable to prompt for
+            required: Whether the containing section is required (for context/display)
+
+        Returns:
+            The validated value entered by the user
+        """
+        logger.debug(
+            f"Prompting for variable '{variable.name}' (type: {variable.type})"
+        )
+
+        # Use variable's native methods for prompt text and default value
+        prompt_text = variable.get_prompt_text()
+        default_value = variable.get_normalized_default()
+
+        # Add lock icon before default value for sensitive or autogenerated variables
+        if variable.sensitive or variable.autogenerated:
+            # Format: "Prompt text 🔒 (default)"
+            # The lock icon goes between the text and the default value in parentheses
+            prompt_text = f"{prompt_text} {self.display.get_lock_icon()}"
+
+        # Check if this specific variable is required (has no default and not autogenerated)
+        var_is_required = variable.is_required()
+
+        # If variable is required, mark it in the prompt
+        if var_is_required:
+            prompt_text = f"{prompt_text} [bold red]*required[/bold red]"
+
         handler = self._get_prompt_handler(variable)
 
-  def _get_prompt_handler(self, variable: Variable) -> Callable:
-    """Return the prompt function for a variable type."""
-    handlers = {
-      "bool": self._prompt_bool,
-      "int": self._prompt_int,
-      # For enum prompts we pass the variable.extra through so options and extra
-      # can be combined into a single inline hint.
-      "enum": lambda text, default: self._prompt_enum(text, variable.options or [], default, extra=getattr(variable, 'extra', None)),
-    }
-    return handlers.get(variable.type, lambda text, default: self._prompt_string(text, default, is_sensitive=variable.sensitive))
-
-  def _show_validation_error(self, message: str) -> None:
-    """Display validation feedback consistently."""
-    self.display.display_validation_error(message)
-
-  def _prompt_string(self, prompt_text: str, default: Any = None, is_sensitive: bool = False) -> str | None:
-    value = Prompt.ask(
-      prompt_text,
-      default=str(default) if default is not None else "",
-      show_default=True,
-      password=is_sensitive
-    )
-    stripped = value.strip() if value else None
-    return stripped if stripped else None
-
-  def _prompt_bool(self, prompt_text: str, default: Any = None) -> bool | None:
-    if default is None:
-      return Confirm.ask(prompt_text, default=None)
-    converted = default if isinstance(default, bool) else str(default).lower() in ("true", "1", "yes", "on")
-    return Confirm.ask(prompt_text, default=converted)
-
-  def _prompt_int(self, prompt_text: str, default: Any = None) -> int | None:
-    converted = None
-    if default is not None:
-      try:
-        converted = int(default)
-      except (ValueError, TypeError):
-        logger.warning(f"Invalid default integer value: {default}")
-    return IntPrompt.ask(prompt_text, default=converted)
-
-  def _prompt_enum(self, prompt_text: str, options: list[str], default: Any = None, extra: str | None = None) -> str:
-    """Prompt for enum selection with validation.
-    
-    Note: prompt_text should already include hint from variable.get_validation_hint()
-    but we keep this for backward compatibility and fallback.
-    """
-    if not options:
-      return self._prompt_string(prompt_text, default)
-
-    # Validate default is in options
-    if default and str(default) not in options:
-      default = options[0]
-
-    while True:
-      value = Prompt.ask(
-        prompt_text,
-        default=str(default) if default else options[0],
-        show_default=True,
-      )
-      if value in options:
-        return value
-      self.console.print(f"[red]Invalid choice. Select from: {', '.join(options)}[/red]")
+        # Add validation hint (includes both extra text and enum options)
+        hint = variable.get_validation_hint()
+        if hint:
+            # Show options/extra inline inside parentheses, before the default
+            prompt_text = f"{prompt_text} [dim]({hint})[/dim]"
+
+        while True:
+            try:
+                raw = handler(prompt_text, default_value)
+                # Use Variable's centralized validation method that handles:
+                # - Type conversion
+                # - Autogenerated variable detection
+                # - Required field validation
+                converted = variable.validate_and_convert(raw, check_required=True)
+
+                # Return the converted value (caller will update variable.value)
+                return converted
+            except ValueError as exc:
+                # Conversion/validation failed — show a consistent error message and retry
+                self._show_validation_error(str(exc))
+            except Exception as e:
+                # Unexpected error — log and retry using the stored (unconverted) value
+                logger.error(
+                    f"Error prompting for variable '{variable.name}': {str(e)}"
+                )
+                default_value = variable.value
+                handler = self._get_prompt_handler(variable)
+
+    def _get_prompt_handler(self, variable: Variable) -> Callable:
+        """Return the prompt function for a variable type."""
+        handlers = {
+            "bool": self._prompt_bool,
+            "int": self._prompt_int,
+            # For enum prompts we pass the variable.extra through so options and extra
+            # can be combined into a single inline hint.
+            "enum": lambda text, default: self._prompt_enum(
+                text,
+                variable.options or [],
+                default,
+                extra=getattr(variable, "extra", None),
+            ),
+        }
+        return handlers.get(
+            variable.type,
+            lambda text, default: self._prompt_string(
+                text, default, is_sensitive=variable.sensitive
+            ),
+        )
+
+    def _show_validation_error(self, message: str) -> None:
+        """Display validation feedback consistently."""
+        self.display.display_validation_error(message)
+
+    def _prompt_string(
+        self, prompt_text: str, default: Any = None, is_sensitive: bool = False
+    ) -> str | None:
+        value = Prompt.ask(
+            prompt_text,
+            default=str(default) if default is not None else "",
+            show_default=True,
+            password=is_sensitive,
+        )
+        stripped = value.strip() if value else None
+        return stripped if stripped else None
+
+    def _prompt_bool(self, prompt_text: str, default: Any = None) -> bool | None:
+        if default is None:
+            return Confirm.ask(prompt_text, default=None)
+        converted = (
+            default
+            if isinstance(default, bool)
+            else str(default).lower() in ("true", "1", "yes", "on")
+        )
+        return Confirm.ask(prompt_text, default=converted)
+
+    def _prompt_int(self, prompt_text: str, default: Any = None) -> int | None:
+        converted = None
+        if default is not None:
+            try:
+                converted = int(default)
+            except (ValueError, TypeError):
+                logger.warning(f"Invalid default integer value: {default}")
+        return IntPrompt.ask(prompt_text, default=converted)
+
+    def _prompt_enum(
+        self,
+        prompt_text: str,
+        options: list[str],
+        default: Any = None,
+        extra: str | None = None,
+    ) -> str | None:
+        """Prompt for enum selection with validation.
+
+        Note: prompt_text should already include the hint from
+        variable.get_validation_hint(); the extra parameter is kept only as a
+        fallback for backward compatibility.
+        """
+        if not options:
+            return self._prompt_string(prompt_text, default)
+
+        # Validate default is in options
+        if default and str(default) not in options:
+            default = options[0]
+
+        while True:
+            value = Prompt.ask(
+                prompt_text,
+                default=str(default) if default else options[0],
+                show_default=True,
+            )
+            if value in options:
+                return value
+            self.console.print(
+                f"[red]Invalid choice. Select from: {', '.join(options)}[/red]"
+            )
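The dispatch-table pattern in _get_prompt_handler generalizes nicely; a minimal sketch with stubbed handlers (the real ones wrap rich.prompt):

from typing import Any, Callable

def prompt_bool(text: str, default: Any) -> Any:
    return bool(default)

def prompt_int(text: str, default: Any) -> Any:
    return int(default)

def prompt_string(text: str, default: Any) -> Any:
    return str(default)

def get_prompt_handler(var_type: str) -> Callable[[str, Any], Any]:
    handlers: dict[str, Callable[[str, Any], Any]] = {
        "bool": prompt_bool,
        "int": prompt_int,
    }
    return handlers.get(var_type, prompt_string)  # string prompt is the fallback

print(get_prompt_handler("int")("Port?", "8080"))  # 8080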

+ 31 - 23
cli/core/registry.py

@@ -1,4 +1,5 @@
 """Module registry system."""
+
 from __future__ import annotations
 
 import logging
@@ -8,29 +9,36 @@ logger = logging.getLogger(__name__)
 
 
 class ModuleRegistry:
-  """Simple module registry without magic."""
-  
-  def __init__(self) -> None:
-    self._modules = {}
-    logger.debug("Initializing module registry")
-  
-  def register(self, module_class: Type) -> None:
-    """Register a module class."""
-    # Module class defines its own name attribute
-    logger.debug(f"Attempting to register module class '{module_class.name}'")
-    
-    if module_class.name in self._modules:
-      logger.warning(f"Module '{module_class.name}' already registered, replacing with new implementation")
-    
-    self._modules[module_class.name] = module_class
-    logger.info(f"Registered module '{module_class.name}' (total modules: {len(self._modules)})")
-    logger.debug(f"Module '{module_class.name}' details: description='{module_class.description}'")
-  
-  def iter_module_classes(self) -> Iterator[tuple[str, Type]]:
-    """Yield registered module classes without instantiating them."""
-    logger.debug(f"Iterating over {len(self._modules)} registered module classes")
-    for name in sorted(self._modules.keys()):
-      yield name, self._modules[name]
+    """Simple module registry without magic."""
+
+    def __init__(self) -> None:
+        self._modules = {}
+        logger.debug("Initializing module registry")
+
+    def register(self, module_class: Type) -> None:
+        """Register a module class."""
+        # Module class defines its own name attribute
+        logger.debug(f"Attempting to register module class '{module_class.name}'")
+
+        if module_class.name in self._modules:
+            logger.warning(
+                f"Module '{module_class.name}' already registered, replacing with new implementation"
+            )
+
+        self._modules[module_class.name] = module_class
+        logger.info(
+            f"Registered module '{module_class.name}' (total modules: {len(self._modules)})"
+        )
+        logger.debug(
+            f"Module '{module_class.name}' details: description='{module_class.description}'"
+        )
+
+    def iter_module_classes(self) -> Iterator[tuple[str, Type]]:
+        """Yield registered module classes without instantiating them."""
+        logger.debug(f"Iterating over {len(self._modules)} registered module classes")
+        for name in sorted(self._modules.keys()):
+            yield name, self._modules[name]
+
 
 # Global registry
 registry = ModuleRegistry()

+ 207 - 106
cli/core/repo.py

@@ -1,4 +1,5 @@
 """Repository management module for syncing library repositories."""
+
 from __future__ import annotations
 
 import logging
@@ -7,7 +8,6 @@ from pathlib import Path
 from typing import Optional
 
 from rich.console import Console
-from rich.panel import Panel
 from rich.progress import Progress, SpinnerColumn, TextColumn
 from rich.table import Table
 from typer import Argument, Option, Typer
@@ -24,13 +24,15 @@ display = DisplayManager()
 app = Typer(help="Manage library repositories")
 
 
-def _run_git_command(args: list[str], cwd: Optional[Path] = None) -> tuple[bool, str, str]:
+def _run_git_command(
+    args: list[str], cwd: Optional[Path] = None
+) -> tuple[bool, str, str]:
     """Run a git command and return the result.
-    
+
     Args:
         args: Git command arguments (without 'git' prefix)
         cwd: Working directory for the command
-        
+
     Returns:
         Tuple of (success, stdout, stderr)
     """
@@ -40,7 +42,7 @@ def _run_git_command(args: list[str], cwd: Optional[Path] = None) -> tuple[bool,
             cwd=cwd,
             capture_output=True,
             text=True,
-            timeout=300  # 5 minute timeout
+            timeout=300,  # 5 minute timeout
         )
         return result.returncode == 0, result.stdout, result.stderr
     except subprocess.TimeoutExpired:
@@ -51,32 +53,37 @@ def _run_git_command(args: list[str], cwd: Optional[Path] = None) -> tuple[bool,
         return False, "", str(e)
 
 
-def _clone_or_pull_repo(name: str, url: str, target_path: Path, branch: Optional[str] = None, sparse_dir: Optional[str] = None) -> tuple[bool, str]:
+def _clone_or_pull_repo(
+    name: str,
+    url: str,
+    target_path: Path,
+    branch: Optional[str] = None,
+    sparse_dir: Optional[str] = None,
+) -> tuple[bool, str]:
     """Clone or pull a git repository with optional sparse-checkout.
-    
+
     Args:
         name: Library name
         url: Git repository URL
         target_path: Target directory for the repository
         branch: Git branch to clone/pull (optional)
         sparse_dir: Directory to sparse-checkout (optional, use None or "." for full clone)
-        
+
     Returns:
         Tuple of (success, message)
     """
     if target_path.exists() and (target_path / ".git").exists():
         # Repository exists, pull updates
         logger.debug(f"Pulling updates for library '{name}' at {target_path}")
-        
+
         # Determine which branch to pull
         pull_branch = branch if branch else "main"
-        
+
         # Pull updates from specific branch
         success, stdout, stderr = _run_git_command(
-            ["pull", "--ff-only", "origin", pull_branch],
-            cwd=target_path
+            ["pull", "--ff-only", "origin", pull_branch], cwd=target_path
         )
-        
+
         if success:
             # Check if anything was updated
             if "Already up to date" in stdout or "Already up-to-date" in stdout:
@@ -90,69 +97,74 @@ def _clone_or_pull_repo(name: str, url: str, target_path: Path, branch: Optional
     else:
         # Repository doesn't exist, clone it
         logger.debug(f"Cloning library '{name}' from {url} to {target_path}")
-        
+
         # Ensure parent directory exists
         target_path.parent.mkdir(parents=True, exist_ok=True)
-        
+
         # Determine if we should use sparse-checkout
         use_sparse = sparse_dir and sparse_dir != "."
-        
+
         if use_sparse:
             # Use sparse-checkout to clone only specific directory
             logger.debug(f"Using sparse-checkout for directory: {sparse_dir}")
-            
+
             # Initialize empty repo
             success, stdout, stderr = _run_git_command(["init"], cwd=None)
             if success:
                 # Create target directory
                 target_path.mkdir(parents=True, exist_ok=True)
-                
+
                 # Initialize git repo
                 success, stdout, stderr = _run_git_command(["init"], cwd=target_path)
                 if not success:
                     return False, f"Failed to initialize repo: {stderr or stdout}"
-                
+
                 # Add remote
-                success, stdout, stderr = _run_git_command(["remote", "add", "origin", url], cwd=target_path)
+                success, stdout, stderr = _run_git_command(
+                    ["remote", "add", "origin", url], cwd=target_path
+                )
                 if not success:
                     return False, f"Failed to add remote: {stderr or stdout}"
-                
+
                 # Enable sparse-checkout (non-cone mode to exclude root files)
                 success, stdout, stderr = _run_git_command(
-                    ["sparse-checkout", "init", "--no-cone"], 
-                    cwd=target_path
+                    ["sparse-checkout", "init", "--no-cone"], cwd=target_path
                 )
                 if not success:
-                    return False, f"Failed to enable sparse-checkout: {stderr or stdout}"
-                
+                    return (
+                        False,
+                        f"Failed to enable sparse-checkout: {stderr or stdout}",
+                    )
+
                 # Set sparse-checkout to specific directory (non-cone uses patterns)
                 success, stdout, stderr = _run_git_command(
-                    ["sparse-checkout", "set", f"{sparse_dir}/*"],
-                    cwd=target_path
+                    ["sparse-checkout", "set", f"{sparse_dir}/*"], cwd=target_path
                 )
                 if not success:
-                    return False, f"Failed to set sparse-checkout directory: {stderr or stdout}"
-                
+                    return (
+                        False,
+                        f"Failed to set sparse-checkout directory: {stderr or stdout}",
+                    )
+
                 # Fetch specific branch (without attempting to update local ref)
                 fetch_args = ["fetch", "--depth", "1", "origin"]
                 if branch:
                     fetch_args.append(branch)
                 else:
                     fetch_args.append("main")
-                
+
                 success, stdout, stderr = _run_git_command(fetch_args, cwd=target_path)
                 if not success:
                     return False, f"Fetch failed: {stderr or stdout}"
-                
+
                 # Checkout the branch
                 checkout_branch = branch if branch else "main"
                 success, stdout, stderr = _run_git_command(
-                    ["checkout", checkout_branch],
-                    cwd=target_path
+                    ["checkout", checkout_branch], cwd=target_path
                 )
                 if not success:
                     return False, f"Checkout failed: {stderr or stdout}"
-                
+
                 # Done! Files are in target_path/sparse_dir/
                 return True, "Cloned successfully (sparse)"
             else:
@@ -163,9 +175,9 @@ def _clone_or_pull_repo(name: str, url: str, target_path: Path, branch: Optional
             if branch:
                 clone_args.extend(["--branch", branch])
             clone_args.extend([url, str(target_path)])
-            
+
             success, stdout, stderr = _run_git_command(clone_args)
-            
+
             if success:
                 return True, "Cloned successfully"
             else:
@@ -177,36 +189,39 @@ def _clone_or_pull_repo(name: str, url: str, target_path: Path, branch: Optional
 @app.command()
 def update(
     library_name: Optional[str] = Argument(
-        None,
-        help="Name of specific library to update (updates all if not specified)"
+        None, help="Name of specific library to update (updates all if not specified)"
     ),
-    verbose: bool = Option(False, "--verbose", "-v", help="Show detailed output")
+    verbose: bool = Option(False, "--verbose", "-v", help="Show detailed output"),
 ) -> None:
     """Update library repositories by cloning or pulling from git.
-    
+
     This command syncs all configured libraries from their git repositories.
     If a library doesn't exist locally, it will be cloned. If it exists, it will be pulled.
     """
     config = ConfigManager()
     libraries = config.get_libraries()
-    
+
     if not libraries:
         display.display_warning("No libraries configured")
-        console.print("Libraries are auto-configured on first run with a default library.")
+        console.print(
+            "Libraries are auto-configured on first run with a default library."
+        )
         return
-    
+
     # Filter to specific library if requested
     if library_name:
         libraries = [lib for lib in libraries if lib.get("name") == library_name]
         if not libraries:
-            console_err.print(f"[red]Error:[/red] Library '{library_name}' not found in configuration")
+            console_err.print(
+                f"[red]Error:[/red] Library '{library_name}' not found in configuration"
+            )
             return
-    
+
     libraries_path = config.get_libraries_path()
-    
+
     # Create results table
     results = []
-    
+
     with Progress(
         SpinnerColumn(),
         TextColumn("[progress.description]{task.description}"),
@@ -214,52 +229,68 @@ def update(
     ) as progress:
         for lib in libraries:
             name = lib.get("name")
-            url = lib.get("url")
-            branch = lib.get("branch")
-            directory = lib.get("directory", "library")
+            lib_type = lib.get("type", "git")
             enabled = lib.get("enabled", True)
-            
+
             if not enabled:
                 if verbose:
                     console.print(f"[dim]Skipping disabled library: {name}[/dim]")
                 results.append((name, "Skipped (disabled)", False))
                 continue
-            
+
+            # Skip static libraries (no sync needed)
+            if lib_type == "static":
+                if verbose:
+                    console.print(
+                        f"[dim]Skipping static library: {name} (no sync needed)[/dim]"
+                    )
+                results.append((name, "N/A (static)", True))
+                continue
+
+            # Handle git libraries
+            url = lib.get("url")
+            branch = lib.get("branch")
+            directory = lib.get("directory", "library")
+
             task = progress.add_task(f"Updating {name}...", total=None)
-            
+
             # Target path: ~/.config/boilerplates/libraries/{name}/
             target_path = libraries_path / name
-            
+
             # Clone or pull the repository with sparse-checkout if directory is specified
-            success, message = _clone_or_pull_repo(name, url, target_path, branch, directory)
-            
+            success, message = _clone_or_pull_repo(
+                name, url, target_path, branch, directory
+            )
+
             results.append((name, message, success))
             progress.remove_task(task)
-            
+
             if verbose:
                 if success:
                     display.display_success(f"{name}: {message}")
                 else:
                     display.display_error(f"{name}: {message}")
-    
+
     # Display summary table
     if not verbose:
         display.display_status_table(
-            "Library Update Summary",
-            results,
-            columns=("Library", "Status")
+            "Library Update Summary", results, columns=("Library", "Status")
         )
-    
+
     # Summary
     total = len(results)
     successful = sum(1 for _, _, success in results if success)
-    
+
     if successful == total:
-        console.print(f"\n[green]All libraries updated successfully ({successful}/{total})[/green]")
+        console.print(
+            f"\n[green]All libraries updated successfully ({successful}/{total})[/green]"
+        )
     elif successful > 0:
-        console.print(f"\n[yellow]Partially successful: {successful}/{total} libraries updated[/yellow]")
+        console.print(
+            f"\n[yellow]Partially successful: {successful}/{total} libraries updated[/yellow]"
+        )
     else:
-        console.print(f"\n[red]Failed to update libraries[/red]")
+        console.print("\n[red]Failed to update libraries[/red]")
 
 
 @app.command()
@@ -267,70 +298,139 @@ def list() -> None:
     """List all configured libraries."""
     config = ConfigManager()
     libraries = config.get_libraries()
-    
+
     if not libraries:
         console.print("[yellow]No libraries configured.[/yellow]")
         return
-    
+
     table = Table(title="Configured Libraries", show_header=True)
     table.add_column("Name", style="cyan", no_wrap=True)
-    table.add_column("URL", style="blue")
+    table.add_column("URL/Path", style="blue")
     table.add_column("Branch", style="yellow")
     table.add_column("Directory", style="magenta")
+    table.add_column("Type", style="cyan")
     table.add_column("Status", style="green")
-    
+
     libraries_path = config.get_libraries_path()
-    
+
     for lib in libraries:
         name = lib.get("name", "")
-        url = lib.get("url", "")
-        branch = lib.get("branch", "main")
-        directory = lib.get("directory", "library")
+        lib_type = lib.get("type", "git")
         enabled = lib.get("enabled", True)
-        
-        # Check if library exists locally
-        library_base = libraries_path / name
-        if directory and directory != ".":
-            library_path = library_base / directory
+
+        if lib_type == "git":
+            url_or_path = lib.get("url", "")
+            branch = lib.get("branch", "main")
+            directory = lib.get("directory", "library")
+
+            # Check if library exists locally
+            library_base = libraries_path / name
+            if directory and directory != ".":
+                library_path = library_base / directory
+            else:
+                library_path = library_base
+            exists = library_path.exists()
+
+        elif lib_type == "static":
+            url_or_path = lib.get("path", "")
+            branch = "-"
+            directory = "-"
+
+            # Check if static path exists
+            from pathlib import Path
+
+            library_path = Path(url_or_path).expanduser()
+            if not library_path.is_absolute():
+                library_path = (config.config_path.parent / library_path).resolve()
+            exists = library_path.exists()
+
         else:
-            library_path = library_base
-        exists = library_path.exists()
-        
+            # Unknown type
+            url_or_path = "<unknown type>"
+            branch = "-"
+            directory = "-"
+            exists = False
+
+        type_display = lib_type
+
         status_parts = []
         if not enabled:
             status_parts.append("[dim]disabled[/dim]")
         elif exists:
-            status_parts.append("[green]synced[/green]")
+            status_parts.append("[green]available[/green]")
         else:
-            status_parts.append("[yellow]not synced[/yellow]")
-        
+            status_parts.append("[yellow]not found[/yellow]")
+
         status = " ".join(status_parts)
-        
-        table.add_row(name, url, branch, directory, status)
-    
+
+        table.add_row(name, url_or_path, branch, directory, type_display, status)
+
     console.print(table)
 
 
 @app.command()
 def add(
     name: str = Argument(..., help="Unique name for the library"),
-    url: str = Argument(..., help="Git repository URL"),
-    branch: str = Option("main", "--branch", "-b", help="Git branch to use"),
-    directory: str = Option("library", "--directory", "-d", help="Directory within repo containing templates (metadata only)"),
-    enabled: bool = Option(True, "--enabled/--disabled", help="Enable or disable the library"),
-    sync: bool = Option(True, "--sync/--no-sync", help="Sync the library after adding")
+    library_type: str = Option(
+        "git", "--type", "-t", help="Library type (git or static)"
+    ),
+    url: Optional[str] = Option(
+        None, "--url", "-u", help="Git repository URL (for git type)"
+    ),
+    branch: str = Option("main", "--branch", "-b", help="Git branch (for git type)"),
+    directory: str = Option(
+        "library", "--directory", "-d", help="Directory in repo (for git type)"
+    ),
+    path: Optional[str] = Option(
+        None, "--path", "-p", help="Local path (for static type)"
+    ),
+    enabled: bool = Option(
+        True, "--enabled/--disabled", help="Enable or disable the library"
+    ),
+    sync: bool = Option(True, "--sync/--no-sync", help="Sync after adding (git only)"),
 ) -> None:
-    """Add a new library to the configuration."""
+    """Add a new library to the configuration.
+
+    Examples:
+      # Add a git library
+      repo add mylib --type git --url https://github.com/user/templates.git
+
+      # Add a static library
+      repo add local --type static --path ~/my-templates
+    """
     config = ConfigManager()
-    
+
     try:
-        config.add_library(name, url, directory, branch, enabled)
-        display.display_success(f"Added library '{name}'")
-        
-        if sync and enabled:
+        if library_type == "git":
+            if not url:
+                display.display_error("--url is required for git libraries")
+                return
+            config.add_library(
+                name,
+                library_type="git",
+                url=url,
+                branch=branch,
+                directory=directory,
+                enabled=enabled,
+            )
+        elif library_type == "static":
+            if not path:
+                display.display_error("--path is required for static libraries")
+                return
+            config.add_library(name, library_type="static", path=path, enabled=enabled)
+        else:
+            display.display_error(
+                f"Invalid library type: {library_type}. Must be 'git' or 'static'."
+            )
+            return
+
+        display.display_success(f"Added {library_type} library '{name}'")
+
+        if library_type == "git" and sync and enabled:
             console.print(f"\nSyncing library '{name}'...")
-            # Call update for this specific library
             update(library_name=name, verbose=True)
+        elif library_type == "static":
+            display.display_info(f"Static library points to: {path}")
     except ConfigError as e:
         display.display_error(str(e))
 
@@ -338,23 +438,26 @@ def add(
 @app.command()
 def remove(
     name: str = Argument(..., help="Name of the library to remove"),
-    keep_files: bool = Option(False, "--keep-files", help="Keep the local library files (don't delete)")
+    keep_files: bool = Option(
+        False, "--keep-files", help="Keep the local library files (don't delete)"
+    ),
 ) -> None:
     """Remove a library from the configuration and delete its local files."""
     config = ConfigManager()
-    
+
     try:
         # Remove from config
         config.remove_library(name)
         display.display_success(f"Removed library '{name}' from configuration")
-        
+
         # Delete local files unless --keep-files is specified
         if not keep_files:
             libraries_path = config.get_libraries_path()
             library_path = libraries_path / name
-            
+
             if library_path.exists():
                 import shutil
+
                 shutil.rmtree(library_path)
                 display.display_success(f"Deleted local files at {library_path}")
             else:
@@ -363,8 +466,6 @@ def remove(
         display.display_error(str(e))
 
 
-
-
 # Register the repo command with the CLI
 def register_cli(parent_app: Typer) -> None:
     """Register the repo command with the parent Typer app."""

+ 187 - 104
cli/core/section.py

@@ -7,107 +7,190 @@ from .variable import Variable
 
 
 class VariableSection:
-  """Groups variables together with shared metadata for presentation."""
-
-  def __init__(self, data: dict[str, Any]) -> None:
-    """Initialize VariableSection from a dictionary.
-    
-    Args:
-        data: Dictionary containing section specification with required 'key' and 'title' keys
-    """
-    if not isinstance(data, dict):
-      raise ValueError("VariableSection data must be a dictionary")
-    
-    if "key" not in data:
-      raise ValueError("VariableSection data must contain 'key'")
-    
-    if "title" not in data:
-      raise ValueError("VariableSection data must contain 'title'")
-    
-    self.key: str = data["key"]
-    self.title: str = data["title"]
-    self.variables: OrderedDict[str, Variable] = OrderedDict()
-    self.description: Optional[str] = data.get("description")
-    self.toggle: Optional[str] = data.get("toggle")
-    # Default "general" section to required=True, all others to required=False
-    self.required: bool = data.get("required", data["key"] == "general")
-    # Section dependencies - can be string or list of strings
-    needs_value = data.get("needs")
-    if needs_value:
-      if isinstance(needs_value, str):
-        self.needs: List[str] = [needs_value]
-      elif isinstance(needs_value, list):
-        self.needs: List[str] = needs_value
-      else:
-        raise ValueError(f"Section '{self.key}' has invalid 'needs' value: must be string or list")
-    else:
-      self.needs: List[str] = []
-
-  def to_dict(self) -> Dict[str, Any]:
-    """Serialize VariableSection to a dictionary for storage."""
-    section_dict = {
-      'required': self.required,
-      'vars': {name: var.to_dict() for name, var in self.variables.items()}
-    }
-    
-    # Add optional fields if present
-    for field in ('title', 'description', 'toggle'):
-      if value := getattr(self, field):
-        section_dict[field] = value
-    
-    # Store dependencies (single value if only one, list otherwise)
-    if self.needs:
-      section_dict['needs'] = self.needs[0] if len(self.needs) == 1 else self.needs
-    
-    return section_dict
-  
-  def is_enabled(self) -> bool:
-    """Check if section is currently enabled based on toggle variable.
-    
-    Returns:
-        True if section is enabled (no toggle or toggle is True), False otherwise
-    """
-    if not self.toggle:
-      return True
-    
-    toggle_var = self.variables.get(self.toggle)
-    if not toggle_var:
-      return True
-    
-    try:
-      return bool(toggle_var.convert(toggle_var.value))
-    except Exception:
-      return False
-  
-  def clone(self, origin_update: Optional[str] = None) -> 'VariableSection':
-    """Create a deep copy of the section with all variables.
-    
-    This is more efficient than converting to dict and back when copying sections.
-    
-    Args:
-        origin_update: Optional origin string to apply to all cloned variables
-        
-    Returns:
-        New VariableSection instance with deep-copied variables
-        
-    Example:
-        section2 = section1.clone(origin_update='template')
-    """
-    # Create new section with same metadata
-    cloned = VariableSection({
-      'key': self.key,
-      'title': self.title,
-      'description': self.description,
-      'toggle': self.toggle,
-      'required': self.required,
-      'needs': self.needs.copy() if self.needs else None,
-    })
-    
-    # Deep copy all variables
-    for var_name, variable in self.variables.items():
-      if origin_update:
-        cloned.variables[var_name] = variable.clone(update={'origin': origin_update})
-      else:
-        cloned.variables[var_name] = variable.clone()
-    
-    return cloned
+    """Groups variables together with shared metadata for presentation."""
+
+    def __init__(self, data: dict[str, Any]) -> None:
+        """Initialize VariableSection from a dictionary.
+
+        Args:
+            data: Dictionary containing section specification with required 'key' and 'title' keys
+        """
+        if not isinstance(data, dict):
+            raise ValueError("VariableSection data must be a dictionary")
+
+        if "key" not in data:
+            raise ValueError("VariableSection data must contain 'key'")
+
+        if "title" not in data:
+            raise ValueError("VariableSection data must contain 'title'")
+
+        self.key: str = data["key"]
+        self.title: str = data["title"]
+        self.variables: OrderedDict[str, Variable] = OrderedDict()
+        self.description: Optional[str] = data.get("description")
+        self.toggle: Optional[str] = data.get("toggle")
+        # Track which fields were explicitly provided (to support explicit clears)
+        self._explicit_fields: set[str] = set(data.keys())
+        # Default "general" section to required=True, all others to required=False
+        self.required: bool = data.get("required", data["key"] == "general")
+        # Section dependencies - can be string or list of strings
+        # Supports semicolon-separated multiple conditions: "var1=value1;var2=value2,value3"
+        needs_value = data.get("needs")
+        if needs_value:
+            if isinstance(needs_value, str):
+                # Split by semicolon to support multiple AND conditions in a single string
+                # Example: "traefik_enabled=true;network_mode=bridge,macvlan"
+                self.needs: List[str] = [
+                    need.strip() for need in needs_value.split(";") if need.strip()
+                ]
+            elif isinstance(needs_value, list):
+                self.needs: List[str] = needs_value
+            else:
+                raise ValueError(
+                    f"Section '{self.key}' has invalid 'needs' value: must be string or list"
+                )
+        else:
+            self.needs: List[str] = []
+
+    def to_dict(self) -> Dict[str, Any]:
+        """Serialize VariableSection to a dictionary for storage."""
+        section_dict = {
+            "required": self.required,
+            "vars": {name: var.to_dict() for name, var in self.variables.items()},
+        }
+
+        # Add optional fields if present
+        for field in ("title", "description", "toggle"):
+            if value := getattr(self, field):
+                section_dict[field] = value
+
+        # Store dependencies (single value if only one, list otherwise)
+        if self.needs:
+            section_dict["needs"] = (
+                self.needs[0] if len(self.needs) == 1 else self.needs
+            )
+
+        return section_dict
+
+    def is_enabled(self) -> bool:
+        """Check if section is currently enabled based on toggle variable.
+
+        Returns:
+            True if section is enabled (required, no toggle, or toggle is True), False otherwise
+        """
+        # Required sections are always enabled, regardless of toggle
+        if self.required:
+            return True
+
+        if not self.toggle:
+            return True
+
+        toggle_var = self.variables.get(self.toggle)
+        if not toggle_var:
+            return True
+
+        try:
+            return bool(toggle_var.convert(toggle_var.value))
+        except Exception:
+            return False
+
+    def clone(self, origin_update: Optional[str] = None) -> "VariableSection":
+        """Create a deep copy of the section with all variables.
+
+        This is more efficient than converting to dict and back when copying sections.
+
+        Args:
+            origin_update: Optional origin string to apply to all cloned variables
+
+        Returns:
+            New VariableSection instance with deep-copied variables
+
+        Example:
+            section2 = section1.clone(origin_update='template')
+        """
+        # Create new section with same metadata
+        cloned = VariableSection(
+            {
+                "key": self.key,
+                "title": self.title,
+                "description": self.description,
+                "toggle": self.toggle,
+                "required": self.required,
+                "needs": self.needs.copy() if self.needs else None,
+            }
+        )
+
+        # Deep copy all variables
+        for var_name, variable in self.variables.items():
+            if origin_update:
+                cloned.variables[var_name] = variable.clone(
+                    update={"origin": origin_update}
+                )
+            else:
+                cloned.variables[var_name] = variable.clone()
+
+        return cloned
+
+    def sort_variables(self, is_need_satisfied_func=None) -> None:
+        """Sort variables within section for optimal display and user interaction.
+
+        Current sorting strategy:
+        - Variables with no dependencies come first
+        - Variables that depend on others come after their dependencies (topological sort)
+        - Original order is preserved for variables at the same dependency level
+
+        Future sorting strategies can be added here (e.g., by type, required first, etc.)
+
+        Args:
+            is_need_satisfied_func: Optional function to check if a variable need is satisfied
+                                   (reserved for future use in conditional sorting)
+        """
+        if not self.variables:
+            return
+
+        # Build dependency graph
+        var_list = list(self.variables.keys())
+        var_set = set(var_list)
+
+        # For each variable, find which OTHER variables in THIS section it depends on
+        dependencies = {var_name: [] for var_name in var_list}
+        for var_name in var_list:
+            variable = self.variables[var_name]
+            if variable.needs:
+                for need in variable.needs:
+                    # Parse need format: "variable_name=value"
+                    dep_var = need.split("=")[0] if "=" in need else need
+                    # Only track dependencies within THIS section
+                    if dep_var in var_set and dep_var != var_name:
+                        dependencies[var_name].append(dep_var)
+
+        # Topological sort using Kahn's algorithm
+        in_degree = {var_name: len(deps) for var_name, deps in dependencies.items()}
+        queue = [var for var, degree in in_degree.items() if degree == 0]
+        # Preserve original order for variables with same dependency level
+        queue.sort(key=lambda v: var_list.index(v))
+        result = []
+
+        while queue:
+            current = queue.pop(0)
+            result.append(current)
+
+            # Update in-degree for dependent variables
+            for var_name, deps in dependencies.items():
+                if current in deps:
+                    in_degree[var_name] -= 1
+                    if in_degree[var_name] == 0:
+                        queue.append(var_name)
+                        queue.sort(key=lambda v: var_list.index(v))
+
+        # If not all variables were sorted (cycle), append remaining in original order
+        if len(result) != len(var_list):
+            for var_name in var_list:
+                if var_name not in result:
+                    result.append(var_name)
+
+        # Rebuild variables OrderedDict in new order
+        sorted_vars = OrderedDict()
+        for var_name in result:
+            sorted_vars[var_name] = self.variables[var_name]
+        self.variables = sorted_vars

+ 907 - 714
cli/core/template.py

@@ -1,16 +1,15 @@
 from __future__ import annotations
 
-from .variable import Variable
 from .collection import VariableCollection
 from .exceptions import (
-    TemplateError,
     TemplateLoadError,
     TemplateSyntaxError,
     TemplateValidationError,
     TemplateRenderError,
     YAMLParseError,
-    ModuleLoadError
+    IncompatibleSchemaVersionError,
 )
+from .version import is_compatible
 from pathlib import Path
 from typing import Any, Dict, List, Set, Optional, Literal
 from dataclasses import dataclass, field
@@ -26,750 +25,944 @@ from jinja2.exceptions import (
     TemplateSyntaxError as Jinja2TemplateSyntaxError,
     UndefinedError,
     TemplateError as Jinja2TemplateError,
-    TemplateNotFound as Jinja2TemplateNotFound
+    TemplateNotFound as Jinja2TemplateNotFound,
 )
 
 logger = logging.getLogger(__name__)
 
 
 def _extract_error_context(
-    file_path: Path,
-    line_number: Optional[int],
-    context_size: int = 3
+    file_path: Path, line_number: Optional[int], context_size: int = 3
 ) -> List[str]:
-  """Extract lines of context around an error location.
-  
-  Args:
-      file_path: Path to the file with the error
-      line_number: Line number where error occurred (1-indexed)
-      context_size: Number of lines to show before and after
-      
-  Returns:
-      List of context lines with line numbers
-  """
-  if not line_number or not file_path.exists():
-    return []
-  
-  try:
-    with open(file_path, 'r', encoding='utf-8') as f:
-      lines = f.readlines()
-    
-    start_line = max(0, line_number - context_size - 1)
-    end_line = min(len(lines), line_number + context_size)
-    
-    context = []
-    for i in range(start_line, end_line):
-      line_num = i + 1
-      marker = '>>>' if line_num == line_number else '   '
-      context.append(f"{marker} {line_num:4d} | {lines[i].rstrip()}")
-    
-    return context
-  except (IOError, OSError):
-    return []
+    """Extract lines of context around an error location.
+
+    Args:
+        file_path: Path to the file with the error
+        line_number: Line number where error occurred (1-indexed)
+        context_size: Number of lines to show before and after
+
+    Returns:
+        List of context lines with line numbers
+    """
+    if not line_number or not file_path.exists():
+        return []
+
+    try:
+        with open(file_path, "r", encoding="utf-8") as f:
+            lines = f.readlines()
+
+        start_line = max(0, line_number - context_size - 1)
+        end_line = min(len(lines), line_number + context_size)
+
+        context = []
+        for i in range(start_line, end_line):
+            line_num = i + 1
+            marker = ">>>" if line_num == line_number else "   "
+            context.append(f"{marker} {line_num:4d} | {lines[i].rstrip()}")
+
+        return context
+    except (IOError, OSError):
+        return []
 
 
 def _get_common_jinja_suggestions(error_msg: str, available_vars: set) -> List[str]:
-  """Generate helpful suggestions based on common Jinja2 errors.
-  
-  Args:
-      error_msg: The error message from Jinja2
-      available_vars: Set of available variable names
-      
-  Returns:
-      List of actionable suggestions
-  """
-  suggestions = []
-  error_lower = error_msg.lower()
-  
-  # Undefined variable errors
-  if 'undefined' in error_lower or 'is not defined' in error_lower:
-    # Try to extract variable name from error message
-    import re
-    var_match = re.search(r"'([^']+)'.*is undefined", error_msg)
-    if not var_match:
-      var_match = re.search(r"'([^']+)'.*is not defined", error_msg)
-    
-    if var_match:
-      undefined_var = var_match.group(1)
-      suggestions.append(f"Variable '{undefined_var}' is not defined in the template spec")
-      
-      # Suggest similar variable names (basic fuzzy matching)
-      similar = [v for v in available_vars if undefined_var.lower() in v.lower() or v.lower() in undefined_var.lower()]
-      if similar:
-        suggestions.append(f"Did you mean one of these? {', '.join(sorted(similar)[:5])}")
-      
-      suggestions.append(f"Add '{undefined_var}' to your template.yaml spec with a default value")
-      suggestions.append("Or use the Jinja2 default filter: {{ " + undefined_var + " | default('value') }}")
-    else:
-      suggestions.append("Check that all variables used in templates are defined in template.yaml")
-      suggestions.append("Use the Jinja2 default filter for optional variables: {{ var | default('value') }}")
-  
-  # Syntax errors
-  elif 'unexpected' in error_lower or 'expected' in error_lower:
-    suggestions.append("Check for syntax errors in your Jinja2 template")
-    suggestions.append("Common issues: missing {% endfor %}, {% endif %}, or {% endblock %}")
-    suggestions.append("Make sure all {{ }} and {% %} tags are properly closed")
-  
-  # Filter errors
-  elif 'filter' in error_lower:
-    suggestions.append("Check that the filter name is spelled correctly")
-    suggestions.append("Verify the filter exists in Jinja2 built-in filters")
-    suggestions.append("Make sure filter arguments are properly formatted")
-  
-  # Template not found
-  elif 'not found' in error_lower or 'does not exist' in error_lower:
-    suggestions.append("Check that the included/imported template file exists")
-    suggestions.append("Verify the template path is relative to the template directory")
-    suggestions.append("Make sure the file has the .j2 extension if it's a Jinja2 template")
-  
-  # Type errors
-  elif 'type' in error_lower and ('int' in error_lower or 'str' in error_lower or 'bool' in error_lower):
-    suggestions.append("Check that variable values have the correct type")
-    suggestions.append("Use Jinja2 filters to convert types: {{ var | int }}, {{ var | string }}")
-  
-  # Add generic helpful tip
-  if not suggestions:
-    suggestions.append("Check the Jinja2 template syntax and variable usage")
-    suggestions.append("Enable --debug mode for more detailed rendering information")
-  
-  return suggestions
+    """Generate helpful suggestions based on common Jinja2 errors.
+
+    Args:
+        error_msg: The error message from Jinja2
+        available_vars: Set of available variable names
+
+    Returns:
+        List of actionable suggestions
+    """
+    suggestions = []
+    error_lower = error_msg.lower()
+
+    # Undefined variable errors
+    if "undefined" in error_lower or "is not defined" in error_lower:
+        # Try to extract variable name from error message
+        import re
+
+        var_match = re.search(r"'([^']+)'.*is undefined", error_msg)
+        if not var_match:
+            var_match = re.search(r"'([^']+)'.*is not defined", error_msg)
+
+        if var_match:
+            undefined_var = var_match.group(1)
+            suggestions.append(
+                f"Variable '{undefined_var}' is not defined in the template spec"
+            )
+
+            # Suggest similar variable names (basic fuzzy matching)
+            similar = [
+                v
+                for v in available_vars
+                if undefined_var.lower() in v.lower()
+                or v.lower() in undefined_var.lower()
+            ]
+            if similar:
+                suggestions.append(
+                    f"Did you mean one of these? {', '.join(sorted(similar)[:5])}"
+                )
+
+            suggestions.append(
+                f"Add '{undefined_var}' to your template.yaml spec with a default value"
+            )
+            suggestions.append(
+                "Or use the Jinja2 default filter: {{ "
+                + undefined_var
+                + " | default('value') }}"
+            )
+        else:
+            suggestions.append(
+                "Check that all variables used in templates are defined in template.yaml"
+            )
+            suggestions.append(
+                "Use the Jinja2 default filter for optional variables: {{ var | default('value') }}"
+            )
+
+    # Syntax errors
+    elif "unexpected" in error_lower or "expected" in error_lower:
+        suggestions.append("Check for syntax errors in your Jinja2 template")
+        suggestions.append(
+            "Common issues: missing {% endfor %}, {% endif %}, or {% endblock %}"
+        )
+        suggestions.append("Make sure all {{ }} and {% %} tags are properly closed")
+
+    # Filter errors
+    elif "filter" in error_lower:
+        suggestions.append("Check that the filter name is spelled correctly")
+        suggestions.append("Verify the filter exists in Jinja2 built-in filters")
+        suggestions.append("Make sure filter arguments are properly formatted")
+
+    # Template not found
+    elif "not found" in error_lower or "does not exist" in error_lower:
+        suggestions.append("Check that the included/imported template file exists")
+        suggestions.append(
+            "Verify the template path is relative to the template directory"
+        )
+        suggestions.append(
+            "Make sure the file has the .j2 extension if it's a Jinja2 template"
+        )
+
+    # Type errors
+    elif "type" in error_lower and (
+        "int" in error_lower or "str" in error_lower or "bool" in error_lower
+    ):
+        suggestions.append("Check that variable values have the correct type")
+        suggestions.append(
+            "Use Jinja2 filters to convert types: {{ var | int }}, {{ var | string }}"
+        )
+
+    # Add generic helpful tip
+    if not suggestions:
+        suggestions.append("Check the Jinja2 template syntax and variable usage")
+        suggestions.append(
+            "Enable --debug mode for more detailed rendering information"
+        )
+
+    return suggestions
 
 
 def _parse_jinja_error(
     error: Exception,
     template_file: TemplateFile,
     template_dir: Path,
-    available_vars: set
+    available_vars: set,
 ) -> tuple[str, Optional[int], Optional[int], List[str], List[str]]:
-  """Parse a Jinja2 exception to extract detailed error information.
-  
-  Args:
-      error: The Jinja2 exception
-      template_file: The TemplateFile being rendered
-      template_dir: Template directory path
-      available_vars: Set of available variable names
-      
-  Returns:
-      Tuple of (error_message, line_number, column, context_lines, suggestions)
-  """
-  error_msg = str(error)
-  line_number = None
-  column = None
-  context_lines = []
-  suggestions = []
-  
-  # Extract line number from Jinja2 errors
-  if hasattr(error, 'lineno'):
-    line_number = error.lineno
-  
-  # Extract file path and get context
-  file_path = template_dir / template_file.relative_path
-  if line_number and file_path.exists():
-    context_lines = _extract_error_context(file_path, line_number)
-  
-  # Generate suggestions based on error type
-  if isinstance(error, UndefinedError):
-    error_msg = f"Undefined variable: {error}"
-    suggestions = _get_common_jinja_suggestions(str(error), available_vars)
-  elif isinstance(error, Jinja2TemplateSyntaxError):
-    error_msg = f"Template syntax error: {error}"
-    suggestions = _get_common_jinja_suggestions(str(error), available_vars)
-  elif isinstance(error, Jinja2TemplateNotFound):
-    error_msg = f"Template file not found: {error}"
-    suggestions = _get_common_jinja_suggestions(str(error), available_vars)
-  else:
-    # Generic Jinja2 error
-    suggestions = _get_common_jinja_suggestions(error_msg, available_vars)
-  
-  return error_msg, line_number, column, context_lines, suggestions
+    """Parse a Jinja2 exception to extract detailed error information.
+
+    Args:
+        error: The Jinja2 exception
+        template_file: The TemplateFile being rendered
+        template_dir: Template directory path
+        available_vars: Set of available variable names
+
+    Returns:
+        Tuple of (error_message, line_number, column, context_lines, suggestions)
+    """
+    error_msg = str(error)
+    line_number = None
+    column = None
+    context_lines = []
+    suggestions = []
+
+    # Extract line number from Jinja2 errors
+    if hasattr(error, "lineno"):
+        line_number = error.lineno
+
+    # Extract file path and get context
+    file_path = template_dir / template_file.relative_path
+    if line_number and file_path.exists():
+        context_lines = _extract_error_context(file_path, line_number)
+
+    # Generate suggestions based on error type
+    if isinstance(error, UndefinedError):
+        error_msg = f"Undefined variable: {error}"
+        suggestions = _get_common_jinja_suggestions(str(error), available_vars)
+    elif isinstance(error, Jinja2TemplateSyntaxError):
+        error_msg = f"Template syntax error: {error}"
+        suggestions = _get_common_jinja_suggestions(str(error), available_vars)
+    elif isinstance(error, Jinja2TemplateNotFound):
+        error_msg = f"Template file not found: {error}"
+        suggestions = _get_common_jinja_suggestions(str(error), available_vars)
+    else:
+        # Generic Jinja2 error
+        suggestions = _get_common_jinja_suggestions(error_msg, available_vars)
+
+    return error_msg, line_number, column, context_lines, suggestions
 
 
 @dataclass
 class TemplateFile:
     """Represents a single file within a template directory."""
+
     relative_path: Path
-    file_type: Literal['j2', 'static']
-    output_path: Path # The path it will have in the output directory
+    file_type: Literal["j2", "static"]
+    output_path: Path  # The path it will have in the output directory
+
 
 @dataclass
 class TemplateMetadata:
-  """Represents template metadata with proper typing."""
-  name: str
-  description: str
-  author: str
-  date: str
-  version: str
-  module: str = ""
-  tags: List[str] = field(default_factory=list)
-  library: str = "unknown"
-  next_steps: str = ""
-  draft: bool = False
-
-  def __init__(self, template_data: dict, library_name: str | None = None) -> None:
-    """Initialize TemplateMetadata from parsed YAML template data.
-    
-    Args:
-        template_data: Parsed YAML data from template.yaml
-        library_name: Name of the library this template belongs to
-    """
-    # Validate metadata format first
-    self._validate_metadata(template_data)
-    
-    # Extract metadata section
-    metadata_section = template_data.get("metadata", {})
-    
-    self.name = metadata_section.get("name", "")
-    # YAML block scalar (|) preserves a trailing newline. Remove only trailing newlines
-    # while preserving internal newlines/formatting.
-    raw_description = metadata_section.get("description", "")
-    if isinstance(raw_description, str):
-      description = raw_description.rstrip("\n")
-    else:
-      description = str(raw_description)
-    self.description = description or "No description available"
-    self.author = metadata_section.get("author", "")
-    self.date = metadata_section.get("date", "")
-    self.version = metadata_section.get("version", "")
-    self.module = metadata_section.get("module", "")
-    self.tags = metadata_section.get("tags", []) or []
-    self.library = library_name or "unknown"
-    self.draft = metadata_section.get("draft", False)
-    
-    # Extract next_steps (optional)
-    raw_next_steps = metadata_section.get("next_steps", "")
-    if isinstance(raw_next_steps, str):
-      next_steps = raw_next_steps.rstrip("\n")
-    else:
-      next_steps = str(raw_next_steps) if raw_next_steps else ""
-    self.next_steps = next_steps
+    """Represents template metadata with proper typing."""
+
+    name: str
+    description: str
+    author: str
+    date: str
+    version: str
+    module: str = ""
+    tags: List[str] = field(default_factory=list)
+    library: str = "unknown"
+    library_type: str = "git"  # Type of library ("git" or "static")
+    next_steps: str = ""
+    draft: bool = False
+
+    def __init__(
+        self,
+        template_data: dict,
+        library_name: str | None = None,
+        library_type: str = "git",
+    ) -> None:
+        """Initialize TemplateMetadata from parsed YAML template data.
+
+        Args:
+            template_data: Parsed YAML data from template.yaml
+            library_name: Name of the library this template belongs to
+        """
+        # Validate metadata format first
+        self._validate_metadata(template_data)
+
+        # Extract metadata section
+        metadata_section = template_data.get("metadata", {})
+
+        self.name = metadata_section.get("name", "")
+        # YAML block scalar (|) preserves a trailing newline. Remove only trailing newlines
+        # while preserving internal newlines/formatting.
+        raw_description = metadata_section.get("description", "")
+        if isinstance(raw_description, str):
+            description = raw_description.rstrip("\n")
+        else:
+            description = str(raw_description)
+        self.description = description or "No description available"
+        self.author = metadata_section.get("author", "")
+        self.date = metadata_section.get("date", "")
+        self.version = metadata_section.get("version", "")
+        self.module = metadata_section.get("module", "")
+        self.tags = metadata_section.get("tags", []) or []
+        self.library = library_name or "unknown"
+        self.library_type = library_type
+        self.draft = metadata_section.get("draft", False)
+
+        # Extract next_steps (optional)
+        raw_next_steps = metadata_section.get("next_steps", "")
+        if isinstance(raw_next_steps, str):
+            next_steps = raw_next_steps.rstrip("\n")
+        else:
+            next_steps = str(raw_next_steps) if raw_next_steps else ""
+        self.next_steps = next_steps
+
+    @staticmethod
+    def _validate_metadata(template_data: dict) -> None:
+        """Validate that template has required 'metadata' section with all required fields.
+
+        Args:
+            template_data: Parsed YAML data from template.yaml
+
+        Raises:
+            ValueError: If metadata section is missing or incomplete
+        """
+        metadata_section = template_data.get("metadata")
+        if metadata_section is None:
+            raise ValueError("Template format error: missing 'metadata' section")
+
+        # Validate that metadata section has all required fields
+        required_fields = ["name", "author", "version", "date", "description"]
+        missing_fields = [
+            field for field in required_fields if not metadata_section.get(field)
+        ]
+
+        if missing_fields:
+            raise ValueError(
+                f"Template format error: missing required metadata fields: {missing_fields}"
+            )
 
-  @staticmethod
-  def _validate_metadata(template_data: dict) -> None:
-    """Validate that template has required 'metadata' section with all required fields.
-    
-    Args:
-        template_data: Parsed YAML data from template.yaml
-        
-    Raises:
-        ValueError: If metadata section is missing or incomplete
-    """
-    metadata_section = template_data.get("metadata")
-    if metadata_section is None:
-      raise ValueError("Template format error: missing 'metadata' section")
-    
-    # Validate that metadata section has all required fields
-    required_fields = ["name", "author", "version", "date", "description"]
-    missing_fields = [field for field in required_fields if not metadata_section.get(field)]
-    
-    if missing_fields:
-      raise ValueError(f"Template format error: missing required metadata fields: {missing_fields}")
 
 @dataclass
 class Template:
-  """Represents a template directory."""
-
-  def __init__(self, template_dir: Path, library_name: str) -> None:
-    """Create a Template instance from a directory path."""
-    logger.debug(f"Loading template from directory: {template_dir}")
-    self.template_dir = template_dir
-    self.id = template_dir.name
-    self.library_name = library_name
-
-    # Initialize caches for lazy loading
-    self.__module_specs: Optional[dict] = None
-    self.__merged_specs: Optional[dict] = None
-    self.__jinja_env: Optional[Environment] = None
-    self.__used_variables: Optional[Set[str]] = None
-    self.__variables: Optional[VariableCollection] = None
-    self.__template_files: Optional[List[TemplateFile]] = None # New attribute
+    """Represents a template directory."""
+
+    def __init__(
+        self, template_dir: Path, library_name: str, library_type: str = "git"
+    ) -> None:
+        """Create a Template instance from a directory path.
+
+        Args:
+            template_dir: Path to the template directory
+            library_name: Name of the library this template belongs to
+            library_type: Type of library ("git" or "static"), defaults to "git"
+        """
+        logger.debug(f"Loading template from directory: {template_dir}")
+        self.template_dir = template_dir
+        self.id = template_dir.name
+        self.original_id = template_dir.name  # Store the original ID
+        self.library_name = library_name
+        self.library_type = library_type
+
+        # Initialize caches for lazy loading
+        self.__module_specs: Optional[dict] = None
+        self.__merged_specs: Optional[dict] = None
+        self.__jinja_env: Optional[Environment] = None
+        self.__used_variables: Optional[Set[str]] = None
+        self.__variables: Optional[VariableCollection] = None
+        self.__template_files: Optional[List[TemplateFile]] = None  # New attribute
 
-    try:
-      # Find and parse the main template file (template.yaml or template.yml)
-      main_template_path = self._find_main_template_file()
-      with open(main_template_path, "r", encoding="utf-8") as f:
-        # Load all YAML documents (handles templates with empty lines before ---)
-        documents = list(yaml.safe_load_all(f))
-        
-        # Filter out None/empty documents and get the first non-empty one
-        valid_docs = [doc for doc in documents if doc is not None]
-        
-        if not valid_docs:
-          raise ValueError("Template file contains no valid YAML data")
-        
-        if len(valid_docs) > 1:
-          logger.warning(f"Template file contains multiple YAML documents, using the first one")
-        
-        self._template_data = valid_docs[0]
-      
-      # Validate template data
-      if not isinstance(self._template_data, dict):
-        raise ValueError("Template file must contain a valid YAML dictionary")
-
-      # Load metadata (always needed)
-      self.metadata = TemplateMetadata(self._template_data, library_name)
-      logger.debug(f"Loaded metadata: {self.metadata}")
-
-      # Validate 'kind' field (always needed)
-      self._validate_kind(self._template_data)
-
-      # NOTE: File collection is now lazy-loaded via the template_files property
-      # This significantly improves performance when listing many templates
-
-      logger.info(f"Loaded template '{self.id}' (v{self.metadata.version})")
-
-    except (ValueError, FileNotFoundError) as e:
-      logger.error(f"Error loading template from {template_dir}: {e}")
-      raise TemplateLoadError(f"Error loading template from {template_dir}: {e}")
-    except yaml.YAMLError as e:
-      logger.error(f"YAML parsing error in template {template_dir}: {e}")
-      raise YAMLParseError(str(template_dir / "template.y*ml"), e)
-    except (IOError, OSError) as e:
-      logger.error(f"File I/O error loading template {template_dir}: {e}")
-      raise TemplateLoadError(f"File I/O error loading template from {template_dir}: {e}")
-
-  def _find_main_template_file(self) -> Path:
-    """Find the main template file (template.yaml or template.yml)."""
-    for filename in ["template.yaml", "template.yml"]:
-      path = self.template_dir / filename
-      if path.exists():
-        return path
-    raise FileNotFoundError(f"Main template file (template.yaml or template.yml) not found in {self.template_dir}")
-
-  @staticmethod
-  @lru_cache(maxsize=32)
-  def _load_module_specs(kind: str) -> dict:
-    """Load specifications from the corresponding module with caching.
-    
-    Uses LRU cache to avoid re-loading the same module spec multiple times.
-    This significantly improves performance when listing many templates of the same kind.
-    
-    Args:
-        kind: The module kind (e.g., 'compose', 'terraform')
-        
-    Returns:
-        Dictionary containing the module's spec, or empty dict if kind is empty
-        
-    Raises:
-        ValueError: If module cannot be loaded or spec is invalid
-    """
-    if not kind:
-      return {}
-    try:
-      import importlib
-      module = importlib.import_module(f"cli.modules.{kind}")
-      spec = getattr(module, 'spec', {})
-      logger.debug(f"Loaded and cached module spec for kind '{kind}'")
-      return spec
-    except Exception as e:
-      raise ValueError(f"Error loading module specifications for kind '{kind}': {e}")
-
-  def _merge_specs(self, module_specs: dict, template_specs: dict) -> dict:
-    """Deep merge template specs with module specs using VariableCollection.
-    
-    Uses VariableCollection's native merge() method for consistent merging logic.
-    Module specs are base, template specs override with origin tracking.
-    """
-    # Create VariableCollection from module specs (base)
-    module_collection = VariableCollection(module_specs) if module_specs else VariableCollection({})
-    
-    # Set origin for module variables
-    for section in module_collection.get_sections().values():
-      for variable in section.variables.values():
-        if not variable.origin:
-          variable.origin = "module"
-    
-    # Merge template specs into module specs (template overrides)
-    if template_specs:
-      merged_collection = module_collection.merge(template_specs, origin="template")
-    else:
-      merged_collection = module_collection
-    
-    # Convert back to dict format
-    merged_spec = {}
-    for section_key, section in merged_collection.get_sections().items():
-      merged_spec[section_key] = section.to_dict()
-    
-    return merged_spec
-
-  def _collect_template_files(self) -> None:
-    """Collects all TemplateFile objects in the template directory."""
-    template_files: List[TemplateFile] = []
-    
-    for root, _, files in os.walk(self.template_dir):
-      for filename in files:
-        file_path = Path(root) / filename
-        relative_path = file_path.relative_to(self.template_dir)
-        
-        # Skip the main template file
-        if filename in ["template.yaml", "template.yml"]:
-          continue
-        
-        if filename.endswith(".j2"):
-          file_type: Literal['j2', 'static'] = 'j2'
-          output_path = relative_path.with_suffix('') # Remove .j2 suffix
-        else:
-          file_type = 'static'
-          output_path = relative_path # Static files keep their name
-        
-        template_files.append(TemplateFile(relative_path=relative_path, file_type=file_type, output_path=output_path))
-          
-    self.__template_files = template_files
-
-  def _extract_all_used_variables(self) -> Set[str]:
-    """Extract all undeclared variables from all .j2 files in the template directory.
-    
-    Raises:
-        ValueError: If any Jinja2 template has syntax errors
-    """
-    used_variables: Set[str] = set()
-    syntax_errors = []
-    
-    for template_file in self.template_files: # Iterate over TemplateFile objects
-      if template_file.file_type == 'j2':
-        file_path = self.template_dir / template_file.relative_path
         try:
-          with open(file_path, "r", encoding="utf-8") as f:
-            content = f.read()
-            ast = self.jinja_env.parse(content) # Use lazy-loaded jinja_env
-            used_variables.update(meta.find_undeclared_variables(ast))
-        except (IOError, OSError) as e:
-          relative_path = file_path.relative_to(self.template_dir)
-          syntax_errors.append(f"  - {relative_path}: File I/O error: {e}")
-        except Exception as e:
-          # Collect syntax errors for Jinja2 issues
-          relative_path = file_path.relative_to(self.template_dir)
-          syntax_errors.append(f"  - {relative_path}: {e}")
-    
-    # Raise error if any syntax errors were found
-    if syntax_errors:
-      logger.error(f"Jinja2 syntax errors found in template '{self.id}'")
-      raise TemplateSyntaxError(self.id, syntax_errors)
-    
-    return used_variables
-
-  def _extract_jinja_default_values(self) -> dict[str, object]:
-    """Scan all .j2 files and extract literal arguments to the `default` filter.
-
-    Returns a mapping var_name -> literal_value for simple cases like
-    {{ var | default("value") }} or {{ var | default(123) }}.
-    This does not attempt to evaluate complex expressions.
-    """
-    defaults: dict[str, object] = {}
+            # Find and parse the main template file (template.yaml or template.yml)
+            main_template_path = self._find_main_template_file()
+            with open(main_template_path, "r", encoding="utf-8") as f:
+                # Load all YAML documents (handles templates with empty lines before ---)
+                documents = list(yaml.safe_load_all(f))
 
-    class _DefaultVisitor(NodeVisitor):
-      def __init__(self):
-        self.found: dict[str, object] = {}
+                # Filter out None/empty documents and get the first non-empty one
+                valid_docs = [doc for doc in documents if doc is not None]
 
-      def visit_Filter(self, node: nodes.Filter) -> None:  # type: ignore[override]
-        try:
-          if getattr(node, 'name', None) == 'default' and node.args:
-            # target variable name when filter is applied directly to a Name
-            target = None
-            if isinstance(node.node, nodes.Name):
-              target = node.node.name
-
-            # first arg literal
-            first = node.args[0]
-            if isinstance(first, nodes.Const) and target:
-              self.found[target] = first.value
-        except Exception:
-          # Be resilient to unexpected node shapes
-          pass
-        # continue traversal
-        self.generic_visit(node)
-
-    visitor = _DefaultVisitor()
-
-    for template_file in self.template_files:
-      if template_file.file_type != 'j2':
-        continue
-      file_path = self.template_dir / template_file.relative_path
-      try:
-        with open(file_path, 'r', encoding='utf-8') as f:
-          content = f.read()
-        ast = self.jinja_env.parse(content)
-        visitor.visit(ast)
-      except (IOError, OSError, yaml.YAMLError):
-        # Skip failures - this extraction is best-effort only
-        continue
-
-    return visitor.found
-
-  def _filter_specs_to_used(self, used_variables: set, merged_specs: dict, module_specs: dict, template_specs: dict) -> dict:
-    """Filter specs to only include variables used in templates using VariableCollection.
-    
-    Uses VariableCollection's native filter_to_used() method.
-    Keeps sensitive variables only if they're defined in the template spec or actually used.
-    """
-    # Build set of variables explicitly defined in template spec
-    template_defined_vars = set()
-    for section_data in (template_specs or {}).values():
-      if isinstance(section_data, dict) and 'vars' in section_data:
-        template_defined_vars.update(section_data['vars'].keys())
-    
-    # Create VariableCollection from merged specs
-    merged_collection = VariableCollection(merged_specs)
-    
-    # Filter to only used variables (and sensitive ones that are template-defined)
-    # We keep sensitive variables that are either:
-    # 1. Actually used in template files, OR
-    # 2. Explicitly defined in the template spec (even if not yet used)
-    variables_to_keep = used_variables | template_defined_vars
-    filtered_collection = merged_collection.filter_to_used(variables_to_keep, keep_sensitive=False)
-    
-    # Convert back to dict format
-    filtered_specs = {}
-    for section_key, section in filtered_collection.get_sections().items():
-      filtered_specs[section_key] = section.to_dict()
-    
-    return filtered_specs
-
-  @staticmethod
-  def _validate_kind(template_data: dict) -> None:
-    """Validate that template has required 'kind' field.
-    
-    Args:
-        template_data: Parsed YAML data from template.yaml
-        
-    Raises:
-        ValueError: If 'kind' field is missing
-    """
-    if not template_data.get("kind"):
-      raise TemplateValidationError("Template format error: missing 'kind' field")
-
-  def _validate_variable_definitions(self, used_variables: set[str], merged_specs: dict[str, Any]) -> None:
-    """Validate that all variables used in Jinja2 content are defined in the spec."""
-    defined_variables = set()
-    for section_data in merged_specs.values():
-      if "vars" in section_data and isinstance(section_data["vars"], dict):
-        defined_variables.update(section_data["vars"].keys())
-    
-    undefined_variables = used_variables - defined_variables
-    if undefined_variables:
-      undefined_list = sorted(undefined_variables)
-      error_msg = (
-          f"Template validation error in '{self.id}': "
-          f"Variables used in template content but not defined in spec: {undefined_list}\n\n"
-          f"Please add these variables to your template's template.yaml spec. "
-          f"Each variable must have a default value.\n\n"
-          f"Example:\n"
-          f"spec:\n"
-          f"  general:\n"
-          f"    vars:\n"
-      )
-      for var_name in undefined_list:
-          error_msg += (
-              f"      {var_name}:\n"
-              f"        type: str\n"
-              f"        description: Description for {var_name}\n"
-              f"        default: <your_default_value_here>\n"
-          )
-      logger.error(error_msg)
-      raise TemplateValidationError(error_msg)
-
-  @staticmethod
-  def _create_jinja_env(searchpath: Path) -> Environment:
-    """Create sandboxed Jinja2 environment for secure template processing.
-    
-    Uses SandboxedEnvironment to prevent code injection vulnerabilities
-    when processing untrusted templates. This restricts access to dangerous
-    operations while still allowing safe template rendering.
-    
-    Returns:
-        SandboxedEnvironment configured for template processing.
-    """
-    # NOTE Use SandboxedEnvironment for security - prevents arbitrary code execution
-    return SandboxedEnvironment(
-      loader=FileSystemLoader(searchpath),
-      trim_blocks=True,
-      lstrip_blocks=True,
-      keep_trailing_newline=False,
-    )
-
-  def render(self, variables: VariableCollection, debug: bool = False) -> tuple[Dict[str, str], Dict[str, Any]]:
-    """Render all .j2 files in the template directory.
-    
-    Args:
-        variables: VariableCollection with values to use for rendering
-        debug: Enable debug mode with verbose output
-        
-    Returns:
-        Tuple of (rendered_files, variable_values) where variable_values includes autogenerated values
-    """
-    # Use get_satisfied_values() to exclude variables from sections with unsatisfied dependencies
-    variable_values = variables.get_satisfied_values()
-    
-    # Auto-generate values for autogenerated variables that are empty
-    import secrets
-    import string
-    for section in variables.get_sections().values():
-      for var_name, variable in section.variables.items():
-        if variable.autogenerated and (variable.value is None or variable.value == ""):
-          # Generate a secure random string (32 characters by default)
-          alphabet = string.ascii_letters + string.digits
-          generated_value = ''.join(secrets.choice(alphabet) for _ in range(32))
-          variable_values[var_name] = generated_value
-          logger.debug(f"Auto-generated value for variable '{var_name}'")
-    
-    if debug:
-      logger.info(f"Rendering template '{self.id}' in debug mode")
-      logger.info(f"Available variables: {sorted(variable_values.keys())}")
-      logger.info(f"Variable values: {variable_values}")
-    else:
-      logger.debug(f"Rendering template '{self.id}' with variables: {variable_values}")
-    
-    rendered_files = {}
-    available_vars = set(variable_values.keys())
-    
-    for template_file in self.template_files: # Iterate over TemplateFile objects
-      if template_file.file_type == 'j2':
+                if not valid_docs:
+                    raise ValueError("Template file contains no valid YAML data")
+
+                if len(valid_docs) > 1:
+                    logger.warning(
+                        "Template file contains multiple YAML documents, using the first one"
+                    )
+
+                self._template_data = valid_docs[0]
+
+            # Validate template data
+            if not isinstance(self._template_data, dict):
+                raise ValueError("Template file must contain a valid YAML dictionary")
+
+            # Load metadata (always needed)
+            self.metadata = TemplateMetadata(
+                self._template_data, library_name, library_type
+            )
+            logger.debug(f"Loaded metadata: {self.metadata}")
+
+            # Validate 'kind' field (always needed)
+            self._validate_kind(self._template_data)
+
+            # Extract schema version (default to 1.0 for backward compatibility)
+            self.schema_version = str(self._template_data.get("schema", "1.0"))
+            logger.debug(f"Template schema version: {self.schema_version}")
+
+            # Note: Schema version validation is done by the module when loading templates
+
+            # NOTE: File collection is now lazy-loaded via the template_files property
+            # This significantly improves performance when listing many templates
+
+            logger.info(f"Loaded template '{self.id}' (v{self.metadata.version})")
+
+        except (ValueError, FileNotFoundError) as e:
+            logger.error(f"Error loading template from {template_dir}: {e}")
+            raise TemplateLoadError(f"Error loading template from {template_dir}: {e}")
+        except yaml.YAMLError as e:
+            logger.error(f"YAML parsing error in template {template_dir}: {e}")
+            raise YAMLParseError(str(template_dir / "template.y*ml"), e)
+        except (IOError, OSError) as e:
+            logger.error(f"File I/O error loading template {template_dir}: {e}")
+            raise TemplateLoadError(
+                f"File I/O error loading template from {template_dir}: {e}"
+            )
+
+    def set_qualified_id(self, library_name: str | None = None) -> None:
+        """Set a qualified ID for this template (used when duplicates exist across libraries).
+
+        Args:
+            library_name: Name of the library to qualify with. If None, uses self.library_name
+        """
+        lib_name = library_name or self.library_name
+        self.id = f"{self.original_id}.{lib_name}"
+        logger.debug(f"Template ID qualified: {self.original_id} -> {self.id}")
+
+    def _find_main_template_file(self) -> Path:
+        """Find the main template file (template.yaml or template.yml)."""
+        for filename in ["template.yaml", "template.yml"]:
+            path = self.template_dir / filename
+            if path.exists():
+                return path
+        raise FileNotFoundError(
+            f"Main template file (template.yaml or template.yml) not found in {self.template_dir}"
+        )
+
+    @staticmethod
+    @lru_cache(maxsize=32)
+    def _load_module_specs_for_schema(kind: str, schema_version: str) -> dict:
+        """Load specifications from the corresponding module for a specific schema version.
+
+        Uses LRU cache to avoid re-loading the same module spec multiple times.
+        This significantly improves performance when listing many templates of the same kind.
+
+        Args:
+            kind: The module kind (e.g., 'compose', 'terraform')
+            schema_version: The schema version to load (e.g., '1.0', '1.1')
+
+        Returns:
+            Dictionary containing the module's spec for the requested schema version,
+            or empty dict if kind is empty
+
+        Raises:
+            ValueError: If module cannot be loaded or spec is invalid
+        """
+        if not kind:
+            return {}
         try:
-          if debug:
-            logger.info(f"Rendering Jinja2 template: {template_file.relative_path}")
-          
-          template = self.jinja_env.get_template(str(template_file.relative_path)) # Use lazy-loaded jinja_env
-          rendered_content = template.render(**variable_values)
-          
-          # Sanitize the rendered content to remove excessive blank lines
-          rendered_content = self._sanitize_content(rendered_content, template_file.output_path)
-          rendered_files[str(template_file.output_path)] = rendered_content
-          
-          if debug:
-            logger.info(f"Successfully rendered: {template_file.relative_path} -> {template_file.output_path}")
-        
-        except (UndefinedError, Jinja2TemplateSyntaxError, Jinja2TemplateNotFound, Jinja2TemplateError) as e:
-          # Parse Jinja2 error to extract detailed information
-          error_msg, line_num, col, context_lines, suggestions = _parse_jinja_error(
-              e, template_file, self.template_dir, available_vars
-          )
-          
-          logger.error(f"Error rendering template file {template_file.relative_path}: {error_msg}")
-          
-          # Create enhanced TemplateRenderError with all context
-          raise TemplateRenderError(
-              message=error_msg,
-              file_path=str(template_file.relative_path),
-              line_number=line_num,
-              column=col,
-              context_lines=context_lines,
-              variable_context={k: str(v) for k, v in variable_values.items()} if debug else {},
-              suggestions=suggestions,
-              original_error=e
-          )
-        
+            import importlib
+
+            module = importlib.import_module(f"cli.modules.{kind}")
+            
+            # Check if module has schema-specific specs (multi-schema support)
+            # Try SCHEMAS constant first (uppercase), then schemas attribute
+            schemas = getattr(module, "SCHEMAS", None) or getattr(module, "schemas", None)
+            if schemas and schema_version in schemas:
+                spec = schemas[schema_version]
+                logger.debug(
+                    f"Loaded and cached module spec for kind '{kind}' schema {schema_version}"
+                )
+            else:
+                # Fallback to default spec if schema mapping not available
+                spec = getattr(module, "spec", {})
+                logger.debug(
+                    f"Loaded and cached module spec for kind '{kind}' (default/no schema mapping)"
+                )
+            
+            return spec
         except Exception as e:
-          # Catch any other unexpected errors
-          logger.error(f"Unexpected error rendering template file {template_file.relative_path}: {e}")
-          raise TemplateRenderError(
-              message=f"Unexpected rendering error: {e}",
-              file_path=str(template_file.relative_path),
-              suggestions=["This is an unexpected error. Please check the template for issues."],
-              original_error=e
-          )
-      
-      elif template_file.file_type == 'static':
-          # For static files, just read their content and add to rendered_files
-          # This ensures static files are also part of the output dictionary
-          file_path = self.template_dir / template_file.relative_path
-          try:
-              if debug:
-                logger.info(f"Copying static file: {template_file.relative_path}")
-              
-              with open(file_path, "r", encoding="utf-8") as f:
-                  content = f.read()
-                  rendered_files[str(template_file.output_path)] = content
-          except (IOError, OSError) as e:
-              logger.error(f"Error reading static file {file_path}: {e}")
-              raise TemplateRenderError(
-                  message=f"Error reading static file: {e}",
-                  file_path=str(template_file.relative_path),
-                  suggestions=["Check that the file exists and has read permissions"],
-                  original_error=e
-              )
-          
-    return rendered_files, variable_values
-  
-  def _sanitize_content(self, content: str, file_path: Path) -> str:
-    """Sanitize rendered content by removing excessive blank lines and trailing whitespace."""
-    if not content:
-      return content
-    
-    lines = [line.rstrip() for line in content.split('\n')]
-    sanitized = []
-    prev_blank = False
-    
-    for line in lines:
-      is_blank = not line
-      if is_blank and prev_blank:
-        continue  # Skip consecutive blank lines
-      sanitized.append(line)
-      prev_blank = is_blank
-    
-    # Remove leading blanks and ensure single trailing newline
-    return '\n'.join(sanitized).lstrip('\n').rstrip('\n') + '\n'
-
-  
-  @property
-  def template_files(self) -> List[TemplateFile]:
-      if self.__template_files is None:
-          self._collect_template_files() # Populate self.__template_files
-      return self.__template_files
-
-  @property
-  def template_specs(self) -> dict:
-      """Get the spec section from template YAML data."""
-      return self._template_data.get("spec", {})
-
-  @property
-  def module_specs(self) -> dict:
-      """Get the spec from the module definition."""
-      if self.__module_specs is None:
-          kind = self._template_data.get("kind")
-          self.__module_specs = self._load_module_specs(kind)
-      return self.__module_specs
-
-  @property
-  def merged_specs(self) -> dict:
-      if self.__merged_specs is None:
-          self.__merged_specs = self._merge_specs(self.module_specs, self.template_specs)
-      return self.__merged_specs
-
-  @property
-  def jinja_env(self) -> Environment:
-      if self.__jinja_env is None:
-          self.__jinja_env = self._create_jinja_env(self.template_dir)
-      return self.__jinja_env
-
-  @property
-  def used_variables(self) -> Set[str]:
-      if self.__used_variables is None:
-          self.__used_variables = self._extract_all_used_variables()
-      return self.__used_variables
-
-  @property
-  def variables(self) -> VariableCollection:
-      if self.__variables is None:
-          # Validate that all used variables are defined
-          self._validate_variable_definitions(self.used_variables, self.merged_specs)
-          # Filter specs to only used variables
-          filtered_specs = self._filter_specs_to_used(self.used_variables, self.merged_specs, self.module_specs, self.template_specs)
-
-          # Best-effort: extract literal defaults from Jinja `default()` filter and
-          # merge them into the filtered_specs when no default exists there.
-          try:
-            jinja_defaults = self._extract_jinja_default_values()
-            for section_key, section_data in filtered_specs.items():
-              # Guard against None from empty YAML sections
-              vars_dict = section_data.get('vars') or {}
-              for var_name, var_data in vars_dict.items():
-                if 'default' not in var_data or var_data.get('default') in (None, ''):
-                  if var_name in jinja_defaults:
-                    var_data['default'] = jinja_defaults[var_name]
-          except (KeyError, TypeError, AttributeError):
-            # Keep behavior stable on any extraction errors
-            pass
-
-          self.__variables = VariableCollection(filtered_specs)
-          # Sort sections: required first, then enabled, then disabled
-          self.__variables.sort_sections()
-      return self.__variables
+            raise ValueError(
+                f"Error loading module specifications for kind '{kind}': {e}"
+            )
+
+    def _merge_specs(self, module_specs: dict, template_specs: dict) -> dict:
+        """Deep merge template specs with module specs using VariableCollection.
+
+        Uses VariableCollection's native merge() method for consistent merging logic.
+        Module specs are base, template specs override with origin tracking.
+        """
+        # Create VariableCollection from module specs (base)
+        module_collection = (
+            VariableCollection(module_specs) if module_specs else VariableCollection({})
+        )
+
+        # Set origin for module variables
+        for section in module_collection.get_sections().values():
+            for variable in section.variables.values():
+                if not variable.origin:
+                    variable.origin = "module"
+
+        # Merge template specs into module specs (template overrides)
+        if template_specs:
+            merged_collection = module_collection.merge(
+                template_specs, origin="template"
+            )
+        else:
+            merged_collection = module_collection
+
+        # Convert back to dict format
+        merged_spec = {}
+        for section_key, section in merged_collection.get_sections().items():
+            merged_spec[section_key] = section.to_dict()
+
+        return merged_spec
+
+    def _collect_template_files(self) -> None:
+        """Collects all TemplateFile objects in the template directory."""
+        template_files: List[TemplateFile] = []
+
+        for root, _, files in os.walk(self.template_dir):
+            for filename in files:
+                file_path = Path(root) / filename
+                relative_path = file_path.relative_to(self.template_dir)
+
+                # Skip the main template file
+                if filename in ["template.yaml", "template.yml"]:
+                    continue
+
+                if filename.endswith(".j2"):
+                    file_type: Literal["j2", "static"] = "j2"
+                    output_path = relative_path.with_suffix("")  # Remove .j2 suffix
+                else:
+                    file_type = "static"
+                    output_path = relative_path  # Static files keep their name
+
+                template_files.append(
+                    TemplateFile(
+                        relative_path=relative_path,
+                        file_type=file_type,
+                        output_path=output_path,
+                    )
+                )
+
+        self.__template_files = template_files
+
+    def _extract_all_used_variables(self) -> Set[str]:
+        """Extract all undeclared variables from all .j2 files in the template directory.
+
+        Raises:
+            ValueError: If any Jinja2 template has syntax errors
+        """
+        used_variables: Set[str] = set()
+        syntax_errors = []
+
+        for template_file in self.template_files:  # Iterate over TemplateFile objects
+            if template_file.file_type == "j2":
+                file_path = self.template_dir / template_file.relative_path
+                try:
+                    with open(file_path, "r", encoding="utf-8") as f:
+                        content = f.read()
+                        ast = self.jinja_env.parse(content)  # Use lazy-loaded jinja_env
+                        used_variables.update(meta.find_undeclared_variables(ast))
+                except (IOError, OSError) as e:
+                    relative_path = file_path.relative_to(self.template_dir)
+                    syntax_errors.append(f"  - {relative_path}: File I/O error: {e}")
+                except Exception as e:
+                    # Collect syntax errors for Jinja2 issues
+                    relative_path = file_path.relative_to(self.template_dir)
+                    syntax_errors.append(f"  - {relative_path}: {e}")
+
+        # Raise error if any syntax errors were found
+        if syntax_errors:
+            logger.error(f"Jinja2 syntax errors found in template '{self.id}'")
+            raise TemplateSyntaxError(self.id, syntax_errors)
+
+        return used_variables
+
+    def _extract_jinja_default_values(self) -> dict[str, object]:
+        """Scan all .j2 files and extract literal arguments to the `default` filter.
+
+        Returns a mapping var_name -> literal_value for simple cases like
+        {{ var | default("value") }} or {{ var | default(123) }}.
+        This does not attempt to evaluate complex expressions.
+        """
+
+        class _DefaultVisitor(NodeVisitor):
+            def __init__(self):
+                self.found: dict[str, object] = {}
+
+            def visit_Filter(self, node: nodes.Filter) -> None:  # type: ignore[override]
+                try:
+                    if getattr(node, "name", None) == "default" and node.args:
+                        # target variable name when filter is applied directly to a Name
+                        target = None
+                        if isinstance(node.node, nodes.Name):
+                            target = node.node.name
+
+                        # first arg literal
+                        first = node.args[0]
+                        if isinstance(first, nodes.Const) and target:
+                            self.found[target] = first.value
+                except Exception:
+                    # Be resilient to unexpected node shapes
+                    pass
+                # continue traversal
+                self.generic_visit(node)
+
+        visitor = _DefaultVisitor()
+
+        for template_file in self.template_files:
+            if template_file.file_type != "j2":
+                continue
+            file_path = self.template_dir / template_file.relative_path
+            try:
+                with open(file_path, "r", encoding="utf-8") as f:
+                    content = f.read()
+                ast = self.jinja_env.parse(content)
+                visitor.visit(ast)
+            except (IOError, OSError, yaml.YAMLError):
+                # Skip failures - this extraction is best-effort only
+                continue
+
+        return visitor.found
+
+    def _filter_specs_to_used(
+        self,
+        used_variables: set,
+        merged_specs: dict,
+        module_specs: dict,
+        template_specs: dict,
+    ) -> dict:
+        """Filter specs to only include variables used in templates using VariableCollection.
+
+        Uses VariableCollection's native filter_to_used() method.
+        Keeps sensitive variables only if they're defined in the template spec or actually used.
+        """
+        # Build set of variables explicitly defined in template spec
+        template_defined_vars = set()
+        for section_data in (template_specs or {}).values():
+            if isinstance(section_data, dict) and "vars" in section_data:
+                template_defined_vars.update(section_data["vars"].keys())
+
+        # Create VariableCollection from merged specs
+        merged_collection = VariableCollection(merged_specs)
+
+        # Filter to only used variables (and sensitive ones that are template-defined)
+        # We keep sensitive variables that are either:
+        # 1. Actually used in template files, OR
+        # 2. Explicitly defined in the template spec (even if not yet used)
+        variables_to_keep = used_variables | template_defined_vars
+        filtered_collection = merged_collection.filter_to_used(
+            variables_to_keep, keep_sensitive=False
+        )
+
+        # Convert back to dict format
+        filtered_specs = {}
+        for section_key, section in filtered_collection.get_sections().items():
+            filtered_specs[section_key] = section.to_dict()
+
+        return filtered_specs
+
+    def _validate_schema_version(self, module_schema: str, module_name: str) -> None:
+        """Validate that template schema version is supported by the module.
+
+        Args:
+            module_schema: Schema version supported by the module
+            module_name: Name of the module (for error messages)
+
+        Raises:
+            IncompatibleSchemaVersionError: If template schema > module schema
+        """
+        template_schema = self.schema_version
+
+        # Compare schema versions
+        if not is_compatible(module_schema, template_schema):
+            logger.error(
+                f"Template '{self.id}' uses schema version {template_schema}, "
+                f"but module '{module_name}' only supports up to {module_schema}"
+            )
+            raise IncompatibleSchemaVersionError(
+                template_id=self.id,
+                template_schema=template_schema,
+                module_schema=module_schema,
+                module_name=module_name,
+            )
+
+        logger.debug(
+            f"Template '{self.id}' schema version compatible: "
+            f"template uses {template_schema}, module supports {module_schema}"
+        )
+
+    @staticmethod
+    def _validate_kind(template_data: dict) -> None:
+        """Validate that template has required 'kind' field.
+
+        Args:
+            template_data: Parsed YAML data from template.yaml
+
+        Raises:
+            ValueError: If 'kind' field is missing
+        """
+        if not template_data.get("kind"):
+            raise TemplateValidationError("Template format error: missing 'kind' field")
+
+    def _validate_variable_definitions(
+        self, used_variables: set[str], merged_specs: dict[str, Any]
+    ) -> None:
+        """Validate that all variables used in Jinja2 content are defined in the spec."""
+        defined_variables = set()
+        for section_data in merged_specs.values():
+            if "vars" in section_data and isinstance(section_data["vars"], dict):
+                defined_variables.update(section_data["vars"].keys())
+
+        undefined_variables = used_variables - defined_variables
+        if undefined_variables:
+            undefined_list = sorted(undefined_variables)
+            error_msg = (
+                f"Template validation error in '{self.id}': "
+                f"Variables used in template content but not defined in spec: {undefined_list}\n\n"
+                f"Please add these variables to your template's template.yaml spec. "
+                f"Each variable must have a default value.\n\n"
+                f"Example:\n"
+                f"spec:\n"
+                f"  general:\n"
+                f"    vars:\n"
+            )
+            for var_name in undefined_list:
+                error_msg += (
+                    f"      {var_name}:\n"
+                    f"        type: str\n"
+                    f"        description: Description for {var_name}\n"
+                    f"        default: <your_default_value_here>\n"
+                )
+            logger.error(error_msg)
+            raise TemplateValidationError(error_msg)
+
+    @staticmethod
+    def _create_jinja_env(searchpath: Path) -> Environment:
+        """Create sandboxed Jinja2 environment for secure template processing.
+
+        Uses SandboxedEnvironment to prevent code injection vulnerabilities
+        when processing untrusted templates. This restricts access to dangerous
+        operations while still allowing safe template rendering.
+
+        Returns:
+            SandboxedEnvironment configured for template processing.
+        """
+        # NOTE Use SandboxedEnvironment for security - prevents arbitrary code execution
+        return SandboxedEnvironment(
+            loader=FileSystemLoader(searchpath),
+            trim_blocks=True,
+            lstrip_blocks=True,
+            keep_trailing_newline=False,
+        )
+
+    def render(
+        self, variables: VariableCollection, debug: bool = False
+    ) -> tuple[Dict[str, str], Dict[str, Any]]:
+        """Render all .j2 files in the template directory.
+
+        Args:
+            variables: VariableCollection with values to use for rendering
+            debug: Enable debug mode with verbose output
+
+        Returns:
+            Tuple of (rendered_files, variable_values) where variable_values includes autogenerated values
+        """
+        # Use get_satisfied_values() to exclude variables from sections with unsatisfied dependencies
+        variable_values = variables.get_satisfied_values()
+
+        # Auto-generate values for autogenerated variables that are empty
+        import secrets
+        import string
+
+        for section in variables.get_sections().values():
+            for var_name, variable in section.variables.items():
+                if variable.autogenerated and (
+                    variable.value is None or variable.value == ""
+                ):
+                    # Generate a secure random string (32 characters by default)
+                    alphabet = string.ascii_letters + string.digits
+                    generated_value = "".join(
+                        secrets.choice(alphabet) for _ in range(32)
+                    )
+                    variable_values[var_name] = generated_value
+                    logger.debug(f"Auto-generated value for variable '{var_name}'")
+
+        if debug:
+            logger.info(f"Rendering template '{self.id}' in debug mode")
+            logger.info(f"Available variables: {sorted(variable_values.keys())}")
+            logger.info(f"Variable values: {variable_values}")
+        else:
+            logger.debug(
+                f"Rendering template '{self.id}' with variables: {variable_values}"
+            )
+
+        rendered_files = {}
+        available_vars = set(variable_values.keys())
+
+        for template_file in self.template_files:  # Iterate over TemplateFile objects
+            if template_file.file_type == "j2":
+                try:
+                    if debug:
+                        logger.info(
+                            f"Rendering Jinja2 template: {template_file.relative_path}"
+                        )
+
+                    template = self.jinja_env.get_template(
+                        str(template_file.relative_path)
+                    )  # Use lazy-loaded jinja_env
+                    rendered_content = template.render(**variable_values)
+
+                    # Sanitize the rendered content to remove excessive blank lines
+                    rendered_content = self._sanitize_content(
+                        rendered_content, template_file.output_path
+                    )
+                    rendered_files[str(template_file.output_path)] = rendered_content
+
+                    if debug:
+                        logger.info(
+                            f"Successfully rendered: {template_file.relative_path} -> {template_file.output_path}"
+                        )
+
+                except (
+                    UndefinedError,
+                    Jinja2TemplateSyntaxError,
+                    Jinja2TemplateNotFound,
+                    Jinja2TemplateError,
+                ) as e:
+                    # Parse Jinja2 error to extract detailed information
+                    error_msg, line_num, col, context_lines, suggestions = (
+                        _parse_jinja_error(
+                            e, template_file, self.template_dir, available_vars
+                        )
+                    )
+
+                    logger.error(
+                        f"Error rendering template file {template_file.relative_path}: {error_msg}"
+                    )
+
+                    # Create enhanced TemplateRenderError with all context
+                    raise TemplateRenderError(
+                        message=error_msg,
+                        file_path=str(template_file.relative_path),
+                        line_number=line_num,
+                        column=col,
+                        context_lines=context_lines,
+                        variable_context={k: str(v) for k, v in variable_values.items()}
+                        if debug
+                        else {},
+                        suggestions=suggestions,
+                        original_error=e,
+                    )
+
+                except Exception as e:
+                    # Catch any other unexpected errors
+                    logger.error(
+                        f"Unexpected error rendering template file {template_file.relative_path}: {e}"
+                    )
+                    raise TemplateRenderError(
+                        message=f"Unexpected rendering error: {e}",
+                        file_path=str(template_file.relative_path),
+                        suggestions=[
+                            "This is an unexpected error. Please check the template for issues."
+                        ],
+                        original_error=e,
+                    )
+
+            elif template_file.file_type == "static":
+                # For static files, just read their content and add to rendered_files
+                # This ensures static files are also part of the output dictionary
+                file_path = self.template_dir / template_file.relative_path
+                try:
+                    if debug:
+                        logger.info(
+                            f"Copying static file: {template_file.relative_path}"
+                        )
+
+                    with open(file_path, "r", encoding="utf-8") as f:
+                        content = f.read()
+                        rendered_files[str(template_file.output_path)] = content
+                except (IOError, OSError) as e:
+                    logger.error(f"Error reading static file {file_path}: {e}")
+                    raise TemplateRenderError(
+                        message=f"Error reading static file: {e}",
+                        file_path=str(template_file.relative_path),
+                        suggestions=[
+                            "Check that the file exists and has read permissions"
+                        ],
+                        original_error=e,
+                    )
+
+        return rendered_files, variable_values
+
+    def _sanitize_content(self, content: str, file_path: Path) -> str:
+        """Sanitize rendered content by removing excessive blank lines and trailing whitespace."""
+        if not content:
+            return content
+
+        lines = [line.rstrip() for line in content.split("\n")]
+        sanitized = []
+        prev_blank = False
+
+        for line in lines:
+            is_blank = not line
+            if is_blank and prev_blank:
+                continue  # Skip consecutive blank lines
+            sanitized.append(line)
+            prev_blank = is_blank
+
+        # Remove leading blanks and ensure single trailing newline
+        return "\n".join(sanitized).lstrip("\n").rstrip("\n") + "\n"
+
+    @property
+    def template_files(self) -> List[TemplateFile]:
+        if self.__template_files is None:
+            self._collect_template_files()  # Populate self.__template_files
+        return self.__template_files
+
+    @property
+    def template_specs(self) -> dict:
+        """Get the spec section from template YAML data."""
+        return self._template_data.get("spec", {})
+
+    @property
+    def module_specs(self) -> dict:
+        """Get the spec from the module definition for this template's schema version."""
+        if self.__module_specs is None:
+            kind = self._template_data.get("kind")
+            self.__module_specs = self._load_module_specs_for_schema(
+                kind, self.schema_version
+            )
+        return self.__module_specs
+
+    @property
+    def merged_specs(self) -> dict:
+        if self.__merged_specs is None:
+            self.__merged_specs = self._merge_specs(
+                self.module_specs, self.template_specs
+            )
+        return self.__merged_specs
+
+    @property
+    def jinja_env(self) -> Environment:
+        if self.__jinja_env is None:
+            self.__jinja_env = self._create_jinja_env(self.template_dir)
+        return self.__jinja_env
+
+    @property
+    def used_variables(self) -> Set[str]:
+        if self.__used_variables is None:
+            self.__used_variables = self._extract_all_used_variables()
+        return self.__used_variables
+
+    @property
+    def variables(self) -> VariableCollection:
+        if self.__variables is None:
+            # Validate that all used variables are defined
+            self._validate_variable_definitions(self.used_variables, self.merged_specs)
+            # Filter specs to only used variables
+            filtered_specs = self._filter_specs_to_used(
+                self.used_variables,
+                self.merged_specs,
+                self.module_specs,
+                self.template_specs,
+            )
+
+            # Best-effort: extract literal defaults from Jinja `default()` filter and
+            # merge them into the filtered_specs when no default exists there.
+            try:
+                jinja_defaults = self._extract_jinja_default_values()
+                for section_key, section_data in filtered_specs.items():
+                    # Guard against None from empty YAML sections
+                    vars_dict = section_data.get("vars") or {}
+                    for var_name, var_data in vars_dict.items():
+                        if "default" not in var_data or var_data.get("default") in (
+                            None,
+                            "",
+                        ):
+                            if var_name in jinja_defaults:
+                                var_data["default"] = jinja_defaults[var_name]
+            except (KeyError, TypeError, AttributeError):
+                # Keep behavior stable on any extraction errors
+                pass
+
+            self.__variables = VariableCollection(filtered_specs)
+            # Sort sections: required first, then enabled, then disabled
+            self.__variables.sort_sections()
+        return self.__variables

+ 62 - 56
cli/core/validators.py

@@ -9,7 +9,7 @@ from __future__ import annotations
 import logging
 from abc import ABC, abstractmethod
 from pathlib import Path
-from typing import Any, Dict, List, Optional
+from typing import Any, List, Optional
 
 import yaml
 from rich.console import Console
@@ -20,81 +20,81 @@ console = Console()
 
 class ValidationResult:
     """Represents the result of a validation operation."""
-    
+
     def __init__(self):
         self.errors: List[str] = []
         self.warnings: List[str] = []
         self.info: List[str] = []
-    
+
     def add_error(self, message: str) -> None:
         """Add an error message."""
         self.errors.append(message)
         logger.error(f"Validation error: {message}")
-    
+
     def add_warning(self, message: str) -> None:
         """Add a warning message."""
         self.warnings.append(message)
         logger.warning(f"Validation warning: {message}")
-    
+
     def add_info(self, message: str) -> None:
         """Add an info message."""
         self.info.append(message)
         logger.info(f"Validation info: {message}")
-    
+
     @property
     def is_valid(self) -> bool:
         """Check if validation passed (no errors)."""
         return len(self.errors) == 0
-    
+
     @property
     def has_warnings(self) -> bool:
         """Check if validation has warnings."""
         return len(self.warnings) > 0
-    
+
     def display(self, context: str = "Validation") -> None:
         """Display validation results to console."""
         if self.errors:
             console.print(f"\n[red]✗ {context} Failed:[/red]")
             for error in self.errors:
                 console.print(f"  [red]• {error}[/red]")
-        
+
         if self.warnings:
             console.print(f"\n[yellow]⚠ {context} Warnings:[/yellow]")
             for warning in self.warnings:
                 console.print(f"  [yellow]• {warning}[/yellow]")
-        
+
         if self.info:
             console.print(f"\n[blue]ℹ {context} Info:[/blue]")
             for info_msg in self.info:
                 console.print(f"  [blue]• {info_msg}[/blue]")
-        
+
         if self.is_valid and not self.has_warnings:
             console.print(f"\n[green]✓ {context} Passed[/green]")
 
 
 class ContentValidator(ABC):
     """Abstract base class for content validators."""
-    
+
     @abstractmethod
     def validate(self, content: str, file_path: str) -> ValidationResult:
         """Validate content and return results.
-        
+
         Args:
             content: The file content to validate
             file_path: Path to the file (for error messages)
-            
+
         Returns:
             ValidationResult with errors, warnings, and info
         """
         pass
-    
+
     @abstractmethod
     def can_validate(self, file_path: str) -> bool:
         """Check if this validator can validate the given file.
-        
+
         Args:
             file_path: Path to the file
-            
+
         Returns:
             True if this validator can handle the file
         """
@@ -103,84 +103,88 @@ class ContentValidator(ABC):
 
 class DockerComposeValidator(ContentValidator):
     """Validator for Docker Compose files."""
-    
+
     COMPOSE_FILENAMES = {
         "docker-compose.yml",
         "docker-compose.yaml",
         "compose.yml",
         "compose.yaml",
     }
-    
+
     def can_validate(self, file_path: str) -> bool:
         """Check if file is a Docker Compose file."""
         filename = Path(file_path).name.lower()
         return filename in self.COMPOSE_FILENAMES
-    
+
     def validate(self, content: str, file_path: str) -> ValidationResult:
         """Validate Docker Compose file structure."""
         result = ValidationResult()
-        
+
         try:
             # Parse YAML
             data = yaml.safe_load(content)
-            
+
             if not isinstance(data, dict):
                 result.add_error("Docker Compose file must be a YAML dictionary")
                 return result
-            
+
             # Check for version (optional in Compose v2, but good practice)
             if "version" not in data:
-                result.add_info("No 'version' field specified (using Compose v2 format)")
-            
+                result.add_info(
+                    "No 'version' field specified (using Compose v2 format)"
+                )
+
             # Check for services (required)
             if "services" not in data:
                 result.add_error("Missing required 'services' section")
                 return result
-            
+
             services = data.get("services", {})
             if not isinstance(services, dict):
                 result.add_error("'services' must be a dictionary")
                 return result
-            
+
             if not services:
                 result.add_warning("No services defined")
-            
+
             # Validate each service
             for service_name, service_config in services.items():
                 self._validate_service(service_name, service_config, result)
-            
+
             # Check for networks (optional but recommended)
             if "networks" in data:
                 networks = data.get("networks", {})
                 if networks and isinstance(networks, dict):
                     result.add_info(f"Defines {len(networks)} network(s)")
-            
+
             # Check for volumes (optional)
             if "volumes" in data:
                 volumes = data.get("volumes", {})
                 if volumes and isinstance(volumes, dict):
                     result.add_info(f"Defines {len(volumes)} volume(s)")
-            
+
         except yaml.YAMLError as e:
             result.add_error(f"YAML parsing error: {e}")
         except Exception as e:
             result.add_error(f"Unexpected validation error: {e}")
-        
+
         return result
-    
-    def _validate_service(self, name: str, config: Any, result: ValidationResult) -> None:
+
+    def _validate_service(
+        self, name: str, config: Any, result: ValidationResult
+    ) -> None:
         """Validate a single service configuration."""
         if not isinstance(config, dict):
             result.add_error(f"Service '{name}': configuration must be a dictionary")
             return
-        
+
         # Check for image or build (at least one required)
         has_image = "image" in config
         has_build = "build" in config
-        
+
         if not has_image and not has_build:
             result.add_error(f"Service '{name}': must specify 'image' or 'build'")
-        
+
         # Warn about common misconfigurations
         if "restart" in config:
             restart_value = config["restart"]
@@ -190,7 +194,7 @@ class DockerComposeValidator(ContentValidator):
                     f"Service '{name}': restart policy '{restart_value}' may be invalid. "
                     f"Valid values: {', '.join(valid_restart_policies)}"
                 )
-        
+
         # Check for environment variables
         if "environment" in config:
             env = config["environment"]
@@ -202,7 +206,7 @@ class DockerComposeValidator(ContentValidator):
                     result.add_warning(
                         f"Service '{name}': duplicate environment variables: {', '.join(duplicates)}"
                     )
-        
+
         # Check for ports
         if "ports" in config:
             ports = config["ports"]
@@ -212,51 +216,51 @@ class DockerComposeValidator(ContentValidator):
 
 class YAMLValidator(ContentValidator):
     """Basic YAML syntax validator."""
-    
+
     def can_validate(self, file_path: str) -> bool:
         """Check if file is a YAML file."""
         return Path(file_path).suffix.lower() in [".yml", ".yaml"]
-    
+
     def validate(self, content: str, file_path: str) -> ValidationResult:
         """Validate YAML syntax."""
         result = ValidationResult()
-        
+
         try:
             yaml.safe_load(content)
             result.add_info("YAML syntax is valid")
         except yaml.YAMLError as e:
             result.add_error(f"YAML parsing error: {e}")
-        
+
         return result
 
 
 class ValidatorRegistry:
     """Registry for content validators."""
-    
+
     def __init__(self):
         self.validators: List[ContentValidator] = []
         self._register_default_validators()
-    
+
     def _register_default_validators(self) -> None:
         """Register built-in validators."""
         self.register(DockerComposeValidator())
         self.register(YAMLValidator())
-    
+
     def register(self, validator: ContentValidator) -> None:
         """Register a validator.
-        
+
         Args:
             validator: The validator to register
         """
         self.validators.append(validator)
         logger.debug(f"Registered validator: {validator.__class__.__name__}")
-    
+
     def get_validator(self, file_path: str) -> Optional[ContentValidator]:
         """Get the most appropriate validator for a file.
-        
+
         Args:
             file_path: Path to the file
-            
+
         Returns:
             ContentValidator if found, None otherwise
         """
@@ -265,26 +269,28 @@ class ValidatorRegistry:
             if validator.can_validate(file_path):
                 return validator
         return None
-    
+
     def validate_file(self, content: str, file_path: str) -> ValidationResult:
         """Validate file content using appropriate validator.
-        
+
         Args:
             content: The file content
             file_path: Path to the file
-            
+
         Returns:
             ValidationResult with validation results
         """
         validator = self.get_validator(file_path)
-        
+
         if validator:
             logger.debug(f"Validating {file_path} with {validator.__class__.__name__}")
             return validator.validate(content, file_path)
-        
+
         # No validator found - return empty result
         result = ValidationResult()
-        result.add_info(f"No semantic validator available for {Path(file_path).suffix} files")
+        result.add_info(
+            f"No semantic validator available for {Path(file_path).suffix} files"
+        )
         return result
 
 

+ 444 - 360
cli/core/variable.py

@@ -13,365 +13,449 @@ EMAIL_REGEX = re.compile(r"^[^@\\s]+@[^@\\s]+\\.[^@\\s]+$")
 
 
 class Variable:
-  """Represents a single templating variable with lightweight validation."""
-
-  def __init__(self, data: dict[str, Any]) -> None:
-    """Initialize Variable from a dictionary containing variable specification.
-    
-    Args:
-        data: Dictionary containing variable specification with required 'name' key
-              and optional keys: description, type, options, prompt, value, default, section, origin
-              
-    Raises:
-        ValueError: If data is not a dict, missing 'name' key, or has invalid default value
-    """
-    # Validate input
-    if not isinstance(data, dict):
-      raise ValueError("Variable data must be a dictionary")
-    
-    if "name" not in data:
-      raise ValueError("Variable data must contain 'name' key")
-    
-    # Track which fields were explicitly provided in source data
-    self._explicit_fields: Set[str] = set(data.keys())
-    
-    # Initialize fields
-    self.name: str = data["name"]
-    self.description: Optional[str] = data.get("description") or data.get("display", "")
-    self.type: str = data.get("type", "str")
-    self.options: Optional[List[Any]] = data.get("options", [])
-    self.prompt: Optional[str] = data.get("prompt")
-    self.value: Any = data.get("value") if data.get("value") is not None else data.get("default")
-    self.origin: Optional[str] = data.get("origin")
-    self.sensitive: bool = data.get("sensitive", False)
-    # Optional extra explanation used by interactive prompts
-    self.extra: Optional[str] = data.get("extra")
-    # Flag indicating this variable should be auto-generated when empty
-    self.autogenerated: bool = data.get("autogenerated", False)
-    # Original value before config override (used for display)
-    self.original_value: Optional[Any] = data.get("original_value")
-
-    # Validate and convert the default/initial value if present
-    if self.value is not None:
-      try:
-        self.value = self.convert(self.value)
-      except ValueError as exc:
-        raise ValueError(f"Invalid default for variable '{self.name}': {exc}")
-
-
-  def convert(self, value: Any) -> Any:
-    """Validate and convert a raw value based on the variable type.
-    
-    This method performs type conversion but does NOT check if the value
-    is required. Use validate_and_convert() for full validation including
-    required field checks.
-    """
-    if value is None:
-      return None
-
-    # Treat empty strings as None to avoid storing "" for missing values.
-    if isinstance(value, str) and value.strip() == "":
-      return None
-
-    # Type conversion mapping for cleaner code
-    converters = {
-      "bool": self._convert_bool,
-      "int": self._convert_int, 
-      "float": self._convert_float,
-      "enum": self._convert_enum,
-      "url": self._convert_url,
-      "email": self._convert_email,
-    }
-    
-    converter = converters.get(self.type)
-    if converter:
-      return converter(value)
-    
-    # Default to string conversion
-    return str(value)
-  
-  def validate_and_convert(self, value: Any, check_required: bool = True) -> Any:
-    """Validate and convert a value with comprehensive checks.
-    
-    This method combines type conversion with validation logic including
-    required field checks. It's the recommended method for user input validation.
-    
-    Args:
-        value: The raw value to validate and convert
-        check_required: If True, raises ValueError for required fields with empty values
-        
-    Returns:
-        The converted and validated value
-        
-    Raises:
-        ValueError: If validation fails (invalid format, required field empty, etc.)
-        
-    Examples:
-        # Basic validation
-        var.validate_and_convert("example@email.com")  # Returns validated email
-        
-        # Required field validation
-        var.validate_and_convert("", check_required=True)  # Raises ValueError if required
-        
-        # Autogenerated variables - allow empty values
-        var.validate_and_convert("", check_required=False)  # Returns None for autogeneration
-    """
-    # First, convert the value using standard type conversion
-    converted = self.convert(value)
-    
-    # Special handling for autogenerated variables
-    # Allow empty values as they will be auto-generated later
-    if self.autogenerated and (converted is None or (isinstance(converted, str) and (converted == "" or converted == "*auto"))):
-      return None  # Signal that auto-generation should happen
-    
-    # Check if this is a required field and the value is empty
-    if check_required and self.is_required():
-      if converted is None or (isinstance(converted, str) and converted == ""):
-        raise ValueError("This field is required and cannot be empty")
-    
-    return converted
-
-  def _convert_bool(self, value: Any) -> bool:
-    """Convert value to boolean."""
-    if isinstance(value, bool):
-      return value
-    if isinstance(value, str):
-      lowered = value.strip().lower()
-      if lowered in TRUE_VALUES:
+    """Represents a single templating variable with lightweight validation."""
+
+    def __init__(self, data: dict[str, Any]) -> None:
+        """Initialize Variable from a dictionary containing variable specification.
+
+        Args:
+            data: Dictionary containing variable specification with required 'name' key
+                  and optional keys: description, type, options, prompt, value, default, section, origin
+
+        Raises:
+            ValueError: If data is not a dict, missing 'name' key, or has invalid default value
+        """
+        # Validate input
+        if not isinstance(data, dict):
+            raise ValueError("Variable data must be a dictionary")
+
+        if "name" not in data:
+            raise ValueError("Variable data must contain 'name' key")
+
+        # Track which fields were explicitly provided in source data
+        self._explicit_fields: Set[str] = set(data.keys())
+
+        # Initialize fields
+        self.name: str = data["name"]
+        # Reference to parent section (set by VariableCollection)
+        self.parent_section: Optional["VariableSection"] = data.get("parent_section")
+        self.description: Optional[str] = data.get("description") or data.get(
+            "display", ""
+        )
+        self.type: str = data.get("type", "str")
+        self.options: Optional[List[Any]] = data.get("options", [])
+        self.prompt: Optional[str] = data.get("prompt")
+        if "value" in data:
+            self.value: Any = data.get("value")
+        elif "default" in data:
+            self.value: Any = data.get("default")
+        else:
+            self.value: Any = None
+        self.origin: Optional[str] = data.get("origin")
+        self.sensitive: bool = data.get("sensitive", False)
+        # Optional extra explanation used by interactive prompts
+        self.extra: Optional[str] = data.get("extra")
+        # Flag indicating this variable should be auto-generated when empty
+        self.autogenerated: bool = data.get("autogenerated", False)
+        # Flag indicating this variable is required even when section is disabled
+        self.required: bool = data.get("required", False)
+        # Flag indicating this variable can be empty/optional
+        self.optional: bool = data.get("optional", False)
+        # Original value before config override (used for display)
+        self.original_value: Optional[Any] = data.get("original_value")
+        # Variable dependencies - can be string or list of strings in format "var_name=value"
+        # Supports semicolon-separated multiple conditions: "var1=value1;var2=value2,value3"
+        needs_value = data.get("needs")
+        if needs_value:
+            if isinstance(needs_value, str):
+                # Split by semicolon to support multiple AND conditions in a single string
+                # Example: "traefik_enabled=true;network_mode=bridge,macvlan"
+                self.needs: List[str] = [
+                    need.strip() for need in needs_value.split(";") if need.strip()
+                ]
+            elif isinstance(needs_value, list):
+                self.needs: List[str] = needs_value
+            else:
+                raise ValueError(
+                    f"Variable '{self.name}' has invalid 'needs' value: must be string or list"
+                )
+        else:
+            self.needs: List[str] = []
+
+        # Validate and convert the default/initial value if present
+        if self.value is not None:
+            try:
+                self.value = self.convert(self.value)
+            except ValueError as exc:
+                raise ValueError(f"Invalid default for variable '{self.name}': {exc}")
+
+    def convert(self, value: Any) -> Any:
+        """Validate and convert a raw value based on the variable type.
+
+        This method performs type conversion but does NOT check if the value
+        is required. Use validate_and_convert() for full validation including
+        required field checks.
+        """
+        if value is None:
+            return None
+
+        # Treat empty strings as None to avoid storing "" for missing values.
+        if isinstance(value, str) and value.strip() == "":
+            return None
+
+        # Type conversion mapping for cleaner code
+        converters = {
+            "bool": self._convert_bool,
+            "int": self._convert_int,
+            "float": self._convert_float,
+            "enum": self._convert_enum,
+            "url": self._convert_url,
+            "email": self._convert_email,
+        }
+
+        converter = converters.get(self.type)
+        if converter:
+            return converter(value)
+
+        # Default to string conversion
+        return str(value)
+
+    def validate_and_convert(self, value: Any, check_required: bool = True) -> Any:
+        """Validate and convert a value with comprehensive checks.
+
+        This method combines type conversion with validation logic including
+        required field checks. It's the recommended method for user input validation.
+
+        Args:
+            value: The raw value to validate and convert
+            check_required: If True, raises ValueError for required fields with empty values
+
+        Returns:
+            The converted and validated value
+
+        Raises:
+            ValueError: If validation fails (invalid format, required field empty, etc.)
+
+        Examples:
+            # Basic validation
+            var.validate_and_convert("example@email.com")  # Returns validated email
+
+            # Required field validation
+            var.validate_and_convert("", check_required=True)  # Raises ValueError if required
+
+            # Autogenerated variables - allow empty values
+            var.validate_and_convert("", check_required=False)  # Returns None for autogeneration
+        """
+        # First, convert the value using standard type conversion
+        converted = self.convert(value)
+
+        # Special handling for autogenerated variables
+        # Allow empty values as they will be auto-generated later
+        if self.autogenerated and (
+            converted is None
+            or (
+                isinstance(converted, str) and (converted == "" or converted == "*auto")
+            )
+        ):
+            return None  # Signal that auto-generation should happen
+
+        # Allow empty values for optional variables
+        if self.optional and (
+            converted is None or (isinstance(converted, str) and converted == "")
+        ):
+            return None
+
+        # Check if this is a required field and the value is empty
+        if check_required and self.is_required():
+            if converted is None or (isinstance(converted, str) and converted == ""):
+                raise ValueError("This field is required and cannot be empty")
+
+        return converted
+
+    def _convert_bool(self, value: Any) -> bool:
+        """Convert value to boolean."""
+        if isinstance(value, bool):
+            return value
+        if isinstance(value, str):
+            lowered = value.strip().lower()
+            if lowered in TRUE_VALUES:
+                return True
+            if lowered in FALSE_VALUES:
+                return False
+        raise ValueError("value must be a boolean (true/false)")
+
+    def _convert_int(self, value: Any) -> Optional[int]:
+        """Convert value to integer."""
+        if isinstance(value, int):
+            return value
+        if isinstance(value, str) and value.strip() == "":
+            return None
+        try:
+            return int(value)
+        except (TypeError, ValueError) as exc:
+            raise ValueError("value must be an integer") from exc
+
+    def _convert_float(self, value: Any) -> Optional[float]:
+        """Convert value to float."""
+        if isinstance(value, float):
+            return value
+        if isinstance(value, str) and value.strip() == "":
+            return None
+        try:
+            return float(value)
+        except (TypeError, ValueError) as exc:
+            raise ValueError("value must be a float") from exc
+
+    def _convert_enum(self, value: Any) -> Optional[str]:
+        if value == "":
+            return None
+        val = str(value)
+        if self.options and val not in self.options:
+            raise ValueError(f"value must be one of: {', '.join(self.options)}")
+        return val
+
+    def _convert_url(self, value: Any) -> str:
+        val = str(value).strip()
+        if not val:
+            return None
+        parsed = urlparse(val)
+        if not (parsed.scheme and parsed.netloc):
+            raise ValueError("value must be a valid URL (include scheme and host)")
+        return val
+
+    def _convert_email(self, value: Any) -> str:
+        val = str(value).strip()
+        if not val:
+            return None
+        if not EMAIL_REGEX.fullmatch(val):
+            raise ValueError("value must be a valid email address")
+        return val
+
+    def to_dict(self) -> Dict[str, Any]:
+        """Serialize Variable to a dictionary for storage."""
+        result = {}
+
+        # Always include type
+        if self.type:
+            result["type"] = self.type
+
+        # Include value/default if not None
+        if self.value is not None:
+            result["default"] = self.value
+
+        # Include string fields if truthy
+        for field in ("description", "prompt", "extra", "origin"):
+            if value := getattr(self, field):
+                result[field] = value
+
+        # Include boolean/list fields if truthy (but empty list is OK for options)
+        if self.sensitive:
+            result["sensitive"] = True
+        if self.autogenerated:
+            result["autogenerated"] = True
+        if self.required:
+            result["required"] = True
+        if self.optional:
+            result["optional"] = True
+        if self.options is not None:  # Allow empty list
+            result["options"] = self.options
+
+        # Store dependencies (single value if only one, list otherwise)
+        if self.needs:
+            result["needs"] = self.needs[0] if len(self.needs) == 1 else self.needs
+
+        return result
+
+    def get_display_value(
+        self, mask_sensitive: bool = True, max_length: int = 30, show_none: bool = True
+    ) -> str:
+        """Get formatted display value with optional masking and truncation.
+
+        Args:
+            mask_sensitive: If True, mask sensitive values with asterisks
+            max_length: Maximum length before truncation (0 = no limit)
+            show_none: If True, display "(none)" for None values instead of empty string
+
+        Returns:
+            Formatted string representation of the value
+        """
+        if self.value is None or self.value == "":
+            # Show (*auto) for autogenerated variables instead of (none)
+            if self.autogenerated:
+                return "[dim](*auto)[/dim]" if show_none else ""
+            return "[dim](none)[/dim]" if show_none else ""
+
+        # Mask sensitive values
+        if self.sensitive and mask_sensitive:
+            return "********"
+
+        # Convert to string
+        display = str(self.value)
+
+        # Truncate if needed
+        if max_length > 0 and len(display) > max_length:
+            return display[: max_length - 3] + "..."
+
+        return display
+
+    def get_normalized_default(self) -> Any:
+        """Get normalized default value suitable for prompts and display."""
+        try:
+            typed = self.convert(self.value)
+        except Exception:
+            typed = self.value
+
+        # Autogenerated: return display hint
+        if self.autogenerated and not typed:
+            return "*auto"
+
+        # Type-specific handlers
+        if self.type == "enum":
+            if not self.options:
+                return typed
+            return (
+                self.options[0]
+                if typed is None or str(typed) not in self.options
+                else str(typed)
+            )
+
+        if self.type == "bool":
+            return (
+                typed
+                if isinstance(typed, bool)
+                else (None if typed is None else bool(typed))
+            )
+
+        if self.type == "int":
+            try:
+                return int(typed) if typed not in (None, "") else None
+            except Exception:
+                return None
+
+        # Default: return string or None
+        return None if typed is None else str(typed)
+
+    def get_prompt_text(self) -> str:
+        """Get formatted prompt text for interactive input.
+
+        Returns:
+            Prompt text with optional type hints and descriptions
+        """
+        prompt_text = self.prompt or self.description or self.name
+
+        # Add type hint for semantic types if there's a default
+        if self.value is not None and self.type in ["email", "url"]:
+            prompt_text += f" ({self.type})"
+
+        return prompt_text
+
+    def get_validation_hint(self) -> Optional[str]:
+        """Get validation hint for prompts (e.g., enum options).
+
+        Returns:
+            Formatted hint string or None if no hint needed
+        """
+        hints = []
+
+        # Add enum options
+        if self.type == "enum" and self.options:
+            hints.append(f"Options: {', '.join(self.options)}")
+
+        # Add extra help text
+        if self.extra:
+            hints.append(self.extra)
+
+        return " — ".join(hints) if hints else None
+
+    def is_required(self) -> bool:
+        """Check if this variable requires a value (cannot be empty/None).
+
+        A variable is considered required if:
+        - It has an explicit 'required: true' flag (highest precedence)
+        - OR it doesn't have a default value (value is None)
+          AND it's not marked as autogenerated (which can be empty and generated later)
+          AND it's not marked as optional (which can be empty)
+          AND it's not a boolean type (booleans default to False if not set)
+
+        Returns:
+            True if the variable must have a non-empty value, False otherwise
+        """
+        # Optional variables can always be empty
+        if self.optional:
+            return False
+
+        # Explicit required flag takes highest precedence
+        if self.required:
+            # But autogenerated variables can still be empty (will be generated later)
+            if self.autogenerated:
+                return False
+            return True
+
+        # Autogenerated variables can be empty (will be generated later)
+        if self.autogenerated:
+            return False
+
+        # Boolean variables always have a value (True or False)
+        if self.type == "bool":
+            return False
+
+        # Variables with a default value are not required
+        if self.value is not None:
+            return False
+
+        # No default value and not autogenerated = required
         return True
-      if lowered in FALSE_VALUES:
-        return False
-    raise ValueError("value must be a boolean (true/false)")
-
-  def _convert_int(self, value: Any) -> Optional[int]:
-    """Convert value to integer."""
-    if isinstance(value, int):
-      return value
-    if isinstance(value, str) and value.strip() == "":
-      return None
-    try:
-      return int(value)
-    except (TypeError, ValueError) as exc:
-      raise ValueError("value must be an integer") from exc
-
-  def _convert_float(self, value: Any) -> Optional[float]:
-    """Convert value to float."""
-    if isinstance(value, float):
-      return value
-    if isinstance(value, str) and value.strip() == "":
-      return None
-    try:
-      return float(value)
-    except (TypeError, ValueError) as exc:
-      raise ValueError("value must be a float") from exc
-
-  def _convert_enum(self, value: Any) -> Optional[str]:
-    if value == "":
-      return None
-    val = str(value)
-    if self.options and val not in self.options:
-      raise ValueError(f"value must be one of: {', '.join(self.options)}")
-    return val
-
-  def _convert_url(self, value: Any) -> str:
-    val = str(value).strip()
-    if not val:
-      return None
-    parsed = urlparse(val)
-    if not (parsed.scheme and parsed.netloc):
-      raise ValueError("value must be a valid URL (include scheme and host)")
-    return val
-
-  def _convert_email(self, value: Any) -> str:
-    val = str(value).strip()
-    if not val:
-      return None
-    if not EMAIL_REGEX.fullmatch(val):
-      raise ValueError("value must be a valid email address")
-    return val
-
-  def to_dict(self) -> Dict[str, Any]:
-    """Serialize Variable to a dictionary for storage."""
-    result = {}
-    
-    # Always include type
-    if self.type:
-      result['type'] = self.type
-    
-    # Include value/default if not None
-    if self.value is not None:
-      result['default'] = self.value
-    
-    # Include string fields if truthy
-    for field in ('description', 'prompt', 'extra', 'origin'):
-      if value := getattr(self, field):
-        result[field] = value
-    
-    # Include boolean/list fields if truthy (but empty list is OK for options)
-    if self.sensitive:
-      result['sensitive'] = True
-    if self.autogenerated:
-      result['autogenerated'] = True
-    if self.options is not None:  # Allow empty list
-      result['options'] = self.options
-    
-    return result
-  
-  def get_display_value(self, mask_sensitive: bool = True, max_length: int = 30, show_none: bool = True) -> str:
-    """Get formatted display value with optional masking and truncation.
-    
-    Args:
-        mask_sensitive: If True, mask sensitive values with asterisks
-        max_length: Maximum length before truncation (0 = no limit)
-        show_none: If True, display "(none)" for None values instead of empty string
-        
-    Returns:
-        Formatted string representation of the value
-    """
-    if self.value is None or self.value == "":
-      # Show (*auto) for autogenerated variables instead of (none)
-      if self.autogenerated:
-        return "[dim](*auto)[/dim]" if show_none else ""
-      return "[dim](none)[/dim]" if show_none else ""
-    
-    # Mask sensitive values
-    if self.sensitive and mask_sensitive:
-      return "********"
-    
-    # Convert to string
-    display = str(self.value)
-    
-    # Truncate if needed
-    if max_length > 0 and len(display) > max_length:
-      return display[:max_length - 3] + "..."
-    
-    return display
-  
-  def get_normalized_default(self) -> Any:
-    """Get normalized default value suitable for prompts and display."""
-    try:
-      typed = self.convert(self.value)
-    except Exception:
-      typed = self.value
-    
-    # Autogenerated: return display hint
-    if self.autogenerated and not typed:
-      return "*auto"
-    
-    # Type-specific handlers
-    if self.type == "enum":
-      if not self.options:
-        return typed
-      return self.options[0] if typed is None or str(typed) not in self.options else str(typed)
-    
-    if self.type == "bool":
-      return typed if isinstance(typed, bool) else (None if typed is None else bool(typed))
-    
-    if self.type == "int":
-      try:
-        return int(typed) if typed not in (None, "") else None
-      except Exception:
-        return None
-    
-    # Default: return string or None
-    return None if typed is None else str(typed)
-  
-  def get_prompt_text(self) -> str:
-    """Get formatted prompt text for interactive input.
-    
-    Returns:
-        Prompt text with optional type hints and descriptions
-    """
-    prompt_text = self.prompt or self.description or self.name
-    
-    # Add type hint for semantic types if there's a default
-    if self.value is not None and self.type in ["email", "url"]:
-      prompt_text += f" ({self.type})"
-    
-    return prompt_text
-  
-  def get_validation_hint(self) -> Optional[str]:
-    """Get validation hint for prompts (e.g., enum options).
-    
-    Returns:
-        Formatted hint string or None if no hint needed
-    """
-    hints = []
-    
-    # Add enum options
-    if self.type == "enum" and self.options:
-      hints.append(f"Options: {', '.join(self.options)}")
-    
-    # Add extra help text
-    if self.extra:
-      hints.append(self.extra)
-    
-    return " — ".join(hints) if hints else None
-  
-  def is_required(self) -> bool:
-    """Check if this variable requires a value (cannot be empty/None).
-    
-    A variable is considered required if:
-    - It doesn't have a default value (value is None)
-    - It's not marked as autogenerated (which can be empty and generated later)
-    - It's not a boolean type (booleans default to False if not set)
-    
-    Returns:
-        True if the variable must have a non-empty value, False otherwise
-    """
-    # Autogenerated variables can be empty (will be generated later)
-    if self.autogenerated:
-      return False
-    
-    # Boolean variables always have a value (True or False)
-    if self.type == "bool":
-      return False
-    
-    # Variables with a default value are not required
-    if self.value is not None:
-      return False
-    
-    # No default value and not autogenerated = required
-    return True
-  
-  def clone(self, update: Optional[Dict[str, Any]] = None) -> 'Variable':
-    """Create a deep copy of the variable with optional field updates.
-    
-    This is more efficient than converting to dict and back when copying variables.
-    
-    Args:
-        update: Optional dictionary of field updates to apply to the clone
-        
-    Returns:
-        New Variable instance with copied data
+
+    def get_parent(self) -> Optional["VariableSection"]:
+        """Get the parent VariableSection that contains this variable.
         
-    Example:
-        var2 = var1.clone(update={'origin': 'template'})
-    """
-    data = {
-      'name': self.name,
-      'type': self.type,
-      'value': self.value,
-      'description': self.description,
-      'prompt': self.prompt,
-      'options': self.options.copy() if self.options else None,
-      'origin': self.origin,
-      'sensitive': self.sensitive,
-      'extra': self.extra,
-      'autogenerated': self.autogenerated,
-      'original_value': self.original_value,
-    }
-    
-    # Apply updates if provided
-    if update:
-      data.update(update)
-    
-    # Create new variable
-    cloned = Variable(data)
-    
-    # Preserve explicit fields from original, and add any update keys
-    cloned._explicit_fields = self._explicit_fields.copy()
-    if update:
-      cloned._explicit_fields.update(update.keys())
-    
-    return cloned
+        Returns:
+            The parent VariableSection if set, None otherwise
+        """
+        return self.parent_section
+
+    def clone(self, update: Optional[Dict[str, Any]] = None) -> "Variable":
+        """Create a deep copy of the variable with optional field updates.
+
+        This is more efficient than converting to dict and back when copying variables.
+
+        Args:
+            update: Optional dictionary of field updates to apply to the clone
+
+        Returns:
+            New Variable instance with copied data
+
+        Example:
+            var2 = var1.clone(update={'origin': 'template'})
+        """
+        data = {
+            "name": self.name,
+            "type": self.type,
+            "value": self.value,
+            "description": self.description,
+            "prompt": self.prompt,
+            "options": self.options.copy() if self.options else None,
+            "origin": self.origin,
+            "sensitive": self.sensitive,
+            "extra": self.extra,
+            "autogenerated": self.autogenerated,
+            "required": self.required,
+            "optional": self.optional,
+            "original_value": self.original_value,
+            "needs": self.needs.copy() if self.needs else None,
+            "parent_section": self.parent_section,
+        }
+
+        # Apply updates if provided
+        if update:
+            data.update(update)
+
+        # Create new variable
+        cloned = Variable(data)
+
+        # Preserve explicit fields from original, and add any update keys
+        cloned._explicit_fields = self._explicit_fields.copy()
+        if update:
+            cloned._explicit_fields.update(update.keys())
+
+        return cloned

+ 110 - 0
cli/core/version.py

@@ -0,0 +1,110 @@
+"""Version comparison utilities for semantic versioning.
+
+This module provides utilities for parsing and comparing semantic version strings.
+Supports version strings in the format: major.minor (e.g., "1.0", "1.2")
+"""
+
+from __future__ import annotations
+
+import re
+from typing import Tuple
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+def parse_version(version_str: str) -> Tuple[int, int]:
+    """Parse a semantic version string into a tuple of integers.
+
+    Args:
+        version_str: Version string in format "major.minor" (e.g., "1.0", "1.2")
+
+    Returns:
+        Tuple of (major, minor) as integers
+
+    Raises:
+        ValueError: If version string is not in valid semantic version format
+
+    Examples:
+        >>> parse_version("1.0")
+        (1, 0)
+        >>> parse_version("1.2")
+        (1, 2)
+    """
+    if not version_str:
+        raise ValueError("Version string cannot be empty")
+
+    # Remove 'v' prefix if present
+    version_str = version_str.lstrip("v")
+
+    # Match semantic version pattern: major.minor
+    pattern = r"^(\d+)\.(\d+)$"
+    match = re.match(pattern, version_str)
+
+    if not match:
+        raise ValueError(
+            f"Invalid version format '{version_str}'. "
+            "Expected format: major.minor (e.g., '1.0', '1.2')"
+        )
+
+    major, minor = match.groups()
+    return (int(major), int(minor))
+
+
+def compare_versions(version1: str, version2: str) -> int:
+    """Compare two semantic version strings.
+
+    Args:
+        version1: First version string
+        version2: Second version string
+
+    Returns:
+        -1 if version1 < version2
+         0 if version1 == version2
+         1 if version1 > version2
+
+    Raises:
+        ValueError: If either version string is invalid
+
+    Examples:
+        >>> compare_versions("1.0", "0.9")
+        1
+        >>> compare_versions("1.0", "1.0")
+        0
+        >>> compare_versions("1.0", "1.1")
+        -1
+    """
+    v1 = parse_version(version1)
+    v2 = parse_version(version2)
+
+    if v1 < v2:
+        return -1
+    if v1 > v2:
+        return 1
+    return 0
+
+
+def is_compatible(current_version: str, required_version: str) -> bool:
+    """Check if current version meets the minimum required version.
+
+    Args:
+        current_version: Current version
+        required_version: Minimum required version
+
+    Returns:
+        True if current_version >= required_version, False otherwise
+
+    Examples:
+        >>> is_compatible("1.0", "0.9")
+        True
+        >>> is_compatible("1.0", "1.0")
+        True
+        >>> is_compatible("1.0", "1.1")
+        False
+    """
+    try:
+        return compare_versions(current_version, required_version) >= 0
+    except ValueError as e:
+        logger.warning("Version compatibility check failed: %s", e)
+        # If we can't parse versions, assume incompatible for safety
+        return False

+ 0 - 292
cli/modules/compose.py

@@ -1,292 +0,0 @@
-from collections import OrderedDict
-
-from ..core.module import Module
-from ..core.registry import registry
-
-spec = OrderedDict(
-    {
-      "general": {
-        "title": "General",
-        "vars": {
-          "service_name": {
-            "description": "Service name",
-            "type": "str",
-          },
-          "container_name": {
-            "description": "Container name",
-            "type": "str",
-          },
-          "container_hostname": {
-            "description": "Container internal hostname",
-            "type": "str",
-          },
-          "container_timezone": {
-            "description": "Container timezone (e.g., Europe/Berlin)",
-            "type": "str",
-            "default": "UTC",
-          },
-          "user_uid": {
-            "description": "User UID for container process",
-            "type": "int",
-            "default": 1000,
-          },
-          "user_gid": {
-            "description": "User GID for container process",
-            "type": "int",
-            "default": 1000,
-          },
-          "container_loglevel": {
-            "description": "Container log level",
-            "type": "enum",
-            "options": ["debug", "info", "warn", "error"],
-            "default": "info",
-          },
-          "restart_policy": {
-            "description": "Container restart policy",
-            "type": "enum",
-            "options": ["unless-stopped", "always", "on-failure", "no"],
-            "default": "unless-stopped",
-          },
-        },
-      },
-      "network": {
-        "title": "Network",
-        "toggle": "network_enabled",
-        "vars": {
-          "network_enabled": {
-            "description": "Enable custom network block",
-            "type": "bool",
-            "default": False,
-          },
-          "network_name": {
-            "description": "Docker network name",
-            "type": "str",
-            "default": "bridge",
-          },
-          "network_external": {
-            "description": "Use existing Docker network",
-            "type": "bool",
-            "default": True,
-          },
-        },
-      },
-      "ports": {
-        "title": "Ports",
-        "toggle": "ports_enabled",
-        "vars": {
-          "ports_enabled": {
-            "description": "Expose ports via 'ports' mapping",
-            "type": "bool",
-            "default": True,
-          }
-        },
-      },
-      "traefik": {
-        "title": "Traefik",
-        "toggle": "traefik_enabled",
-        "description": "Traefik routes external traffic to your service.",
-        "vars": {
-          "traefik_enabled": {
-            "description": "Enable Traefik reverse proxy integration",
-            "type": "bool",
-            "default": False,
-          },
-          "traefik_network": {
-            "description": "Traefik network name",
-            "type": "str",
-            "default": "traefik",
-          },
-          "traefik_host": {
-            "description": "Domain name for your service (e.g., app.example.com)",
-            "type": "str",
-          },
-          "traefik_entrypoint": {
-            "description": "HTTP entrypoint (non-TLS)",
-            "type": "str",
-            "default": "web",
-          },
-        },
-      },
-      "traefik_tls": {
-        "title": "Traefik TLS/SSL",
-        "toggle": "traefik_tls_enabled",
-        "needs": "traefik",
-        "description": "Enable HTTPS/TLS for Traefik with certificate management.",
-        "vars": {
-          "traefik_tls_enabled": {
-            "description": "Enable HTTPS/TLS",
-            "type": "bool",
-            "default": True,
-          },
-          "traefik_tls_entrypoint": {
-            "description": "TLS entrypoint",
-            "type": "str",
-            "default": "websecure",
-          },
-          "traefik_tls_certresolver": {
-            "description": "Traefik certificate resolver name",
-            "type": "str",
-            "default": "cloudflare",
-          },
-        },
-      },
-      "swarm": {
-        "title": "Docker Swarm",
-        "toggle": "swarm_enabled",
-        "description": "Deploy service in Docker Swarm mode with replicas.",
-        "vars": {
-          "swarm_enabled": {
-            "description": "Enable Docker Swarm mode",
-            "type": "bool",
-            "default": False,
-          },
-          "swarm_replicas": {
-            "description": "Number of replicas in Swarm",
-            "type": "int",
-            "default": 1,
-          },
-          "swarm_placement_mode": {
-            "description": "Swarm placement mode",
-            "type": "enum",
-            "options": ["global", "replicated"],
-            "default": "replicated"
-          },
-          "swarm_placement_host": {
-            "description": "Limit placement to specific node",
-            "type": "str",
-          }
-        },
-      },
-      "database": {
-        "title": "Database",
-        "toggle": "database_enabled",
-        "description": "Connect to external database (PostgreSQL or MySQL)",
-        "vars": {
-          "database_enabled": {
-            "description": "Enable external database integration",
-            "type": "bool",
-            "default": False,
-          },
-          "database_type": {
-            "description": "Database type",
-            "type": "enum",
-            "options": ["postgres", "mysql"],
-            "default": "postgres",
-          },
-          "database_external": {
-            "description": "Use an external database server?",
-            "extra": "If 'no', a database container will be created in the compose project.",
-            "type": "bool",
-            "default": False,
-          },
-          "database_host": {
-            "description": "Database host",
-            "type": "str",
-            "default": "database",
-          },
-          "database_port": {
-            "description": "Database port",
-            "type": "int"
-          },
-          "database_name": {
-            "description": "Database name",
-            "type": "str",
-          },
-          "database_user": {
-            "description": "Database user",
-            "type": "str",
-          },
-          "database_password": {
-            "description": "Database password",
-            "type": "str",
-            "default": "",
-            "sensitive": True,
-            "autogenerated": True,
-          },
-        },
-      },
-      "email": {
-        "title": "Email Server",
-        "toggle": "email_enabled",
-        "description": "Configure email server for notifications and user management.",
-        "vars": {
-          "email_enabled": {
-            "description": "Enable email server configuration",
-            "type": "bool",
-            "default": False,
-          },
-          "email_host": {
-            "description": "SMTP server hostname",
-            "type": "str",
-          },
-          "email_port": {
-            "description": "SMTP server port",
-            "type": "int",
-            "default": 587,
-          },
-          "email_username": {
-            "description": "SMTP username",
-            "type": "str",
-          },
-          "email_password": {
-            "description": "SMTP password",
-            "type": "str",
-            "sensitive": True,
-          },
-          "email_from": {
-            "description": "From email address",
-            "type": "str",
-          },
-          "email_use_tls": {
-            "description": "Use TLS encryption",
-            "type": "bool",
-            "default": True,
-          },
-          "email_use_ssl": {
-            "description": "Use SSL encryption",
-            "type": "bool",
-            "default": False,
-          }
-        },
-      },
-      "authentik": {
-        "title": "Authentik SSO",
-        "toggle": "authentik_enabled",
-        "description": "Integrate with Authentik for Single Sign-On authentication.",
-        "vars": {
-          "authentik_enabled": {
-            "description": "Enable Authentik SSO integration",
-            "type": "bool",
-            "default": False,
-          },
-          "authentik_url": {
-            "description": "Authentik base URL (e.g., https://auth.example.com)",
-            "type": "str",
-          },
-          "authentik_slug": {
-            "description": "Authentik application slug",
-            "type": "str",
-          },
-          "authentik_client_id": {
-            "description": "OAuth client ID from Authentik provider",
-            "type": "str",
-          },
-          "authentik_client_secret": {
-            "description": "OAuth client secret from Authentik provider",
-            "type": "str",
-            "sensitive": True,
-          },
-        },
-      },
-    }
-  )
-
-
-class ComposeModule(Module):
-  """Docker Compose module."""
-
-  name = "compose"
-  description = "Manage Docker Compose configurations"
-
-
-registry.register(ComposeModule)

+ 29 - 0
cli/modules/compose/__init__.py

@@ -0,0 +1,29 @@
+"""Docker Compose module with multi-schema support."""
+
+from ...core.module import Module
+from ...core.registry import registry
+
+# Import schema specifications
+from .spec_v1_0 import spec as spec_1_0
+from .spec_v1_1 import spec as spec_1_1
+
+# Schema version mapping
+SCHEMAS = {
+    "1.0": spec_1_0,
+    "1.1": spec_1_1,
+}
+
+# Default spec points to latest version
+spec = spec_1_1
+
+
+class ComposeModule(Module):
+    """Docker Compose module."""
+
+    name = "compose"
+    description = "Manage Docker Compose configurations"
+    schema_version = "1.1"  # Current schema version supported by this module
+    schemas = SCHEMAS  # Available schema versions
+
+
+registry.register(ComposeModule)

+ 278 - 0
cli/modules/compose/spec_v1_0.py

@@ -0,0 +1,278 @@
+"""Compose module schema version 1.0 - Original specification."""
+
+from collections import OrderedDict
+
+spec = OrderedDict(
+    {
+        "general": {
+            "title": "General",
+            "vars": {
+                "service_name": {
+                    "description": "Service name",
+                    "type": "str",
+                },
+                "container_name": {
+                    "description": "Container name",
+                    "type": "str",
+                },
+                "container_hostname": {
+                    "description": "Container internal hostname",
+                    "type": "str",
+                },
+                "container_timezone": {
+                    "description": "Container timezone (e.g., Europe/Berlin)",
+                    "type": "str",
+                    "default": "UTC",
+                },
+                "user_uid": {
+                    "description": "User UID for container process",
+                    "type": "int",
+                    "default": 1000,
+                },
+                "user_gid": {
+                    "description": "User GID for container process",
+                    "type": "int",
+                    "default": 1000,
+                },
+                "container_loglevel": {
+                    "description": "Container log level",
+                    "type": "enum",
+                    "options": ["debug", "info", "warn", "error"],
+                    "default": "info",
+                },
+                "restart_policy": {
+                    "description": "Container restart policy",
+                    "type": "enum",
+                    "options": ["unless-stopped", "always", "on-failure", "no"],
+                    "default": "unless-stopped",
+                },
+            },
+        },
+        "network": {
+            "title": "Network",
+            "toggle": "network_enabled",
+            "vars": {
+                "network_enabled": {
+                    "description": "Enable custom network block",
+                    "type": "bool",
+                    "default": False,
+                },
+                "network_name": {
+                    "description": "Docker network name",
+                    "type": "str",
+                    "default": "bridge",
+                },
+                "network_external": {
+                    "description": "Use existing Docker network",
+                    "type": "bool",
+                    "default": True,
+                },
+            },
+        },
+        "ports": {
+            "title": "Ports",
+            "toggle": "ports_enabled",
+            "vars": {
+                "ports_enabled": {
+                    "description": "Expose ports via 'ports' mapping",
+                    "type": "bool",
+                    "default": True,
+                }
+            },
+        },
+        "traefik": {
+            "title": "Traefik",
+            "toggle": "traefik_enabled",
+            "description": "Traefik routes external traffic to your service.",
+            "vars": {
+                "traefik_enabled": {
+                    "description": "Enable Traefik reverse proxy integration",
+                    "type": "bool",
+                    "default": False,
+                },
+                "traefik_network": {
+                    "description": "Traefik network name",
+                    "type": "str",
+                    "default": "traefik",
+                },
+                "traefik_host": {
+                    "description": "Domain name for your service (e.g., app.example.com)",
+                    "type": "str",
+                },
+                "traefik_entrypoint": {
+                    "description": "HTTP entrypoint (non-TLS)",
+                    "type": "str",
+                    "default": "web",
+                },
+            },
+        },
+        "traefik_tls": {
+            "title": "Traefik TLS/SSL",
+            "toggle": "traefik_tls_enabled",
+            "needs": "traefik",
+            "description": "Enable HTTPS/TLS for Traefik with certificate management.",
+            "vars": {
+                "traefik_tls_enabled": {
+                    "description": "Enable HTTPS/TLS",
+                    "type": "bool",
+                    "default": True,
+                },
+                "traefik_tls_entrypoint": {
+                    "description": "TLS entrypoint",
+                    "type": "str",
+                    "default": "websecure",
+                },
+                "traefik_tls_certresolver": {
+                    "description": "Traefik certificate resolver name",
+                    "type": "str",
+                    "default": "cloudflare",
+                },
+            },
+        },
+        "swarm": {
+            "title": "Docker Swarm",
+            "toggle": "swarm_enabled",
+            "description": "Deploy service in Docker Swarm mode with replicas.",
+            "vars": {
+                "swarm_enabled": {
+                    "description": "Enable Docker Swarm mode",
+                    "type": "bool",
+                    "default": False,
+                },
+                "swarm_replicas": {
+                    "description": "Number of replicas in Swarm",
+                    "type": "int",
+                    "default": 1,
+                },
+                "swarm_placement_mode": {
+                    "description": "Swarm placement mode",
+                    "type": "enum",
+                    "options": ["global", "replicated"],
+                    "default": "replicated",
+                },
+                "swarm_placement_host": {
+                    "description": "Limit placement to specific node",
+                    "type": "str",
+                },
+            },
+        },
+        "database": {
+            "title": "Database",
+            "toggle": "database_enabled",
+            "description": "Connect to external database (PostgreSQL or MySQL)",
+            "vars": {
+                "database_enabled": {
+                    "description": "Enable external database integration",
+                    "type": "bool",
+                    "default": False,
+                },
+                "database_type": {
+                    "description": "Database type",
+                    "type": "enum",
+                    "options": ["postgres", "mysql"],
+                    "default": "postgres",
+                },
+                "database_external": {
+                    "description": "Use an external database server?",
+                    "extra": "skips creation of internal database container",
+                    "type": "bool",
+                    "default": False,
+                },
+                "database_host": {
+                    "description": "Database host",
+                    "type": "str",
+                    "default": "database",
+                },
+                "database_port": {"description": "Database port", "type": "int"},
+                "database_name": {
+                    "description": "Database name",
+                    "type": "str",
+                },
+                "database_user": {
+                    "description": "Database user",
+                    "type": "str",
+                },
+                "database_password": {
+                    "description": "Database password",
+                    "type": "str",
+                    "default": "",
+                    "sensitive": True,
+                    "autogenerated": True,
+                },
+            },
+        },
+        "email": {
+            "title": "Email Server",
+            "toggle": "email_enabled",
+            "description": "Configure email server for notifications and user management.",
+            "vars": {
+                "email_enabled": {
+                    "description": "Enable email server configuration",
+                    "type": "bool",
+                    "default": False,
+                },
+                "email_host": {
+                    "description": "SMTP server hostname",
+                    "type": "str",
+                },
+                "email_port": {
+                    "description": "SMTP server port",
+                    "type": "int",
+                    "default": 587,
+                },
+                "email_username": {
+                    "description": "SMTP username",
+                    "type": "str",
+                },
+                "email_password": {
+                    "description": "SMTP password",
+                    "type": "str",
+                    "sensitive": True,
+                },
+                "email_from": {
+                    "description": "From email address",
+                    "type": "str",
+                },
+                "email_use_tls": {
+                    "description": "Use TLS encryption",
+                    "type": "bool",
+                    "default": True,
+                },
+                "email_use_ssl": {
+                    "description": "Use SSL encryption",
+                    "type": "bool",
+                    "default": False,
+                },
+            },
+        },
+        "authentik": {
+            "title": "Authentik SSO",
+            "toggle": "authentik_enabled",
+            "description": "Integrate with Authentik for Single Sign-On authentication.",
+            "vars": {
+                "authentik_enabled": {
+                    "description": "Enable Authentik SSO integration",
+                    "type": "bool",
+                    "default": False,
+                },
+                "authentik_url": {
+                    "description": "Authentik base URL (e.g., https://auth.example.com)",
+                    "type": "str",
+                },
+                "authentik_slug": {
+                    "description": "Authentik application slug",
+                    "type": "str",
+                },
+                "authentik_client_id": {
+                    "description": "OAuth client ID from Authentik provider",
+                    "type": "str",
+                },
+                "authentik_client_secret": {
+                    "description": "OAuth client secret from Authentik provider",
+                    "type": "str",
+                    "sensitive": True,
+                },
+            },
+        },
+    }
+)

+ 342 - 0
cli/modules/compose/spec_v1_1.py

@@ -0,0 +1,342 @@
+"""Compose module schema version 1.1 - Enhanced with network_mode and improved swarm.
+
+Changes from 1.0:
+- network: Added network_mode (bridge/host/macvlan) with conditional macvlan fields
+- swarm: Added volume modes (local/mount/nfs) and conditional placement constraints
+- traefik_tls: Updated needs format from 'traefik' to 'traefik_enabled=true'
+"""
+
+from collections import OrderedDict
+
+spec = OrderedDict(
+    {
+        "general": {
+            "title": "General",
+            "vars": {
+                "service_name": {
+                    "description": "Service name",
+                    "type": "str",
+                },
+                "container_name": {
+                    "description": "Container name",
+                    "type": "str",
+                },
+                "container_hostname": {
+                    "description": "Container internal hostname",
+                    "type": "str",
+                },
+                "container_timezone": {
+                    "description": "Container timezone (e.g., Europe/Berlin)",
+                    "type": "str",
+                    "default": "UTC",
+                },
+                "user_uid": {
+                    "description": "User UID for container process",
+                    "type": "int",
+                    "default": 1000,
+                },
+                "user_gid": {
+                    "description": "User GID for container process",
+                    "type": "int",
+                    "default": 1000,
+                },
+                "container_loglevel": {
+                    "description": "Container log level",
+                    "type": "enum",
+                    "options": ["debug", "info", "warn", "error"],
+                    "default": "info",
+                },
+                "restart_policy": {
+                    "description": "Container restart policy",
+                    "type": "enum",
+                    "options": ["unless-stopped", "always", "on-failure", "no"],
+                    "default": "unless-stopped",
+                },
+            },
+        },
+        "network": {
+            "title": "Network",
+            "vars": {
+                "network_mode": {
+                    "description": "Docker network mode",
+                    "type": "enum",
+                    "options": ["bridge", "host", "macvlan"],
+                    "default": "bridge",
+                },
+                "network_name": {
+                    "description": "Docker network name",
+                    "type": "str",
+                    "default": "bridge",
+                    "needs": "network_mode=bridge,macvlan",
+                },
+                "network_external": {
+                    "description": "Use existing Docker network (external)",
+                    "type": "bool",
+                    "default": False,
+                    "needs": "network_mode=bridge,macvlan",
+                },
+                "network_macvlan_ipv4_address": {
+                    "description": "Static IP address for container",
+                    "type": "str",
+                    "default": "192.168.1.253",
+                    "needs": "network_mode=macvlan",
+                },
+                "network_macvlan_parent_interface": {
+                    "description": "Host network interface name",
+                    "type": "str",
+                    "default": "eth0",
+                    "needs": "network_mode=macvlan",
+                },
+                "network_macvlan_subnet": {
+                    "description": "Network subnet in CIDR notation",
+                    "type": "str",
+                    "default": "192.168.1.0/24",
+                    "needs": "network_mode=macvlan",
+                },
+                "network_macvlan_gateway": {
+                    "description": "Network gateway IP address",
+                    "type": "str",
+                    "default": "192.168.1.1",
+                    "needs": "network_mode=macvlan",
+                },
+            },
+        },
+        "ports": {
+            "title": "Ports",
+            "toggle": "ports_enabled",
+            "needs": "network_mode=bridge",
+            "vars": {
+            },
+        },
+        "traefik": {
+            "title": "Traefik",
+            "toggle": "traefik_enabled",
+            "needs": "network_mode=bridge",
+            "description": "Traefik routes external traffic to your service.",
+            "vars": {
+                "traefik_enabled": {
+                    "description": "Enable Traefik reverse proxy integration",
+                    "type": "bool",
+                    "default": False,
+                },
+                "traefik_network": {
+                    "description": "Traefik network name",
+                    "type": "str",
+                    "default": "traefik",
+                },
+                "traefik_host": {
+                    "description": "Domain name for your service (e.g., app.example.com)",
+                    "type": "str",
+                },
+                "traefik_entrypoint": {
+                    "description": "HTTP entrypoint (non-TLS)",
+                    "type": "str",
+                    "default": "web",
+                },
+            },
+        },
+        "traefik_tls": {
+            "title": "Traefik TLS/SSL",
+            "toggle": "traefik_tls_enabled",
+            "needs": "traefik_enabled=true;network_mode=bridge",
+            "description": "Enable HTTPS/TLS for Traefik with certificate management.",
+            "vars": {
+                "traefik_tls_enabled": {
+                    "description": "Enable HTTPS/TLS",
+                    "type": "bool",
+                    "default": True,
+                },
+                "traefik_tls_entrypoint": {
+                    "description": "TLS entrypoint",
+                    "type": "str",
+                    "default": "websecure",
+                },
+                "traefik_tls_certresolver": {
+                    "description": "Traefik certificate resolver name",
+                    "type": "str",
+                    "default": "cloudflare",
+                },
+            },
+        },
+        "swarm": {
+            "title": "Docker Swarm",
+            "needs": "network_mode=bridge",
+            "toggle": "swarm_enabled",
+            "description": "Deploy service in Docker Swarm mode.",
+            "vars": {
+                "swarm_enabled": {
+                    "description": "Enable Docker Swarm mode",
+                    "type": "bool",
+                    "default": False,
+                },
+                "swarm_placement_mode": {
+                    "description": "Swarm placement mode",
+                    "type": "enum",
+                    "options": ["replicated", "global"],
+                    "default": "replicated",
+                },
+                "swarm_replicas": {
+                    "description": "Number of replicas",
+                    "type": "int",
+                    "default": 1,
+                    "needs": "swarm_placement_mode=replicated",
+                },
+                "swarm_placement_host": {
+                    "description": "Target hostname for placement constraint",
+                    "type": "str",
+                    "default": "",
+                    "optional": True,
+                    "needs": "swarm_placement_mode=replicated",
+                    "extra": "Constrains service to run on specific node by hostname",
+                },
+                "swarm_volume_mode": {
+                    "description": "Swarm volume storage backend",
+                    "type": "enum",
+                    "options": ["local", "mount", "nfs"],
+                    "default": "local",
+                    "extra": "WARNING: 'local' only works on single-node deployments!",
+                },
+                "swarm_volume_mount_path": {
+                    "description": "Host path for bind mount",
+                    "type": "str",
+                    "default": "/mnt/storage",
+                    "needs": "swarm_volume_mode=mount",
+                    "extra": "Useful for shared/replicated storage",
+                },
+                "swarm_volume_nfs_server": {
+                    "description": "NFS server address",
+                    "type": "str",
+                    "default": "192.168.1.1",
+                    "needs": "swarm_volume_mode=nfs",
+                    "extra": "IP address or hostname of NFS server",
+                },
+                "swarm_volume_nfs_path": {
+                    "description": "NFS export path",
+                    "type": "str",
+                    "default": "/export",
+                    "needs": "swarm_volume_mode=nfs",
+                    "extra": "Path to NFS export on the server",
+                },
+                "swarm_volume_nfs_options": {
+                    "description": "NFS mount options",
+                    "type": "str",
+                    "default": "rw,nolock,soft",
+                    "needs": "swarm_volume_mode=nfs",
+                    "extra": "Comma-separated NFS mount options",
+                },
+            },
+        },
+        "database": {
+            "title": "Database",
+            "toggle": "database_enabled",
+            "vars": {
+                "database_type": {
+                    "description": "Database type",
+                    "type": "enum",
+                    "options": ["default", "sqlite", "postgres", "mysql"],
+                    "default": "default",
+                },
+                "database_external": {
+                    "description": "Use an external database server?",
+                    "extra": "skips creation of internal database container",
+                    "type": "bool",
+                    "default": False,
+                },
+                "database_host": {
+                    "description": "Database host",
+                    "type": "str",
+                    "default": "database",
+                },
+                "database_port": {"description": "Database port", "type": "int"},
+                "database_name": {
+                    "description": "Database name",
+                    "type": "str",
+                },
+                "database_user": {
+                    "description": "Database user",
+                    "type": "str",
+                },
+                "database_password": {
+                    "description": "Database password",
+                    "type": "str",
+                    "default": "",
+                    "sensitive": True,
+                    "autogenerated": True,
+                },
+            },
+        },
+        "email": {
+            "title": "Email Server",
+            "toggle": "email_enabled",
+            "description": "Configure email server for notifications and user management.",
+            "vars": {
+                "email_enabled": {
+                    "description": "Enable email server configuration",
+                    "type": "bool",
+                    "default": False,
+                },
+                "email_host": {
+                    "description": "SMTP server hostname",
+                    "type": "str",
+                },
+                "email_port": {
+                    "description": "SMTP server port",
+                    "type": "int",
+                    "default": 587,
+                },
+                "email_username": {
+                    "description": "SMTP username",
+                    "type": "str",
+                },
+                "email_password": {
+                    "description": "SMTP password",
+                    "type": "str",
+                    "sensitive": True,
+                },
+                "email_from": {
+                    "description": "From email address",
+                    "type": "str",
+                },
+                "email_use_tls": {
+                    "description": "Use TLS encryption",
+                    "type": "bool",
+                    "default": True,
+                },
+                "email_use_ssl": {
+                    "description": "Use SSL encryption",
+                    "type": "bool",
+                    "default": False,
+                },
+            },
+        },
+        "authentik": {
+            "title": "Authentik SSO",
+            "toggle": "authentik_enabled",
+            "description": "Integrate with Authentik for Single Sign-On authentication.",
+            "vars": {
+                "authentik_enabled": {
+                    "description": "Enable Authentik SSO integration",
+                    "type": "bool",
+                    "default": False,
+                },
+                "authentik_url": {
+                    "description": "Authentik base URL (e.g., https://auth.example.com)",
+                    "type": "str",
+                },
+                "authentik_slug": {
+                    "description": "Authentik application slug",
+                    "type": "str",
+                },
+                "authentik_client_id": {
+                    "description": "OAuth client ID from Authentik provider",
+                    "type": "str",
+                },
+                "authentik_client_secret": {
+                    "description": "OAuth client secret from Authentik provider",
+                    "type": "str",
+                    "sensitive": True,
+                },
+            },
+        },
+    }
+)

+ 1 - 1
library/compose/gitlab/template.yaml

@@ -87,7 +87,7 @@ spec:
       registry_external_url:
         type: str
         description: External URL for Container Registry
-        default: http://localhost:5678
+        default: http://localhost:2424
   advanced:
     title: Advanced Settings
     description: Performance tuning and advanced configuration options

+ 23 - 0
library/compose/pihole/.env.pihole.j2

@@ -0,0 +1,23 @@
+# Pi-hole Configuration
+# Contains application configuration
+
+# Timezone
+TZ={{ container_timezone }}
+
+# User and Group IDs
+PIHOLE_UID={{ user_uid }}
+PIHOLE_GID={{ user_gid }}
+
+# Web Interface Admin Password
+{% if swarm_enabled %}
+# In swarm mode, password is loaded from Docker secret (use secret name, not path)
+WEBPASSWORD_FILE={{ webpassword_secret_name }}
+{% else %}
+# In compose mode, password is stored directly
+FTLCONF_webserver_api_password={{ webpassword }}
+{% endif %}
+
+# DNS Listening Mode
+{% if network_mode == 'bridge' %}
+FTLCONF_dns_listeningMode=all
+{% endif %}

+ 1 - 0
library/compose/pihole/.env.secret.j2

@@ -0,0 +1 @@
+{{ webpassword }}

+ 158 - 33
library/compose/pihole/compose.yaml.j2

@@ -1,55 +1,180 @@
 services:
-  {{ service_name | default('pihole') }}:
-    container_name: {{ container_name | default('pihole') }}
-    image: docker.io/pihole/pihole:2025.08.0
-    {% if ports_enabled %}
+  {{ service_name }}:
+    {% if not swarm_enabled %}
+    container_name: {{ container_name }}
+    {% endif %}
+    image: docker.io/pihole/pihole:2025.10.2
+    env_file:
+      - .env.pihole
+    {% if network_mode == 'host' %}
+    network_mode: host
+    {% else %}
+    networks:
+      {% if traefik_enabled %}
+      {{ traefik_network }}:
+      {% endif %}
+      {% if network_mode == 'macvlan' %}
+      {{ network_name }}:
+        ipv4_address: {{ network_macvlan_ipv4_address }}
+      {% elif network_mode == 'bridge' %}
+      {{ network_name }}:
+      {% endif %}
+    {% endif %}
+    {% if network_mode not in ['host', 'macvlan'] %}
     ports:
-      - "{{ ports_dns_tcp | default(53) }}:53/tcp"
-      - "{{ ports_dns_udp | default(53) }}:53/udp"
-      - "{{ ports_dhcp | default(67) }}:67/udp"
-      - "{{ ports_http | default(8081) }}:80/tcp"
-      - "{{ ports_https | default(8443) }}:443/tcp"
-    {% endif %}
-    environment:
-      - TZ={{ container_timezone | default('UTC') }}
-      {% if pihole_webpassword %}
-      - FTLCONF_webserver_api_password={{ pihole_webpassword }}
-      {% endif %}
-      - FTLCONF_dns_upstreams={{ pihole_dns_upstreams | default('8.8.8.8;8.8.4.4') }}
+      {% if not traefik_enabled %}
+      {% if swarm_enabled %}
+      - target: 80
+        published: {{ ports_http }}
+        protocol: tcp
+        mode: host
+      - target: 443
+        published: {{ ports_https }}
+        protocol: tcp
+        mode: host
+      {% else %}
+      - "{{ ports_http }}:80/tcp"
+      - "{{ ports_https }}:443/tcp"
+      {% endif %}
+      {% endif %}
+      {% if swarm_enabled %}
+      - target: 53
+        published: {{ ports_dns }}
+        protocol: tcp
+        mode: host
+      - target: 53
+        published: {{ ports_dns }}
+        protocol: udp
+        mode: host
+      - target: 123
+        published: {{ ports_ntp }}
+        protocol: udp
+        mode: host
+      {% else %}
+      - "{{ ports_dns }}:53/tcp"
+      - "{{ ports_dns }}:53/udp"
+      - "{{ ports_ntp }}:123/udp"
+      {% endif %}
+    {% endif %}
     volumes:
+      {% if not swarm_enabled %}
       - config_dnsmasq:/etc/dnsmasq.d
       - config_pihole:/etc/pihole
-    {% if network_enabled %}
-    networks:
-      - {{ network_name | default('bridge') }}
-    {% endif %}
+      {% else %}
+      {% if swarm_volume_mode == 'mount' %}
+      - {{ swarm_volume_mount_path }}/dnsmasq:/etc/dnsmasq.d:rw
+      - {{ swarm_volume_mount_path }}/pihole:/etc/pihole:rw
+      {% elif swarm_volume_mode == 'local' %}
+      - config_dnsmasq:/etc/dnsmasq.d
+      - config_pihole:/etc/pihole
+      {% elif swarm_volume_mode == 'nfs' %}
+      - config_dnsmasq:/etc/dnsmasq.d
+      - config_pihole:/etc/pihole
+      {% endif %}
+      {% endif %}
+    cap_add:
+      - NET_ADMIN
+      - SYS_TIME
+    {% if swarm_enabled %}
+    secrets:
+      - {{ webpassword_secret_name }}
+    deploy:
+      mode: replicated
+      replicas: 1
+      placement:
+        constraints:
+          - node.hostname == {{ swarm_placement_host }}
+      {% if traefik_enabled %}
+      labels:
+        - traefik.enable=true
+        - traefik.http.services.{{ service_name }}-web.loadBalancer.server.port=80
+        - traefik.http.routers.{{ service_name }}-http.service={{ service_name }}-web
+        - traefik.http.routers.{{ service_name }}-http.rule=Host(`{{ traefik_host }}`)
+        - traefik.http.routers.{{ service_name }}-http.entrypoints={{ traefik_entrypoint }}
+        {% if traefik_tls_enabled %}
+        - traefik.http.routers.{{ service_name }}-https.service={{ service_name }}-web
+        - traefik.http.routers.{{ service_name }}-https.rule=Host(`{{ traefik_host }}`)
+        - traefik.http.routers.{{ service_name }}-https.entrypoints={{ traefik_tls_entrypoint }}
+        - traefik.http.routers.{{ service_name }}-https.tls=true
+        - traefik.http.routers.{{ service_name }}-https.tls.certresolver={{ traefik_tls_certresolver }}
+        {% endif %}
+      {% endif %}
+    {% else %}
     {% if traefik_enabled %}
     labels:
       - traefik.enable=true
-      - traefik.http.services.{{ service_name | default('pihole') }}.loadBalancer.server.port=80
-      - traefik.http.routers.{{ service_name | default('pihole') }}-http.service={{ service_name | default('pihole') }}
-      - traefik.http.routers.{{ service_name | default('pihole') }}-http.rule=Host(`{{ traefik_host }}`)
-      - traefik.http.routers.{{ service_name | default('pihole') }}-http.entrypoints={{ traefik_entrypoint | default('web') }}
+      - traefik.http.services.{{ service_name }}-web.loadBalancer.server.port=80
+      - traefik.http.routers.{{ service_name }}-http.service={{ service_name }}-web
+      - traefik.http.routers.{{ service_name }}-http.rule=Host(`{{ traefik_host }}`)
+      - traefik.http.routers.{{ service_name }}-http.entrypoints={{ traefik_entrypoint }}
       {% if traefik_tls_enabled %}
-      - traefik.http.routers.{{ service_name | default('pihole') }}-https.service={{ service_name | default('pihole') }}
-      - traefik.http.routers.{{ service_name | default('pihole') }}-https.rule=Host(`{{ traefik_host }}`)
-      - traefik.http.routers.{{ service_name | default('pihole') }}-https.entrypoints={{ traefik_tls_entrypoint | default('websecure') }}
-      - traefik.http.routers.{{ service_name | default('pihole') }}-https.tls=true
-      - traefik.http.routers.{{ service_name | default('pihole') }}-https.tls.certresolver={{ traefik_tls_certresolver }}
+      - traefik.http.routers.{{ service_name }}-https.service={{ service_name }}-web
+      - traefik.http.routers.{{ service_name }}-https.rule=Host(`{{ traefik_host }}`)
+      - traefik.http.routers.{{ service_name }}-https.entrypoints={{ traefik_tls_entrypoint }}
+      - traefik.http.routers.{{ service_name }}-https.tls=true
+      - traefik.http.routers.{{ service_name }}-https.tls.certresolver={{ traefik_tls_certresolver }}
       {% endif %}
     {% endif %}
-    restart: {{ restart_policy | default('unless-stopped') }}
+    restart: {{ restart_policy }}
+    {% endif %}
 
+{% if swarm_enabled %}
+{% if swarm_volume_mode in ['local', 'nfs'] %}
 volumes:
   config_dnsmasq:
+    {% if swarm_volume_mode == 'nfs' %}
     driver: local
+    driver_opts:
+      type: nfs
+      o: addr={{ swarm_volume_nfs_server }},{{ swarm_volume_nfs_options }}
+      device: ":{{ swarm_volume_nfs_path }}/dnsmasq"
+    {% endif %}
   config_pihole:
+    {% if swarm_volume_mode == 'nfs' %}
     driver: local
+    driver_opts:
+      type: nfs
+      o: addr={{ swarm_volume_nfs_server }},{{ swarm_volume_nfs_options }}
+      device: ":{{ swarm_volume_nfs_path }}/pihole"
+    {% endif %}
+{% endif %}
 
-{% if network_enabled %}
+secrets:
+  {{ webpassword_secret_name }}:
+    file: ./.env.secret
+{% else %}
+volumes:
+  config_dnsmasq:
+    driver: local
+  config_pihole:
+    driver: local
+{% endif %}
+
+{% if network_mode != 'host' %}
 networks:
-  {{ network_name | default('bridge') }}:
-    {% if network_external %}
+  {% if network_mode == 'macvlan' %}
+  {{ network_name }}:
+    driver: macvlan
+    driver_opts:
+      parent: {{ network_macvlan_parent_interface }}
+    ipam:
+      config:
+        - subnet: {{ network_macvlan_subnet }}
+          gateway: {{ network_macvlan_gateway }}
+  {% elif network_mode == 'bridge' and network_external %}
+  {{ network_name }}:
     external: true
+  {% elif network_mode == 'bridge' and not network_external %}
+  {{ network_name }}:
+    {% if swarm_enabled %}
+    driver: overlay
+    attachable: true
+    {% else %}
+    driver: bridge
     {% endif %}
+  {% endif %}
+  {% if traefik_enabled %}
+  {{ traefik_network }}:
+    external: true
+  {% endif %}
 {% endif %}

+ 73 - 34
library/compose/pihole/template.yaml

@@ -1,11 +1,12 @@
 ---
 kind: compose
+schema: "1.1"
 metadata:
   name: Pihole
   description: >
-    Pi-hole is a network-wide ad blocker that acts as a DNS sinkhole, blocking ads and trackers for all devices on your network.
-    It improves browsing speed, enhances privacy, and reduces bandwidth usage by filtering out unwanted content at the DNS level.
-    Pi-hole can be easily integrated with existing routers and supports custom blocklists and whitelists.
+    Network-wide advertisement and internet tracker blocking application that functions as a DNS blackhole.
+    Provides DNS-level content filtering for all network devices, improving browsing performance, privacy, and security.
+    Supports custom blocklists, whitelists, and seamless integration with existing network infrastructure.
 
 
     Project: https://pi-hole.net/
@@ -13,52 +14,90 @@ metadata:
     Documentation: https://docs.pi-hole.net/
 
     GitHub: https://github.com/pi-hole/pi-hole
-  version: 2025.08.0
+  version: 2025.10.2
   author: Christian Lempa
-  date: '2025-09-28'
+  date: '2025-10-28'
   tags:
     - dns
     - ad-blocking
-  draft: true
+  next_steps: |
+    {% if swarm_enabled -%}
+    1. Deploy to Docker Swarm:
+       docker stack deploy -c compose.yaml pihole
+    2. Access the Web Interface:
+       {%- if traefik_enabled == True -%}https://{{ traefik_host }}/admin
+       {%- else -%}https://<your-swarm-node-ip>:{{ ports_https }}/admin{%- endif %}
+    3. Configure devices to use swarm node IP as DNS
+    {% else -%}
+    1. Deploy to Docker:
+       docker compose up -d
+    2. Access the Web Interface:
+       {%- if traefik_enabled == True -%}https://{{ traefik_host }}/admin
+       {%- else -%}https://<your-docker-host-ip>:{{ ports_https }}/admin{%- endif %}
+    3. Configure devices to use docker host IP as DNS
+    {% endif -%}
 spec:
+  general:
+    vars:
+      service_name:
+        default: "pihole"
+      container_name:
+        default: "pihole"
+  admin_settings:
+    description: "Admin Pi-hole Settings"
+    required: true
+    vars:
+      webpassword:
+        description: "Web interface admin password"
+        type: str
+        sensitive: true
+        default: ""
+        autogenerated: true
+  traefik:
+    vars:
+      traefik_enabled:
+        needs: "network_mode=bridge"
+      traefik_host:
+        default: "pihole.home.arpa"
+  network:
+    vars:
+      network_mode:
+        extra: >
+          If you need DHCP functionality, use 'host' or 'macvlan' mode.
+          NOTE: Swarm only supports 'bridge' mode!"
+      network_name:
+        default: "pihole_network"
   ports:
     vars:
       ports_http:
-        description: "Host port for HTTP web interface (80)"
+        description: "External HTTP port"
         type: int
         default: 8080
+        needs: ["traefik_enabled=false", "network_mode=bridge"]
       ports_https:
-        description: "Host port for HTTPS web interface (443)"
+        description: "External HTTPS port"
         type: int
         default: 8443
-      ports_dns_udp:
-        description: "Host port for DNS over UDP (53)"
+        needs: ["traefik_enabled=false", "network_mode=bridge"]
+      ports_dns:
+        description: "External DNS port"
         type: int
         default: 53
-      ports_dns_tcp:
-        description: "Host port for DNS over TCP (53)"
+        needs: "network_mode=bridge"
+      ports_ntp:
+        description: "External NTP port"
         type: int
-        default: 53
-      ports_dhcp:
-        description: "Host port for DHCP (67)"
-        type: int
-        default: 67
-  pihole:
-    description: "Pi-hole configuration settings"
-    required: true
-    vars:
-      pihole_webpassword:
-        description: "Web interface admin password"
-        type: str
-        sensitive: true
-        default: "changeme"
-      pihole_dns_upstreams:
-        description: "Upstream DNS servers (semicolon separated)"
-        type: str
-        default: "1.1.1.1;1.0.0.1"
-  general:
+        default: 123
+        needs: "network_mode=bridge"
+  swarm:
     vars:
-      pihole_version:
+      swarm_enabled:
+        needs: "network_mode=bridge"
+      swarm_placement_host:
+        required: true
+        optional: false
+        needs: null
+      webpassword_secret_name:
+        description: "Docker Swarm secret name for admin password"
         type: str
-        description: Pihole version
-        default: latest
+        default: "pihole_webpassword"

+ 3 - 19
library/compose/traefik/.env.j2

@@ -1,21 +1,5 @@
 # Traefik Environment Variables
-# This file contains sensitive credentials for ACME DNS providers
-
-{% if traefik_tls_enabled %}
-# ACME DNS Challenge Configuration
-{% if traefik_tls_acme_provider == "cloudflare" %}
-# Cloudflare API Token
-# Required permissions: Zone:DNS:Edit
-# Create token at: https://dash.cloudflare.com/profile/api-tokens
-{% if swarm_enabled %}
-# Swarm mode: API token read from Docker secret
-CF_DNS_API_TOKEN_FILE=/run/secrets/{{ traefik_tls_acme_secret_name }}
-{% else %}
-# Standard mode: API token from environment variable
-CF_API_TOKEN={{ traefik_tls_acme_token }}
-{% endif %}
-{% endif %}
-
-{% else %}
-# ACME/TLS is disabled - no DNS provider credentials needed
+# Reference to secret file containing API token
+{% if traefik_tls_enabled and traefik_tls_acme_provider == "cloudflare" %}
+CF_API_TOKEN_FILE=/.env.secret
 {% endif %}

+ 1 - 0
library/compose/traefik/.env.secret.j2

@@ -0,0 +1 @@
+{% if traefik_tls_enabled %}{{ traefik_tls_acme_token }}{% endif %}

+ 69 - 7
library/compose/traefik/compose.yaml.j2

@@ -1,6 +1,6 @@
 services:
   {{ service_name }}:
-    image: docker.io/library/traefik:v3.2
+    image: docker.io/library/traefik:v3.5.3
     {% if not swarm_enabled %}
     container_name: {{ container_name }}
     {% endif %}
@@ -14,14 +14,44 @@ services:
     {% endif %}
     volumes:
       - /var/run/docker.sock:/var/run/docker.sock:ro
+      {% if not swarm_enabled %}
       - ./config/:/etc/traefik/:ro
       - ./certs/:/var/traefik/certs/:rw
-    {% if traefik_tls_enabled %}
+      {% else %}
+      {% if swarm_volume_mode == 'mount' %}
+      - {{ swarm_volume_mount_path }}:/var/traefik/certs/:rw
+      {% elif swarm_volume_mode == 'local' %}
+      - traefik_certs:/var/traefik/certs/:rw
+      {% elif swarm_volume_mode == 'nfs' %}
+      - traefik_certs:/var/traefik/certs/:rw
+      {% endif %}
+      {% endif %}
+      {% if traefik_tls_enabled %}
+      {% if not swarm_enabled %}
+      - ./.env.secret:/.env.secret:ro
+      {% endif %}
     env_file:
-      - ./.env.
+      - ./.env
+    {% endif %}
+    {% if swarm_enabled %}
+    configs:
+      - source: traefik_config
+        target: /etc/traefik/traefik.yaml
+      - source: traefik_middlewares
+        target: /etc/traefik/files/middlewares.yaml
+      - source: traefik_tls
+        target: /etc/traefik/files/tls.yaml
+      - source: traefik_external_services
+        target: /etc/traefik/files/external-services.yaml
     {% endif %}
     environment:
       - TZ={{ container_timezone }}
+    healthcheck:
+      test: ["CMD", "traefik", "healthcheck", "--ping"]
+      interval: 30s
+      timeout: 5s
+      retries: 3
+      start_period: 10s
     {% if network_enabled %}
     networks:
       - {{ network_name }}
@@ -29,7 +59,9 @@ services:
     {% if swarm_enabled %}
     {% if traefik_tls_enabled %}
     secrets:
-      - {{ traefik_tls_acme_secret_name }}
+      - source: {{ traefik_tls_acme_secret_name }}
+        target: /.env.secret
+        mode: 0400
     {% endif %}
     deploy:
       mode: {{ swarm_placement_mode }}
@@ -39,16 +71,40 @@ services:
       {% if swarm_placement_host %}
       placement:
         constraints:
-          - {{ swarm_placement_host }}
+          - node.hostname == {{ swarm_placement_host }}
       {% endif %}
     {% else %}
     restart: {{ restart_policy }}
     {% endif %}
 
-{% if swarm_enabled and traefik_tls_enabled %}
+{% if swarm_enabled %}
+{% if swarm_volume_mode in ['local', 'nfs'] %}
+volumes:
+  traefik_certs:
+    {% if swarm_volume_mode == 'nfs' %}
+    driver: local
+    driver_opts:
+      type: nfs
+      o: addr={{ swarm_volume_nfs_server }},{{ swarm_volume_nfs_options }}
+      device: ":{{ swarm_volume_nfs_path }}"
+    {% endif %}
+{% endif %}
+
+configs:
+  traefik_config:
+    file: ./config/traefik.yaml
+  traefik_middlewares:
+    file: ./config/files/middlewares.yaml
+  traefik_tls:
+    file: ./config/files/tls.yaml
+  traefik_external_services:
+    file: ./config/files/external-services.yaml
+
+{% if traefik_tls_enabled %}
 secrets:
   {{ traefik_tls_acme_secret_name }}:
-    external: true
+    file: ./.env.secret
+{% endif %}
 {% endif %}
 
 {% if network_enabled %}
@@ -57,6 +113,12 @@ networks:
     {% if network_external %}
     external: true
     {% else %}
+    {% if swarm_enabled %}
+    driver: overlay
+    attachable: true
+    {% else %}
     driver: bridge
     {% endif %}
+    name: {{ network_name }}
+    {% endif %}
 {% endif %}

+ 29 - 17
library/compose/traefik/template.yaml

@@ -1,5 +1,6 @@
 ---
 kind: compose
+schema: "1.1"
 metadata:
   name: Traefik
   description: >
@@ -10,7 +11,7 @@ metadata:
     Project: https://traefik.io/
 
     Documentation: https://doc.traefik.io/traefik/
-  version: v3.2
+  version: v3.5.3
   author: "Christian Lempa"
   date: "2025-10-02"
   tags:
@@ -74,10 +75,21 @@ spec:
     title: "Traefik Settings"
     description: "Configure Traefik as a reverse proxy"
     required: true
+    vars:
+      traefik_entrypoint:
+        type: "str"
+        description: "HTTP entrypoint name (non-TLS)"
+        default: "web"
+        extra: "Standard HTTP traffic on port 80"
+      traefik_dashboard_enabled:
+        type: "bool"
+        description: "Enable Traefik dashboard (insecure mode)"
+        default: false
+        extra: "WARNING: Don't use in production! Exposes dashboard on port 8080"
   traefik_tls:
     title: "Traefik TLS Settings"
     description: "Configure TLS/SSL with Let's Encrypt ACME"
-    needs: "traefik"
+    needs: null
     vars:
       traefik_tls_enabled:
         type: "bool"
@@ -93,9 +105,8 @@ spec:
       traefik_tls_acme_token:
         type: "str"
         description: "DNS provider API token"
-        default: "your-api-token-here"
         sensitive: true
-        extra: "For Cloudflare, create an API token with Zone:DNS:Edit permissions"
+        extra: "For Cloudflare, create an API token with Zone:DNS:Edit permissions. Leave empty to use Docker Swarm secrets."
       traefik_tls_acme_secret_name:
         type: "str"
         description: "Docker Swarm secret name for API token (swarm mode only)"
@@ -113,25 +124,26 @@ spec:
   ports:
     toggle: "ports_enabled"
     vars:
-      traefik_dashboard_enabled:
-        type: "bool"
-        description: "Enable Traefik dashboard (don't use in production)"
-        default: false
-        extra: "Exposes dashboard on port 8080 in insecure mode"
+      traefik_http_port:
+        type: "int"
+        description: "HTTP port (external)"
+        default: 80
+        extra: "Maps to entrypoint 'web' (port 80)"
+      traefik_https_port:
+        type: "int"
+        description: "HTTPS port (external)"
+        default: 443
+        extra: "Maps to entrypoint 'websecure' (port 443)"
   network:
     vars:
       network_enabled:
         default: true
+      network_mode:
+        default: "bridge"
       network_name:
         default: "proxy"
-  swarm:
-    vars:
-      swarm_placement_mode:
-        default: "global"
-      swarm_placement_host:
-        type: str
-        description: "Placement constraint for node selection (optional)"
-        default: ""
+      network_external:
+        default: false
   authentik:
     title: Authentik Middleware
     description: Enable Authentik SSO integration for Traefik

+ 1 - 1
pyproject.toml

@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "boilerplates"
-version = "0.0.0"  # NOTE: Placeholder - will be overwritten by release script
+version = "0.0.7"
 description = "CLI tool for managing infrastructure boilerplates"
 readme = "README.md"
 requires-python = ">=3.9"

+ 2 - 2
requirements.txt

@@ -1,5 +1,5 @@
 typer==0.19.2
-rich==14.1.0
-PyYAML==6.0.2
+rich==14.2.0
+PyYAML==6.0.3
 python-frontmatter==1.1.0
 Jinja2==3.1.6

Niektóre pliki nie zostały wyświetlone z powodu dużej ilości zmienionych plików