瀏覽代碼

chore(config): align code formatting with PEP 8 standards

- Update .editorconfig to include JS/TS file rules (2 spaces)
- Update AGENTS.md to use ruff instead of pylint
- Remove redundant indentation standards (handled by .editorconfig)
- Run ruff check --fix to remove unused imports and fix code issues
- Run ruff format to apply PEP 8 formatting (4 spaces for Python)
- Remove local config display message (now debug-level only)
xcad 3 月之前
父節點
當前提交
9990395193
共有 21 個文件被更改,包括 5887 次插入5112 次删除
  1. 3 42
      .editorconfig
  2. 1 2
      AGENTS.md
  3. 181 165
      cli/__main__.py
  4. 808 746
      cli/core/collection.py
  5. 271 201
      cli/core/config.py
  6. 340 216
      cli/core/display.py
  7. 32 14
      cli/core/exceptions.py
  8. 362 307
      cli/core/library.py
  9. 1263 1024
      cli/core/module.py
  10. 266 219
      cli/core/prompt.py
  11. 31 23
      cli/core/registry.py
  12. 141 99
      cli/core/repo.py
  13. 114 106
      cli/core/section.py
  14. 888 771
      cli/core/template.py
  15. 62 56
      cli/core/validators.py
  16. 429 409
      cli/core/variable.py
  17. 88 88
      cli/core/version.py
  18. 7 7
      cli/modules/compose/__init__.py
  19. 263 267
      cli/modules/compose/spec_v1_0.py
  20. 337 341
      cli/modules/compose/spec_v1_1.py
  21. 0 9
      pyproject.toml

+ 3 - 42
.editorconfig

@@ -9,57 +9,18 @@ indent_style = space
 insert_final_newline = true
 insert_final_newline = true
 trim_trailing_whitespace = true
 trim_trailing_whitespace = true
 
 
-[/docker-compose/nginx/**/*.conf]
-indent_size = 2
-
-[/docker-compose/wazuh/**/*.conf]
+[*.json]
 indent_size = 2
 indent_size = 2
 
 
-[*.css]
-indent_size = 2
-
-[{*.go,go.mod}]
-indent_style = tab
-indent_size = unset
-
-[*.hcl]
-indent_size = unset
-
-[*{.min,.min.*,-min}.js]
-charset = unset
-indent_size = unset
-indent_style = unset
-insert_final_newline = unset
-max_line_length = off
-
-[*.json]
+[*.{js,jsx,ts,tsx}]
 indent_size = 2
 indent_size = 2
 
 
 [*.md]
 [*.md]
 indent_size = unset
 indent_size = unset
 trim_trailing_whitespace = false
 trim_trailing_whitespace = false
 
 
-[*.nix]
-indent_size = 2
-
 [*.py]
 [*.py]
-indent_size = 2
-
-[*.tf]
-indent_size = unset
-
-[/packer/**/http/user-data]
-indent_size = 2
+indent_size = 4
 
 
 [{*.{yaml,yml},.yamllint}]
 [{*.{yaml,yml},.yamllint}]
 indent_size = 2
 indent_size = 2
-
-[*.xml]
-indent_size = 2
-
-[Makefile]
-indent_style = tab
-indent_size = unset
-
-[Vagrantfile]
-indent_size = 2

+ 1 - 2
AGENTS.md

@@ -21,8 +21,7 @@ python3 -m cli --log-level DEBUG compose list
 
 
 Should **always** happen before pushing anything to the repository.
 Should **always** happen before pushing anything to the repository.
 
 
-- Use `yamllint` for YAML files and `pylint` for Python code.
-- Use `2` spaces for YAML and Python indentation.
+- Use `yamllint` for YAML files and `ruff` for Python code.
 
 
 ### Project Management and Git
 ### Project Management and Git
 
 

+ 181 - 165
cli/__main__.py

@@ -3,6 +3,7 @@
 Main entry point for the Boilerplates CLI application.
 Main entry point for the Boilerplates CLI application.
 This file serves as the primary executable when running the CLI.
 This file serves as the primary executable when running the CLI.
 """
 """
+
 from __future__ import annotations
 from __future__ import annotations
 
 
 import importlib
 import importlib
@@ -11,7 +12,7 @@ import pkgutil
 import sys
 import sys
 from pathlib import Path
 from pathlib import Path
 from typing import Optional
 from typing import Optional
-from typer import Typer, Context, Option
+from typer import Typer, Option
 from rich.console import Console
 from rich.console import Console
 import cli.modules
 import cli.modules
 from cli.core.registry import registry
 from cli.core.registry import registry
@@ -20,185 +21,200 @@ from cli import __version__
 # Using standard Python exceptions instead of custom ones
 # Using standard Python exceptions instead of custom ones
 
 
 app = Typer(
 app = Typer(
-  help="CLI tool for managing infrastructure boilerplates.\n\n[dim]Easily generate, customize, and deploy templates for Docker Compose, Terraform, Kubernetes, and more.\n\n [white]Made with 💜 by [bold]Christian Lempa[/bold]",
-  add_completion=True,
-  rich_markup_mode="rich",
+    help="CLI tool for managing infrastructure boilerplates.\n\n[dim]Easily generate, customize, and deploy templates for Docker Compose, Terraform, Kubernetes, and more.\n\n [white]Made with 💜 by [bold]Christian Lempa[/bold]",
+    add_completion=True,
+    rich_markup_mode="rich",
 )
 )
 console = Console()
 console = Console()
 
 
+
 def setup_logging(log_level: str = "WARNING") -> None:
 def setup_logging(log_level: str = "WARNING") -> None:
-  """Configure the logging system with the specified log level.
-  
-  Args:
-      log_level: The logging level (DEBUG, INFO, WARNING, ERROR, CRITICAL)
-  
-  Raises:
-      ValueError: If the log level is invalid
-      RuntimeError: If logging configuration fails
-  """
-  numeric_level = getattr(logging, log_level.upper(), None)
-  if not isinstance(numeric_level, int):
-    raise ValueError(
-      f"Invalid log level '{log_level}'. Valid levels: DEBUG, INFO, WARNING, ERROR, CRITICAL"
-    )
-  
-  try:
-    logging.basicConfig(
-      level=numeric_level,
-      format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
-      datefmt='%Y-%m-%d %H:%M:%S'
-    )
+    """Configure the logging system with the specified log level.
+
+    Args:
+        log_level: The logging level (DEBUG, INFO, WARNING, ERROR, CRITICAL)
+
+    Raises:
+        ValueError: If the log level is invalid
+        RuntimeError: If logging configuration fails
+    """
+    numeric_level = getattr(logging, log_level.upper(), None)
+    if not isinstance(numeric_level, int):
+        raise ValueError(
+            f"Invalid log level '{log_level}'. Valid levels: DEBUG, INFO, WARNING, ERROR, CRITICAL"
+        )
+
+    try:
+        logging.basicConfig(
+            level=numeric_level,
+            format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
+            datefmt="%Y-%m-%d %H:%M:%S",
+        )
+
+        logger = logging.getLogger(__name__)
+        logger.setLevel(numeric_level)
+    except Exception as e:
+        raise RuntimeError(f"Failed to configure logging: {e}")
 
 
-    logger = logging.getLogger(__name__)
-    logger.setLevel(numeric_level)
-  except Exception as e:
-    raise RuntimeError(f"Failed to configure logging: {e}")
 
 
 @app.callback(invoke_without_command=True)
 @app.callback(invoke_without_command=True)
 def main(
 def main(
-  version: Optional[bool] = Option(
-    None,
-    "--version",
-    "-v",
-    help="Show the application version and exit.",
-    is_flag=True,
-    callback=lambda v: console.print(f"boilerplates version {__version__}") or sys.exit(0) if v else None,
-    is_eager=True,
-  ),
-  log_level: Optional[str] = Option(
-    None,
-    "--log-level",
-    help="Set the logging level (DEBUG, INFO, WARNING, ERROR, CRITICAL). If omitted, logging is disabled."
-  )
+    version: Optional[bool] = Option(
+        None,
+        "--version",
+        "-v",
+        help="Show the application version and exit.",
+        is_flag=True,
+        callback=lambda v: console.print(f"boilerplates version {__version__}")
+        or sys.exit(0)
+        if v
+        else None,
+        is_eager=True,
+    ),
+    log_level: Optional[str] = Option(
+        None,
+        "--log-level",
+        help="Set the logging level (DEBUG, INFO, WARNING, ERROR, CRITICAL). If omitted, logging is disabled.",
+    ),
 ) -> None:
 ) -> None:
-  """CLI tool for managing infrastructure boilerplates."""
-  # Disable logging by default; only enable when user provides --log-level
-  if log_level:
-    # Re-enable logging and configure
-    logging.disable(logging.NOTSET)
-    setup_logging(log_level)
-  else:
-    # Silence all logging (including third-party) unless user explicitly requests it
-    logging.disable(logging.CRITICAL)
-  
-  # Get context without type annotation (compatible with all Typer versions)
-  import click
-  ctx = click.get_current_context()
-  
-  # Store log level in context for potential use by other commands
-  ctx.ensure_object(dict)
-  ctx.obj['log_level'] = log_level
-  
-  # Check for local config.yaml and show indicator
-  local_config = Path.cwd() / "config.yaml"
-  if local_config.exists() and local_config.is_file():
-    console.print(f"[dim]→ Using local config: config.yaml[/dim]")
-  
-  # If no subcommand is provided, show help and friendly intro
-  if ctx.invoked_subcommand is None:
-    console.print(ctx.get_help())
-    sys.exit(0)
+    """CLI tool for managing infrastructure boilerplates."""
+    # Disable logging by default; only enable when user provides --log-level
+    if log_level:
+        # Re-enable logging and configure
+        logging.disable(logging.NOTSET)
+        setup_logging(log_level)
+    else:
+        # Silence all logging (including third-party) unless user explicitly requests it
+        logging.disable(logging.CRITICAL)
+
+    # Get context without type annotation (compatible with all Typer versions)
+    import click
+
+    ctx = click.get_current_context()
+
+    # Store log level in context for potential use by other commands
+    ctx.ensure_object(dict)
+    ctx.obj["log_level"] = log_level
+
+    # If no subcommand is provided, show help and friendly intro
+    if ctx.invoked_subcommand is None:
+        console.print(ctx.get_help())
+        sys.exit(0)
+
 
 
 def init_app() -> None:
 def init_app() -> None:
-  """Initialize the application by discovering and registering modules.
-  
-  Raises:
-      ImportError: If critical module import operations fail
-      RuntimeError: If application initialization fails
-  """
-  logger = logging.getLogger(__name__)
-  failed_imports = []
-  failed_registrations = []
-  
-  try:
-    # Auto-discover and import all modules
-    modules_path = Path(cli.modules.__file__).parent
-    logger.debug(f"Discovering modules in {modules_path}")
-    
-    for finder, name, ispkg in pkgutil.iter_modules([str(modules_path)]):
-      # Import both module files and packages (for multi-schema modules)
-      if not name.startswith('_') and name != 'base':
+    """Initialize the application by discovering and registering modules.
+
+    Raises:
+        ImportError: If critical module import operations fail
+        RuntimeError: If application initialization fails
+    """
+    logger = logging.getLogger(__name__)
+    failed_imports = []
+    failed_registrations = []
+
+    try:
+        # Auto-discover and import all modules
+        modules_path = Path(cli.modules.__file__).parent
+        logger.debug(f"Discovering modules in {modules_path}")
+
+        for finder, name, ispkg in pkgutil.iter_modules([str(modules_path)]):
+            # Import both module files and packages (for multi-schema modules)
+            if not name.startswith("_") and name != "base":
+                try:
+                    logger.debug(
+                        f"Importing module: {name} ({'package' if ispkg else 'file'})"
+                    )
+                    importlib.import_module(f"cli.modules.{name}")
+                except ImportError as e:
+                    error_info = f"Import failed for '{name}': {str(e)}"
+                    failed_imports.append(error_info)
+                    logger.warning(error_info)
+                except Exception as e:
+                    error_info = f"Unexpected error importing '{name}': {str(e)}"
+                    failed_imports.append(error_info)
+                    logger.error(error_info)
+
+        # Register core repo command
         try:
         try:
-          logger.debug(f"Importing module: {name} ({'package' if ispkg else 'file'})")
-          importlib.import_module(f"cli.modules.{name}")
-        except ImportError as e:
-          error_info = f"Import failed for '{name}': {str(e)}"
-          failed_imports.append(error_info)
-          logger.warning(error_info)
+            logger.debug("Registering repo command")
+            repo.register_cli(app)
         except Exception as e:
         except Exception as e:
-          error_info = f"Unexpected error importing '{name}': {str(e)}"
-          failed_imports.append(error_info)
-          logger.error(error_info)
-    
-    # Register core repo command
-    try:
-      logger.debug("Registering repo command")
-      repo.register_cli(app)
+            error_info = f"Repo command registration failed: {str(e)}"
+            failed_registrations.append(error_info)
+            logger.warning(error_info)
+
+        # Register template-based modules with app
+        module_classes = list(registry.iter_module_classes())
+        logger.debug(f"Registering {len(module_classes)} template-based modules")
+
+        for name, module_cls in module_classes:
+            try:
+                logger.debug(f"Registering module class: {module_cls.__name__}")
+                module_cls.register_cli(app)
+            except Exception as e:
+                error_info = (
+                    f"Registration failed for '{module_cls.__name__}': {str(e)}"
+                )
+                failed_registrations.append(error_info)
+                # Log warning but don't raise exception for individual module failures
+                logger.warning(error_info)
+                console.print(f"[yellow]Warning:[/yellow] {error_info}")
+
+        # If we have no modules registered at all, that's a critical error
+        if not module_classes and not failed_imports:
+            raise RuntimeError("No modules found to register")
+
+        # Log summary
+        successful_modules = len(module_classes) - len(failed_registrations)
+        logger.info(
+            f"Application initialized: {successful_modules} modules registered successfully"
+        )
+
+        if failed_imports:
+            logger.info(f"Module import failures: {len(failed_imports)}")
+        if failed_registrations:
+            logger.info(f"Module registration failures: {len(failed_registrations)}")
+
     except Exception as e:
     except Exception as e:
-      error_info = f"Repo command registration failed: {str(e)}"
-      failed_registrations.append(error_info)
-      logger.warning(error_info)
-    
-    # Register template-based modules with app
-    module_classes = list(registry.iter_module_classes())
-    logger.debug(f"Registering {len(module_classes)} template-based modules")
-    
-    for name, module_cls in module_classes:
-      try:
-        logger.debug(f"Registering module class: {module_cls.__name__}")
-        module_cls.register_cli(app)
-      except Exception as e:
-        error_info = f"Registration failed for '{module_cls.__name__}': {str(e)}"
-        failed_registrations.append(error_info)
-        # Log warning but don't raise exception for individual module failures
-        logger.warning(error_info)
-        console.print(f"[yellow]Warning:[/yellow] {error_info}")
-    
-    # If we have no modules registered at all, that's a critical error
-    if not module_classes and not failed_imports:
-      raise RuntimeError("No modules found to register")
-    
-    # Log summary
-    successful_modules = len(module_classes) - len(failed_registrations)
-    logger.info(f"Application initialized: {successful_modules} modules registered successfully")
-    
-    if failed_imports:
-      logger.info(f"Module import failures: {len(failed_imports)}")
-    if failed_registrations:
-      logger.info(f"Module registration failures: {len(failed_registrations)}")
-      
-  except Exception as e:
-    error_details = []
-    if failed_imports:
-      error_details.extend(["Import failures:"] + [f"  - {err}" for err in failed_imports])
-    if failed_registrations:
-      error_details.extend(["Registration failures:"] + [f"  - {err}" for err in failed_registrations])
-    
-    details = "\n".join(error_details) if error_details else str(e)
-    raise RuntimeError(f"Application initialization failed: {details}")
+        error_details = []
+        if failed_imports:
+            error_details.extend(
+                ["Import failures:"] + [f"  - {err}" for err in failed_imports]
+            )
+        if failed_registrations:
+            error_details.extend(
+                ["Registration failures:"]
+                + [f"  - {err}" for err in failed_registrations]
+            )
+
+        details = "\n".join(error_details) if error_details else str(e)
+        raise RuntimeError(f"Application initialization failed: {details}")
+
 
 
 def run() -> None:
 def run() -> None:
-  """Run the CLI application."""
-  try:
-    init_app()
-    app()
-  except (ValueError, RuntimeError) as e:
-    # Handle configuration and initialization errors cleanly
-    console.print(f"[bold red]Error:[/bold red] {e}")
-    sys.exit(1)
-  except ImportError as e:
-    # Handle module import errors with detailed info
-    console.print(f"[bold red]Module Import Error:[/bold red] {e}")
-    sys.exit(1)
-  except KeyboardInterrupt:
-    # Handle Ctrl+C gracefully
-    console.print("\n[yellow]Operation cancelled by user[/yellow]")
-    sys.exit(130)
-  except Exception as e:
-    # Handle unexpected errors - show simplified message
-    console.print(f"[bold red]Unexpected error:[/bold red] {e}")
-    console.print("[dim]Use --log-level DEBUG for more details[/dim]")
-    sys.exit(1)
+    """Run the CLI application."""
+    try:
+        init_app()
+        app()
+    except (ValueError, RuntimeError) as e:
+        # Handle configuration and initialization errors cleanly
+        console.print(f"[bold red]Error:[/bold red] {e}")
+        sys.exit(1)
+    except ImportError as e:
+        # Handle module import errors with detailed info
+        console.print(f"[bold red]Module Import Error:[/bold red] {e}")
+        sys.exit(1)
+    except KeyboardInterrupt:
+        # Handle Ctrl+C gracefully
+        console.print("\n[yellow]Operation cancelled by user[/yellow]")
+        sys.exit(130)
+    except Exception as e:
+        # Handle unexpected errors - show simplified message
+        console.print(f"[bold red]Unexpected error:[/bold red] {e}")
+        console.print("[dim]Use --log-level DEBUG for more details[/dim]")
+        sys.exit(1)
+
 
 
 if __name__ == "__main__":
 if __name__ == "__main__":
-  run()
+    run()

+ 808 - 746
cli/core/collection.py

@@ -11,758 +11,820 @@ logger = logging.getLogger(__name__)
 
 
 
 
 class VariableCollection:
 class VariableCollection:
-  """Manages variables grouped by sections and builds Jinja context."""
-
-  def __init__(self, spec: dict[str, Any]) -> None:
-    """Initialize VariableCollection from a specification dictionary.
-    
-    Args:
-        spec: Dictionary containing the complete variable specification structure
-              Expected format (as used in compose.py):
-              {
-                "section_key": {
-                  "title": "Section Title",
-                  "prompt": "Optional prompt text",
-                  "toggle": "optional_toggle_var_name", 
-                  "description": "Optional description",
-                  "vars": {
-                    "var_name": {
-                      "description": "Variable description",
-                      "type": "str",
-                      "default": "default_value",
-                      ...
+    """Manages variables grouped by sections and builds Jinja context."""
+
+    def __init__(self, spec: dict[str, Any]) -> None:
+        """Initialize VariableCollection from a specification dictionary.
+
+        Args:
+            spec: Dictionary containing the complete variable specification structure
+                  Expected format (as used in compose.py):
+                  {
+                    "section_key": {
+                      "title": "Section Title",
+                      "prompt": "Optional prompt text",
+                      "toggle": "optional_toggle_var_name",
+                      "description": "Optional description",
+                      "vars": {
+                        "var_name": {
+                          "description": "Variable description",
+                          "type": "str",
+                          "default": "default_value",
+                          ...
+                        }
+                      }
                     }
                     }
                   }
                   }
-                }
-              }
-    """
-    if not isinstance(spec, dict):
-      raise ValueError("Spec must be a dictionary")
-    
-    self._sections: Dict[str, VariableSection] = {}
-    # NOTE: The _variable_map provides a flat, O(1) lookup for any variable by its name,
-    # avoiding the need to iterate through sections. It stores references to the same
-    # Variable objects contained in the _set structure.
-    self._variable_map: Dict[str, Variable] = {}
-    self._initialize_sections(spec)
-    # Validate dependencies after all sections are loaded
-    self._validate_dependencies()
-
-  def _initialize_sections(self, spec: dict[str, Any]) -> None:
-    """Initialize sections from the spec."""
-    for section_key, section_data in spec.items():
-      if not isinstance(section_data, dict):
-        continue
-      
-      section = self._create_section(section_key, section_data)
-      # Guard against None from empty YAML sections (vars: with no content)
-      vars_data = section_data.get("vars") or {}
-      self._initialize_variables(section, vars_data)
-      self._sections[section_key] = section
-    
-    # Validate all variable names are unique across sections
-    self._validate_unique_variable_names()
-
-  def _create_section(self, key: str, data: dict[str, Any]) -> VariableSection:
-    """Create a VariableSection from data."""
-    section_init_data = {
-      "key": key,
-      "title": data.get("title", key.replace("_", " ").title()),
-      "description": data.get("description"),
-      "toggle": data.get("toggle"),
-      "required": data.get("required", key == "general"),
-      "needs": data.get("needs")
-    }
-    return VariableSection(section_init_data)
-
-  def _initialize_variables(self, section: VariableSection, vars_data: dict[str, Any]) -> None:
-    """Initialize variables for a section."""
-    # Guard against None from empty YAML sections
-    if vars_data is None:
-      vars_data = {}
-    
-    for var_name, var_data in vars_data.items():
-      var_init_data = {"name": var_name, **var_data}
-      variable = Variable(var_init_data)
-      section.variables[var_name] = variable
-      # NOTE: Populate the direct lookup map for efficient access.
-      self._variable_map[var_name] = variable
-    
-    # Validate toggle variable after all variables are added
-    self._validate_section_toggle(section)
-    # TODO: Add more section-level validation:
-    #   - Validate that required sections have at least one non-toggle variable
-    #   - Validate that enum variables have non-empty options lists
-    #   - Validate that variable names follow naming conventions (e.g., lowercase_with_underscores)
-    #   - Validate that default values are compatible with their type definitions
-
-  def _validate_unique_variable_names(self) -> None:
-    """Validate that all variable names are unique across all sections."""
-    var_to_sections: Dict[str, List[str]] = defaultdict(list)
-    
-    # Build mapping of variable names to sections
-    for section_key, section in self._sections.items():
-      for var_name in section.variables:
-        var_to_sections[var_name].append(section_key)
-    
-    # Find duplicates and format error
-    duplicates = {var: sections for var, sections in var_to_sections.items() if len(sections) > 1}
-    
-    if duplicates:
-      errors = ["Variable names must be unique across all sections, but found duplicates:"]
-      errors.extend(f"  - '{var}' appears in sections: {', '.join(secs)}" for var, secs in sorted(duplicates.items()))
-      errors.append("\nPlease rename variables to be unique or consolidate them into a single section.")
-      error_msg = "\n".join(errors)
-      logger.error(error_msg)
-      raise ValueError(error_msg)
-  
-  def _validate_section_toggle(self, section: VariableSection) -> None:
-    """Validate that toggle variable is of type bool if it exists.
-    
-    If the toggle variable doesn't exist (e.g., filtered out), removes the toggle.
-    
-    Args:
-        section: The section to validate
-        
-    Raises:
-        ValueError: If toggle variable exists but is not boolean type
-    """
-    if not section.toggle:
-      return
-    
-    toggle_var = section.variables.get(section.toggle)
-    if not toggle_var:
-      # Toggle variable doesn't exist (e.g., was filtered out) - remove toggle metadata
-      section.toggle = None
-      return
-    
-    if toggle_var.type != "bool":
-      raise ValueError(
-        f"Section '{section.key}' toggle variable '{section.toggle}' must be type 'bool', "
-        f"but is type '{toggle_var.type}'"
-      )
-  
-  @staticmethod
-  def _parse_need(need_str: str) -> tuple[str, Optional[Any]]:
-    """Parse a need string into variable name and expected value(s).
-    
-    Supports three formats:
-    1. New format with multiple values: "variable_name=value1,value2" - checks if variable equals any value
-    2. New format with single value: "variable_name=value" - checks if variable equals value
-    3. Old format (backwards compatibility): "section_name" - checks if section is enabled
-    
-    Args:
-        need_str: Need specification string
-        
-    Returns:
-        Tuple of (variable_or_section_name, expected_value)
-        For old format, expected_value is None (means check section enabled)
-        For new format, expected_value is the string value(s) after '=' (string or list)
-    
-    Examples:
-        "traefik_enabled=true" -> ("traefik_enabled", "true")
-        "storage_mode=nfs" -> ("storage_mode", "nfs")
-        "network_mode=bridge,macvlan" -> ("network_mode", ["bridge", "macvlan"])
-        "traefik" -> ("traefik", None)  # Old format: section name
-    """
-    if '=' in need_str:
-      # New format: variable=value or variable=value1,value2
-      parts = need_str.split('=', 1)
-      var_name = parts[0].strip()
-      value_part = parts[1].strip()
-      
-      # Check if multiple values are provided (comma-separated)
-      if ',' in value_part:
-        values = [v.strip() for v in value_part.split(',')]
-        return (var_name, values)
-      else:
-        return (var_name, value_part)
-    else:
-      # Old format: section name (backwards compatibility)
-      return (need_str.strip(), None)
-  
-  def _is_need_satisfied(self, need_str: str) -> bool:
-    """Check if a single need condition is satisfied.
-    
-    Args:
-        need_str: Need specification ("variable=value", "variable=value1,value2" or "section_name")
-        
-    Returns:
-        True if need is satisfied, False otherwise
-    """
-    var_or_section, expected_value = self._parse_need(need_str)
-    
-    if expected_value is None:
-      # Old format: check if section is enabled (backwards compatibility)
-      section = self._sections.get(var_or_section)
-      if not section:
-        logger.warning(f"Need references missing section '{var_or_section}'")
-        return False
-      return section.is_enabled()
-    else:
-      # New format: check if variable has expected value(s)
-      variable = self._variable_map.get(var_or_section)
-      if not variable:
-        logger.warning(f"Need references missing variable '{var_or_section}'")
-        return False
-      
-      # Convert actual value for comparison
-      try:
-        actual_value = variable.convert(variable.value)
-        
-        # Handle multiple expected values (comma-separated in needs)
-        if isinstance(expected_value, list):
-          # Check if actual value matches any of the expected values
-          for expected in expected_value:
-            expected_converted = variable.convert(expected)
-            
-            # Handle boolean comparisons specially
-            if variable.type == "bool":
-              if bool(actual_value) == bool(expected_converted):
-                return True
-            else:
-              # String comparison for other types
-              if actual_value is not None and str(actual_value) == str(expected_converted):
-                return True
-          return False  # None of the expected values matched
-        else:
-          # Single expected value (original behavior)
-          expected_converted = variable.convert(expected_value)
-          
-          # Handle boolean comparisons specially
-          if variable.type == "bool":
-            return bool(actual_value) == bool(expected_converted)
-          
-          # String comparison for other types
-          return str(actual_value) == str(expected_converted) if actual_value is not None else False
-      except Exception as e:
-        logger.debug(f"Failed to compare need '{need_str}': {e}")
-        return False
-  
-  def _validate_dependencies(self) -> None:
-    """Validate section dependencies for cycles and missing references.
-    
-    Raises:
-        ValueError: If circular dependencies or missing section references are found
-    """
-    # Check for missing dependencies in sections
-    for section_key, section in self._sections.items():
-      for dep in section.needs:
-        var_or_section, expected_value = self._parse_need(dep)
-        
-        if expected_value is None:
-          # Old format: validate section exists
-          if var_or_section not in self._sections:
-            raise ValueError(
-              f"Section '{section_key}' depends on '{var_or_section}', but '{var_or_section}' does not exist"
-            )
-        else:
-          # New format: validate variable exists
-          if var_or_section not in self._variable_map:
-            raise ValueError(
-              f"Section '{section_key}' has need '{dep}', but variable '{var_or_section}' does not exist"
+        """
+        if not isinstance(spec, dict):
+            raise ValueError("Spec must be a dictionary")
+
+        self._sections: Dict[str, VariableSection] = {}
+        # NOTE: The _variable_map provides a flat, O(1) lookup for any variable by its name,
+        # avoiding the need to iterate through sections. It stores references to the same
+        # Variable objects contained in the _set structure.
+        self._variable_map: Dict[str, Variable] = {}
+        self._initialize_sections(spec)
+        # Validate dependencies after all sections are loaded
+        self._validate_dependencies()
+
+    def _initialize_sections(self, spec: dict[str, Any]) -> None:
+        """Initialize sections from the spec."""
+        for section_key, section_data in spec.items():
+            if not isinstance(section_data, dict):
+                continue
+
+            section = self._create_section(section_key, section_data)
+            # Guard against None from empty YAML sections (vars: with no content)
+            vars_data = section_data.get("vars") or {}
+            self._initialize_variables(section, vars_data)
+            self._sections[section_key] = section
+
+        # Validate all variable names are unique across sections
+        self._validate_unique_variable_names()
+
+    def _create_section(self, key: str, data: dict[str, Any]) -> VariableSection:
+        """Create a VariableSection from data."""
+        section_init_data = {
+            "key": key,
+            "title": data.get("title", key.replace("_", " ").title()),
+            "description": data.get("description"),
+            "toggle": data.get("toggle"),
+            "required": data.get("required", key == "general"),
+            "needs": data.get("needs"),
+        }
+        return VariableSection(section_init_data)
+
+    def _initialize_variables(
+        self, section: VariableSection, vars_data: dict[str, Any]
+    ) -> None:
+        """Initialize variables for a section."""
+        # Guard against None from empty YAML sections
+        if vars_data is None:
+            vars_data = {}
+
+        for var_name, var_data in vars_data.items():
+            var_init_data = {"name": var_name, **var_data}
+            variable = Variable(var_init_data)
+            section.variables[var_name] = variable
+            # NOTE: Populate the direct lookup map for efficient access.
+            self._variable_map[var_name] = variable
+
+        # Validate toggle variable after all variables are added
+        self._validate_section_toggle(section)
+        # TODO: Add more section-level validation:
+        #   - Validate that required sections have at least one non-toggle variable
+        #   - Validate that enum variables have non-empty options lists
+        #   - Validate that variable names follow naming conventions (e.g., lowercase_with_underscores)
+        #   - Validate that default values are compatible with their type definitions
+
+    def _validate_unique_variable_names(self) -> None:
+        """Validate that all variable names are unique across all sections."""
+        var_to_sections: Dict[str, List[str]] = defaultdict(list)
+
+        # Build mapping of variable names to sections
+        for section_key, section in self._sections.items():
+            for var_name in section.variables:
+                var_to_sections[var_name].append(section_key)
+
+        # Find duplicates and format error
+        duplicates = {
+            var: sections
+            for var, sections in var_to_sections.items()
+            if len(sections) > 1
+        }
+
+        if duplicates:
+            errors = [
+                "Variable names must be unique across all sections, but found duplicates:"
+            ]
+            errors.extend(
+                f"  - '{var}' appears in sections: {', '.join(secs)}"
+                for var, secs in sorted(duplicates.items())
             )
             )
-    
-    # Check for missing dependencies in variables
-    for var_name, variable in self._variable_map.items():
-      for dep in variable.needs:
-        dep_var, expected_value = self._parse_need(dep)
-        if expected_value is not None:  # Only validate new format
-          if dep_var not in self._variable_map:
-            raise ValueError(
-              f"Variable '{var_name}' has need '{dep}', but variable '{dep_var}' does not exist"
+            errors.append(
+                "\nPlease rename variables to be unique or consolidate them into a single section."
             )
             )
-    
-    # Check for circular dependencies using depth-first search
-    # Note: Only checks section-level dependencies in old format (section names)
-    # Variable-level dependencies (variable=value) don't create cycles in the same way
-    visited = set()
-    rec_stack = set()
-    
-    def has_cycle(section_key: str) -> bool:
-      visited.add(section_key)
-      rec_stack.add(section_key)
-      
-      section = self._sections[section_key]
-      for dep in section.needs:
-        # Only check circular deps for old format (section references)
-        dep_name, expected_value = self._parse_need(dep)
-        if expected_value is None and dep_name in self._sections:
-          # Old format section dependency - check for cycles
-          if dep_name not in visited:
-            if has_cycle(dep_name):
-              return True
-          elif dep_name in rec_stack:
+            error_msg = "\n".join(errors)
+            logger.error(error_msg)
+            raise ValueError(error_msg)
+
+    def _validate_section_toggle(self, section: VariableSection) -> None:
+        """Validate that toggle variable is of type bool if it exists.
+
+        If the toggle variable doesn't exist (e.g., filtered out), removes the toggle.
+
+        Args:
+            section: The section to validate
+
+        Raises:
+            ValueError: If toggle variable exists but is not boolean type
+        """
+        if not section.toggle:
+            return
+
+        toggle_var = section.variables.get(section.toggle)
+        if not toggle_var:
+            # Toggle variable doesn't exist (e.g., was filtered out) - remove toggle metadata
+            section.toggle = None
+            return
+
+        if toggle_var.type != "bool":
             raise ValueError(
             raise ValueError(
-              f"Circular dependency detected: '{section_key}' depends on '{dep_name}', "
-              f"which creates a cycle"
+                f"Section '{section.key}' toggle variable '{section.toggle}' must be type 'bool', "
+                f"but is type '{toggle_var.type}'"
             )
             )
-      
-      rec_stack.remove(section_key)
-      return False
-    
-    for section_key in self._sections:
-      if section_key not in visited:
-        has_cycle(section_key)
-  
-  def is_section_satisfied(self, section_key: str) -> bool:
-    """Check if all dependencies for a section are satisfied.
-    
-    Supports both formats:
-    - Old format: "section_name" - checks if section is enabled (backwards compatible)
-    - New format: "variable=value" - checks if variable has specific value
-    
-    Args:
-        section_key: The key of the section to check
-        
-    Returns:
-        True if all dependencies are satisfied, False otherwise
-    """
-    section = self._sections.get(section_key)
-    if not section:
-      return False
-    
-    # No dependencies = always satisfied
-    if not section.needs:
-      return True
-    
-    # Check each dependency using the unified need satisfaction logic
-    for need in section.needs:
-      if not self._is_need_satisfied(need):
-        logger.debug(f"Section '{section_key}' need '{need}' is not satisfied")
-        return False
-    
-    return True
-  
-  def is_variable_satisfied(self, var_name: str) -> bool:
-    """Check if all dependencies for a variable are satisfied.
-    
-    A variable is satisfied if all its needs are met.
-    Needs are specified as "variable_name=value".
-    
-    Args:
-        var_name: The name of the variable to check
-        
-    Returns:
-        True if all dependencies are satisfied, False otherwise
-    """
-    variable = self._variable_map.get(var_name)
-    if not variable:
-      return False
-    
-    # No dependencies = always satisfied
-    if not variable.needs:
-      return True
-    
-    # Check each dependency
-    for need in variable.needs:
-      if not self._is_need_satisfied(need):
-        logger.debug(f"Variable '{var_name}' need '{need}' is not satisfied")
-        return False
-    
-    return True
-
-  def sort_sections(self) -> None:
-    """Sort sections with the following priority:
-    
-    1. Dependencies come before dependents (topological sort)
-    2. Required sections first (in their original order)
-    3. Enabled sections with satisfied dependencies next (in their original order)
-    4. Disabled sections or sections with unsatisfied dependencies last (in their original order)
-    
-    This maintains the original ordering within each group while organizing
-    sections logically for display and user interaction, and ensures that
-    sections are prompted in the correct dependency order.
-    """
-    # First, perform topological sort to respect dependencies
-    sorted_keys = self._topological_sort()
-    
-    # Then apply priority sorting within dependency groups
-    section_items = [(key, self._sections[key]) for key in sorted_keys]
-    
-    # Define sort key: (priority, original_index)
-    # Priority: 0 = required, 1 = enabled with satisfied dependencies, 2 = disabled or unsatisfied dependencies
-    def get_sort_key(item_with_index):
-      index, (key, section) = item_with_index
-      if section.required:
-        priority = 0
-      elif section.is_enabled() and self.is_section_satisfied(key):
-        priority = 1
-      else:
-        priority = 2
-      return (priority, index)
-    
-    # Sort with original index to maintain order within each priority group
-    # Note: This preserves the topological order from earlier
-    sorted_items = sorted(
-      enumerate(section_items),
-      key=get_sort_key
-    )
-    
-    # Rebuild _sections dict in new order
-    self._sections = {key: section for _, (key, section) in sorted_items}
-  
-  def _topological_sort(self) -> List[str]:
-    """Perform topological sort on sections based on dependencies using Kahn's algorithm."""
-    in_degree = {key: len(section.needs) for key, section in self._sections.items()}
-    queue = [key for key, degree in in_degree.items() if degree == 0]
-    queue.sort(key=lambda k: list(self._sections.keys()).index(k))  # Preserve original order
-    result = []
-    
-    while queue:
-      current = queue.pop(0)
-      result.append(current)
-      
-      # Update in-degree for dependent sections
-      for key, section in self._sections.items():
-        if current in section.needs:
-          in_degree[key] -= 1
-          if in_degree[key] == 0:
-            queue.append(key)
-    
-    # Fallback to original order if cycle detected
-    if len(result) != len(self._sections):
-      logger.warning("Topological sort incomplete - using original order")
-      return list(self._sections.keys())
-    
-    return result
-
-  def get_sections(self) -> Dict[str, VariableSection]:
-    """Get all sections in the collection."""
-    return self._sections.copy()
-  
-  def get_section(self, key: str) -> Optional[VariableSection]:
-    """Get a specific section by its key."""
-    return self._sections.get(key)
-  
-  def has_sections(self) -> bool:
-    """Check if the collection has any sections."""
-    return bool(self._sections)
-
-  def get_all_values(self) -> dict[str, Any]:
-    """Get all variable values as a dictionary."""
-    # NOTE: Uses _variable_map for O(1) access
-    return {name: var.convert(var.value) for name, var in self._variable_map.items()}
-  
-  def get_satisfied_values(self) -> dict[str, Any]:
-    """Get variable values only from sections with satisfied dependencies.
-    
-    This respects both toggle states and section dependencies, ensuring that:
-    - Variables from disabled sections (toggle=false) are excluded EXCEPT required variables
-    - Variables from sections with unsatisfied dependencies are excluded
-    - Required variables are always included if their section dependencies are satisfied
-    
-    Returns:
-        Dictionary of variable names to values for satisfied sections only
-    """
-    satisfied_values = {}
-    
-    for section_key, section in self._sections.items():
-      # Skip sections with unsatisfied dependencies (even required variables need satisfied deps)
-      if not self.is_section_satisfied(section_key):
-        logger.debug(f"Excluding variables from section '{section_key}' - dependencies not satisfied")
-        continue
-      
-      # Check if section is enabled
-      is_enabled = section.is_enabled()
-      
-      if is_enabled:
-        # Include all variables from enabled section
-        for var_name, variable in section.variables.items():
-          satisfied_values[var_name] = variable.convert(variable.value)
-      else:
-        # Section is disabled - only include required variables
-        logger.debug(f"Section '{section_key}' is disabled - including only required variables")
-        for var_name, variable in section.variables.items():
-          if variable.required:
-            logger.debug(f"Including required variable '{var_name}' from disabled section '{section_key}'")
-            satisfied_values[var_name] = variable.convert(variable.value)
-    
-    return satisfied_values
-
-  def get_sensitive_variables(self) -> Dict[str, Any]:
-    """Get only the sensitive variables with their values."""
-    return {name: var.value for name, var in self._variable_map.items() if var.sensitive and var.value}
-
-  def apply_defaults(self, defaults: dict[str, Any], origin: str = "cli") -> list[str]:
-    """Apply default values to variables, updating their origin.
-    
-    Args:
-        defaults: Dictionary mapping variable names to their default values
-        origin: Source of these defaults (e.g., 'config', 'cli')
-        
-    Returns:
-        List of variable names that were successfully updated
-    """
-    # NOTE: This method uses the _variable_map for a significant performance gain,
-    # as it allows direct O(1) lookup of variables instead of iterating
-    # through all sections to find a match.
-    successful = []
-    errors = []
-    
-    for var_name, value in defaults.items():
-      try:
+
+    @staticmethod
+    def _parse_need(need_str: str) -> tuple[str, Optional[Any]]:
+        """Parse a need string into variable name and expected value(s).
+
+        Supports three formats:
+        1. New format with multiple values: "variable_name=value1,value2" - checks if variable equals any value
+        2. New format with single value: "variable_name=value" - checks if variable equals value
+        3. Old format (backwards compatibility): "section_name" - checks if section is enabled
+
+        Args:
+            need_str: Need specification string
+
+        Returns:
+            Tuple of (variable_or_section_name, expected_value)
+            For old format, expected_value is None (means check section enabled)
+            For new format, expected_value is the string value(s) after '=' (string or list)
+
+        Examples:
+            "traefik_enabled=true" -> ("traefik_enabled", "true")
+            "storage_mode=nfs" -> ("storage_mode", "nfs")
+            "network_mode=bridge,macvlan" -> ("network_mode", ["bridge", "macvlan"])
+            "traefik" -> ("traefik", None)  # Old format: section name
+        """
+        if "=" in need_str:
+            # New format: variable=value or variable=value1,value2
+            parts = need_str.split("=", 1)
+            var_name = parts[0].strip()
+            value_part = parts[1].strip()
+
+            # Check if multiple values are provided (comma-separated)
+            if "," in value_part:
+                values = [v.strip() for v in value_part.split(",")]
+                return (var_name, values)
+            else:
+                return (var_name, value_part)
+        else:
+            # Old format: section name (backwards compatibility)
+            return (need_str.strip(), None)
+
+    def _is_need_satisfied(self, need_str: str) -> bool:
+        """Check if a single need condition is satisfied.
+
+        Args:
+            need_str: Need specification ("variable=value", "variable=value1,value2" or "section_name")
+
+        Returns:
+            True if need is satisfied, False otherwise
+        """
+        var_or_section, expected_value = self._parse_need(need_str)
+
+        if expected_value is None:
+            # Old format: check if section is enabled (backwards compatibility)
+            section = self._sections.get(var_or_section)
+            if not section:
+                logger.warning(f"Need references missing section '{var_or_section}'")
+                return False
+            return section.is_enabled()
+        else:
+            # New format: check if variable has expected value(s)
+            variable = self._variable_map.get(var_or_section)
+            if not variable:
+                logger.warning(f"Need references missing variable '{var_or_section}'")
+                return False
+
+            # Convert actual value for comparison
+            try:
+                actual_value = variable.convert(variable.value)
+
+                # Handle multiple expected values (comma-separated in needs)
+                if isinstance(expected_value, list):
+                    # Check if actual value matches any of the expected values
+                    for expected in expected_value:
+                        expected_converted = variable.convert(expected)
+
+                        # Handle boolean comparisons specially
+                        if variable.type == "bool":
+                            if bool(actual_value) == bool(expected_converted):
+                                return True
+                        else:
+                            # String comparison for other types
+                            if actual_value is not None and str(actual_value) == str(
+                                expected_converted
+                            ):
+                                return True
+                    return False  # None of the expected values matched
+                else:
+                    # Single expected value (original behavior)
+                    expected_converted = variable.convert(expected_value)
+
+                    # Handle boolean comparisons specially
+                    if variable.type == "bool":
+                        return bool(actual_value) == bool(expected_converted)
+
+                    # String comparison for other types
+                    return (
+                        str(actual_value) == str(expected_converted)
+                        if actual_value is not None
+                        else False
+                    )
+            except Exception as e:
+                logger.debug(f"Failed to compare need '{need_str}': {e}")
+                return False
+
+    def _validate_dependencies(self) -> None:
+        """Validate section dependencies for cycles and missing references.
+
+        Raises:
+            ValueError: If circular dependencies or missing section references are found
+        """
+        # Check for missing dependencies in sections
+        for section_key, section in self._sections.items():
+            for dep in section.needs:
+                var_or_section, expected_value = self._parse_need(dep)
+
+                if expected_value is None:
+                    # Old format: validate section exists
+                    if var_or_section not in self._sections:
+                        raise ValueError(
+                            f"Section '{section_key}' depends on '{var_or_section}', but '{var_or_section}' does not exist"
+                        )
+                else:
+                    # New format: validate variable exists
+                    if var_or_section not in self._variable_map:
+                        raise ValueError(
+                            f"Section '{section_key}' has need '{dep}', but variable '{var_or_section}' does not exist"
+                        )
+
+        # Check for missing dependencies in variables
+        for var_name, variable in self._variable_map.items():
+            for dep in variable.needs:
+                dep_var, expected_value = self._parse_need(dep)
+                if expected_value is not None:  # Only validate new format
+                    if dep_var not in self._variable_map:
+                        raise ValueError(
+                            f"Variable '{var_name}' has need '{dep}', but variable '{dep_var}' does not exist"
+                        )
+
+        # Check for circular dependencies using depth-first search
+        # Note: Only checks section-level dependencies in old format (section names)
+        # Variable-level dependencies (variable=value) don't create cycles in the same way
+        visited = set()
+        rec_stack = set()
+
+        def has_cycle(section_key: str) -> bool:
+            visited.add(section_key)
+            rec_stack.add(section_key)
+
+            section = self._sections[section_key]
+            for dep in section.needs:
+                # Only check circular deps for old format (section references)
+                dep_name, expected_value = self._parse_need(dep)
+                if expected_value is None and dep_name in self._sections:
+                    # Old format section dependency - check for cycles
+                    if dep_name not in visited:
+                        if has_cycle(dep_name):
+                            return True
+                    elif dep_name in rec_stack:
+                        raise ValueError(
+                            f"Circular dependency detected: '{section_key}' depends on '{dep_name}', "
+                            f"which creates a cycle"
+                        )
+
+            rec_stack.remove(section_key)
+            return False
+
+        for section_key in self._sections:
+            if section_key not in visited:
+                has_cycle(section_key)
+
+    def is_section_satisfied(self, section_key: str) -> bool:
+        """Check if all dependencies for a section are satisfied.
+
+        Supports both formats:
+        - Old format: "section_name" - checks if section is enabled (backwards compatible)
+        - New format: "variable=value" - checks if variable has specific value
+
+        Args:
+            section_key: The key of the section to check
+
+        Returns:
+            True if all dependencies are satisfied, False otherwise
+        """
+        section = self._sections.get(section_key)
+        if not section:
+            return False
+
+        # No dependencies = always satisfied
+        if not section.needs:
+            return True
+
+        # Check each dependency using the unified need satisfaction logic
+        for need in section.needs:
+            if not self._is_need_satisfied(need):
+                logger.debug(f"Section '{section_key}' need '{need}' is not satisfied")
+                return False
+
+        return True
+
+    def is_variable_satisfied(self, var_name: str) -> bool:
+        """Check if all dependencies for a variable are satisfied.
+
+        A variable is satisfied if all its needs are met.
+        Needs are specified as "variable_name=value".
+
+        Args:
+            var_name: The name of the variable to check
+
+        Returns:
+            True if all dependencies are satisfied, False otherwise
+        """
         variable = self._variable_map.get(var_name)
         variable = self._variable_map.get(var_name)
         if not variable:
         if not variable:
-          logger.warning(f"Variable '{var_name}' not found in template")
-          continue
-        
-        # Store original value before overriding (for display purposes)
-        # Only store if this is the first time config is being applied
-        if origin == "config" and not hasattr(variable, '_original_stored'):
-          variable.original_value = variable.value
-          variable._original_stored = True
-        
-        # Convert and set the new value
-        converted_value = variable.convert(value)
-        variable.value = converted_value
-        
-        # Set origin to the current source (not a chain)
-        variable.origin = origin
-        
-        successful.append(var_name)
-          
-      except ValueError as e:
-        error_msg = f"Invalid value for '{var_name}': {value} - {e}"
-        errors.append(error_msg)
-        logger.error(error_msg)
-    
-    if errors:
-      logger.warning(f"Some defaults failed to apply: {'; '.join(errors)}")
-    
-    return successful
-  
-  def validate_all(self) -> None:
-    """Validate all variables in the collection.
-    
-    Validates:
-    - All variables in enabled sections with satisfied dependencies
-    - Required variables even if their section is disabled (but dependencies must be satisfied)
-    """
-    errors: list[str] = []
-
-    for section_key, section in self._sections.items():
-      # Skip sections with unsatisfied dependencies (even for required variables)
-      if not self.is_section_satisfied(section_key):
-        logger.debug(f"Skipping validation for section '{section_key}' - dependencies not satisfied")
-        continue
-      
-      # Check if section is enabled
-      is_enabled = section.is_enabled()
-      
-      if not is_enabled:
-        logger.debug(f"Section '{section_key}' is disabled - validating only required variables")
-
-      # Validate variables in the section
-      for var_name, variable in section.variables.items():
-        # Skip non-required variables in disabled sections
-        if not is_enabled and not variable.required:
-          continue
-        
-        try:
-          # Skip autogenerated variables when empty
-          if variable.autogenerated and not variable.value:
-            continue
-          
-          # Check required fields
-          if variable.value is None:
-            # Optional variables can be None/empty
-            if hasattr(variable, 'optional') and variable.optional:
-              continue
-            if variable.is_required():
-              errors.append(f"{section.key}.{var_name} (required - no default provided)")
-            continue
-
-          # Validate typed value
-          typed = variable.convert(variable.value)
-          if variable.type not in ("bool",) and not typed:
-            msg = f"{section.key}.{var_name}"
-            errors.append(f"{msg} (required - cannot be empty)" if variable.is_required() else f"{msg} (empty)")
-
-        except ValueError as e:
-          errors.append(f"{section.key}.{var_name} (invalid format: {e})")
-
-    if errors:
-      error_msg = "Variable validation failed: " + ", ".join(errors)
-      logger.error(error_msg)
-      raise ValueError(error_msg)
-
-  def merge(self, other_spec: Union[Dict[str, Any], 'VariableCollection'], origin: str = "override") -> 'VariableCollection':
-    """Merge another spec or VariableCollection into this one with precedence tracking.
-    
-    OPTIMIZED: Works directly on objects without dict conversions for better performance.
-    
-    The other spec/collection has higher precedence and will override values in self.
-    Creates a new VariableCollection with merged data.
-    
-    Args:
-        other_spec: Either a spec dictionary or another VariableCollection to merge
-        origin: Origin label for variables from other_spec (e.g., 'template', 'config')
-        
-    Returns:
-        New VariableCollection with merged data
-        
-    Example:
-        module_vars = VariableCollection(module_spec)
-        template_vars = module_vars.merge(template_spec, origin='template')
-        # Variables from template_spec override module_spec
-        # Origins tracked: 'module' or 'module -> template'
-    """
-    # Convert dict to VariableCollection if needed (only once)
-    if isinstance(other_spec, dict):
-      other = VariableCollection(other_spec)
-    else:
-      other = other_spec
-    
-    # Create new collection without calling __init__ (optimization)
-    merged = VariableCollection.__new__(VariableCollection)
-    merged._sections = {}
-    merged._variable_map = {}
-    
-    # First pass: clone sections from self
-    for section_key, self_section in self._sections.items():
-      if section_key in other._sections:
-        # Section exists in both - will merge
-        merged._sections[section_key] = self._merge_sections(
-          self_section, 
-          other._sections[section_key], 
-          origin
-        )
-      else:
-        # Section only in self - clone it
-        merged._sections[section_key] = self_section.clone()
-    
-    # Second pass: add sections that only exist in other
-    for section_key, other_section in other._sections.items():
-      if section_key not in merged._sections:
-        # New section from other - clone with origin update
-        merged._sections[section_key] = other_section.clone(origin_update=origin)
-    
-    # Rebuild variable map for O(1) lookups
-    for section in merged._sections.values():
-      for var_name, variable in section.variables.items():
-        merged._variable_map[var_name] = variable
-    
-    return merged
-  
-  def _merge_sections(self, self_section: VariableSection, other_section: VariableSection, origin: str) -> VariableSection:
-    """Merge two sections, with other_section taking precedence."""
-    merged_section = self_section.clone()
-    
-    # Update section metadata from other (other takes precedence)
-    # Only override if explicitly provided in other AND has a value
-    for attr in ('title', 'description', 'toggle'):
-      other_value = getattr(other_section, attr)
-      if hasattr(other_section, '_explicit_fields') and attr in other_section._explicit_fields and other_value:
-        setattr(merged_section, attr, other_value)
-    
-    merged_section.required = other_section.required
-    # Respect explicit clears for dependencies (explicit null/empty clears, missing field preserves)
-    if hasattr(other_section, '_explicit_fields') and 'needs' in other_section._explicit_fields:
-      merged_section.needs = other_section.needs.copy() if other_section.needs else []
-    
-    # Merge variables
-    for var_name, other_var in other_section.variables.items():
-      if var_name in merged_section.variables:
-        # Variable exists in both - merge with other taking precedence
-        self_var = merged_section.variables[var_name]
-        
-        # Build update dict with ONLY explicitly provided fields from other
-        update = {'origin': origin}
-        field_map = {
-          'type': other_var.type,
-          'description': other_var.description,
-          'prompt': other_var.prompt,
-          'options': other_var.options,
-          'sensitive': other_var.sensitive,
-          'extra': other_var.extra,
+            return False
+
+        # No dependencies = always satisfied
+        if not variable.needs:
+            return True
+
+        # Check each dependency
+        for need in variable.needs:
+            if not self._is_need_satisfied(need):
+                logger.debug(f"Variable '{var_name}' need '{need}' is not satisfied")
+                return False
+
+        return True
+
+    def sort_sections(self) -> None:
+        """Sort sections with the following priority:
+
+        1. Dependencies come before dependents (topological sort)
+        2. Required sections first (in their original order)
+        3. Enabled sections with satisfied dependencies next (in their original order)
+        4. Disabled sections or sections with unsatisfied dependencies last (in their original order)
+
+        This maintains the original ordering within each group while organizing
+        sections logically for display and user interaction, and ensures that
+        sections are prompted in the correct dependency order.
+        """
+        # First, perform topological sort to respect dependencies
+        sorted_keys = self._topological_sort()
+
+        # Then apply priority sorting within dependency groups
+        section_items = [(key, self._sections[key]) for key in sorted_keys]
+
+        # Define sort key: (priority, original_index)
+        # Priority: 0 = required, 1 = enabled with satisfied dependencies, 2 = disabled or unsatisfied dependencies
+        def get_sort_key(item_with_index):
+            index, (key, section) = item_with_index
+            if section.required:
+                priority = 0
+            elif section.is_enabled() and self.is_section_satisfied(key):
+                priority = 1
+            else:
+                priority = 2
+            return (priority, index)
+
+        # Sort with original index to maintain order within each priority group
+        # Note: This preserves the topological order from earlier
+        sorted_items = sorted(enumerate(section_items), key=get_sort_key)
+
+        # Rebuild _sections dict in new order
+        self._sections = {key: section for _, (key, section) in sorted_items}
+
def _topological_sort(self) -> List[str]:
    """Return section keys in dependency order (Kahn's algorithm).

    Sections with no dependencies come first; ties preserve the original
    insertion order of ``self._sections``. If a cycle (or a dependency on
    an unknown section) prevents a complete ordering, the original
    declaration order is returned as a fallback.

    Returns:
        List of section keys in topological order.
    """
    from collections import deque

    # Map each section to the sections that depend on it, so newly-ready
    # sections are found in O(1) instead of rescanning every section per
    # pop (the previous implementation was O(V*E)).
    dependents: Dict[str, List[str]] = {key: [] for key in self._sections}
    in_degree: Dict[str, int] = {}
    for key, section in self._sections.items():
        in_degree[key] = len(section.needs)
        for dep in section.needs:
            # A dependency on an unknown section leaves in_degree > 0,
            # which is reported as an incomplete sort below (unchanged
            # behavior from the original scan-based version).
            if dep in dependents:
                dependents[dep].append(key)

    # dicts preserve insertion order, so the initial queue is already in
    # the sections' original order -- the old O(n^2 log n) sort was a no-op.
    # deque gives O(1) popleft where list.pop(0) was O(n).
    queue = deque(key for key, degree in in_degree.items() if degree == 0)
    result: List[str] = []

    while queue:
        current = queue.popleft()
        result.append(current)

        # Release sections whose last dependency was just emitted.
        for dependent in dependents[current]:
            in_degree[dependent] -= 1
            if in_degree[dependent] == 0:
                queue.append(dependent)

    # Fallback to original order if cycle detected
    if len(result) != len(self._sections):
        logger.warning("Topological sort incomplete - using original order")
        return list(self._sections.keys())

    return result
+
def get_sections(self) -> Dict[str, "VariableSection"]:
    """Return a shallow copy of all sections, keyed by section key."""
    return dict(self._sections)
+
def get_section(self, key: str) -> Optional["VariableSection"]:
    """Look up a single section by key, returning None when absent."""
    try:
        return self._sections[key]
    except KeyError:
        return None
+
def has_sections(self) -> bool:
    """Return True when at least one section is present."""
    return len(self._sections) > 0
+
def get_all_values(self) -> dict[str, Any]:
    """Return every variable's converted value, keyed by variable name."""
    # _variable_map gives O(1) access to every variable across sections.
    values: dict[str, Any] = {}
    for name, variable in self._variable_map.items():
        values[name] = variable.convert(variable.value)
    return values
-        
-        # Add fields that were explicitly provided, even if falsy/empty
-        for field, value in field_map.items():
-          if field in other_var._explicit_fields:
-            update[field] = value
-        
-        # For boolean flags, only copy if explicitly provided in other
-        # This prevents False defaults from overriding True values
-        for bool_field in ('optional', 'autogenerated', 'required'):
-          if bool_field in other_var._explicit_fields:
-            update[bool_field] = getattr(other_var, bool_field)
-        
-        # Special handling for value/default (allow explicit null to clear)
-        if 'value' in other_var._explicit_fields:
-          update['value'] = other_var.value
-        elif 'default' in other_var._explicit_fields:
-          update['value'] = other_var.value
-        
-        merged_section.variables[var_name] = self_var.clone(update=update)
-      else:
-        # New variable from other - clone with origin
-        merged_section.variables[var_name] = other_var.clone(update={'origin': origin})
-    
-    return merged_section
-  
def filter_to_used(
    self, used_variables: Set[str], keep_sensitive: bool = True
) -> "VariableCollection":
    """Return a copy of this collection reduced to the variables in use.

    A variable survives when its name appears in ``used_variables`` or,
    when ``keep_sensitive`` is True, when it is marked sensitive.
    Sections left without any variables are omitted from the result.
    The original collection is not modified.

    Args:
        used_variables: Names of variables that are actually used.
        keep_sensitive: Keep sensitive variables even when unused.

    Returns:
        New VariableCollection with the filtered variables.
    """
    # Assemble the result without running __init__ (no spec to parse).
    trimmed = VariableCollection.__new__(VariableCollection)
    trimmed._sections = {}
    trimmed._variable_map = {}

    for section_key, section in self._sections.items():
        # Rebuild an empty section shell carrying identical metadata.
        shell = VariableSection(
            {
                "key": section.key,
                "title": section.title,
                "description": section.description,
                "toggle": section.toggle,
                "required": section.required,
                "needs": section.needs.copy() if section.needs else None,
            }
        )

        for var_name, variable in section.variables.items():
            if var_name in used_variables or (keep_sensitive and variable.sensitive):
                shell.variables[var_name] = variable.clone()

        if shell.variables:
            trimmed._sections[section_key] = shell
            trimmed._variable_map.update(shell.variables)

    return trimmed
-  
def get_all_variable_names(self) -> Set[str]:
    """Return the name of every variable known to this collection.

    Returns:
        Set of all variable names.
    """
    # Iterating a dict yields its keys; set() materializes them.
    return set(self._variable_map)
+
def get_satisfied_values(self) -> dict[str, Any]:
    """Collect variable values from sections whose dependencies are met.

    Toggle state and section dependencies are both honored:
    - sections with unsatisfied dependencies contribute nothing,
    - disabled sections (toggle off) contribute only required variables,
    - enabled sections contribute every variable.

    Returns:
        Dictionary of variable names to values for satisfied sections only
    """
    values: dict[str, Any] = {}

    for key, section in self._sections.items():
        if not self.is_section_satisfied(key):
            # Even required variables are skipped when dependencies fail.
            logger.debug(
                f"Excluding variables from section '{key}' - dependencies not satisfied"
            )
            continue

        if section.is_enabled():
            # Enabled section: every variable is collected.
            for name, variable in section.variables.items():
                values[name] = variable.convert(variable.value)
            continue

        # Disabled section: only its required variables are collected.
        logger.debug(
            f"Section '{key}' is disabled - including only required variables"
        )
        for name, variable in section.variables.items():
            if not variable.required:
                continue
            logger.debug(
                f"Including required variable '{name}' from disabled section '{key}'"
            )
            values[name] = variable.convert(variable.value)

    return values
+
def get_sensitive_variables(self) -> Dict[str, Any]:
    """Return values of sensitive variables that currently hold a value."""
    result: Dict[str, Any] = {}
    for name, variable in self._variable_map.items():
        # Both conditions: flagged sensitive AND a truthy value present.
        if variable.sensitive and variable.value:
            result[name] = variable.value
    return result
+
def apply_defaults(
    self, defaults: dict[str, Any], origin: str = "cli"
) -> list[str]:
    """Override variable values from a defaults mapping.

    Unknown variable names are logged and skipped; conversion failures
    are collected and reported in a single warning at the end.

    Args:
        defaults: Dictionary mapping variable names to their default values
        origin: Source of these defaults (e.g., 'config', 'cli')

    Returns:
        List of variable names that were successfully updated
    """
    # _variable_map allows direct O(1) lookups instead of scanning
    # every section for each name.
    applied: list[str] = []
    failures: list[str] = []

    for name, raw_value in defaults.items():
        variable = self._variable_map.get(name)
        if variable is None:
            logger.warning(f"Variable '{name}' not found in template")
            continue

        try:
            # Remember the pre-config value once, for display purposes.
            if origin == "config" and not hasattr(variable, "_original_stored"):
                variable.original_value = variable.value
                variable._original_stored = True

            variable.value = variable.convert(raw_value)
            # Origin records only the latest source, not a chain.
            variable.origin = origin
            applied.append(name)
        except ValueError as e:
            message = f"Invalid value for '{name}': {raw_value} - {e}"
            failures.append(message)
            logger.error(message)

    if failures:
        logger.warning(f"Some defaults failed to apply: {'; '.join(failures)}")

    return applied
+
def validate_all(self) -> None:
    """Validate every applicable variable in the collection.

    Covered variables:
    - all variables in enabled sections whose dependencies are satisfied,
    - required variables of disabled sections (their section dependencies
      must still be satisfied).

    Raises:
        ValueError: listing every variable that failed validation.
    """
    problems: list[str] = []

    for key, section in self._sections.items():
        if not self.is_section_satisfied(key):
            # Unsatisfied dependencies exempt the whole section,
            # required variables included.
            logger.debug(
                f"Skipping validation for section '{key}' - dependencies not satisfied"
            )
            continue

        enabled = section.is_enabled()
        if not enabled:
            logger.debug(
                f"Section '{key}' is disabled - validating only required variables"
            )

        for name, variable in section.variables.items():
            # Disabled sections only have their required variables checked.
            if not enabled and not variable.required:
                continue

            try:
                # Empty autogenerated values get filled in later; skip them.
                if variable.autogenerated and not variable.value:
                    continue

                if variable.value is None:
                    # Explicitly optional variables may stay unset.
                    if getattr(variable, "optional", False):
                        continue
                    if variable.is_required():
                        problems.append(
                            f"{section.key}.{name} (required - no default provided)"
                        )
                    continue

                # Conversion validates the format and yields the typed
                # value used for the emptiness check below.
                typed = variable.convert(variable.value)
                if variable.type not in ("bool",) and not typed:
                    label = f"{section.key}.{name}"
                    if variable.is_required():
                        problems.append(f"{label} (required - cannot be empty)")
                    else:
                        problems.append(f"{label} (empty)")

            except ValueError as e:
                problems.append(f"{section.key}.{name} (invalid format: {e})")

    if problems:
        error_msg = "Variable validation failed: " + ", ".join(problems)
        logger.error(error_msg)
        raise ValueError(error_msg)
+
def merge(
    self,
    other_spec: Union[Dict[str, Any], "VariableCollection"],
    origin: str = "override",
) -> "VariableCollection":
    """Merge another spec or collection into this one, other side winning.

    Sections present in both inputs are merged field-by-field (see
    ``_merge_sections``); sections unique to either side are cloned into
    the result. Variables coming from ``other_spec`` get ``origin``
    recorded on them. Neither input is mutated.

    Args:
        other_spec: Either a spec dictionary or another VariableCollection to merge
        origin: Origin label for variables from other_spec (e.g., 'template', 'config')

    Returns:
        New VariableCollection with merged data
    """
    # Normalize the input: build a collection from a raw spec dict once.
    other = (
        VariableCollection(other_spec)
        if isinstance(other_spec, dict)
        else other_spec
    )

    # Bypass __init__: the merged collection is assembled piecewise.
    result = VariableCollection.__new__(VariableCollection)
    result._sections = {}
    result._variable_map = {}

    # Sections from self, merged where the other side has the same key.
    for key, own_section in self._sections.items():
        counterpart = other._sections.get(key)
        if counterpart is not None:
            result._sections[key] = self._merge_sections(
                own_section, counterpart, origin
            )
        else:
            result._sections[key] = own_section.clone()

    # Sections that exist only on the other side.
    for key, foreign_section in other._sections.items():
        if key not in result._sections:
            result._sections[key] = foreign_section.clone(origin_update=origin)

    # Refresh the flat name -> variable index used for O(1) lookups.
    for section in result._sections.values():
        for name, variable in section.variables.items():
            result._variable_map[name] = variable

    return result
+
def _merge_sections(
    self,
    self_section: "VariableSection",
    other_section: "VariableSection",
    origin: str,
) -> "VariableSection":
    """Merge two sections with ``other_section`` taking precedence.

    Only fields explicitly provided on the other side override the base;
    boolean flags and value/default get special handling so that implicit
    defaults never clobber real data.
    """
    merged = self_section.clone()

    # Section metadata: override only when explicitly set on the other
    # side AND the value is truthy.
    for attr in ("title", "description", "toggle"):
        candidate = getattr(other_section, attr)
        explicitly_set = (
            hasattr(other_section, "_explicit_fields")
            and attr in other_section._explicit_fields
        )
        if explicitly_set and candidate:
            setattr(merged, attr, candidate)

    merged.required = other_section.required

    # 'needs': an explicit null/empty clears the list, a missing field
    # preserves ours.
    if (
        hasattr(other_section, "_explicit_fields")
        and "needs" in other_section._explicit_fields
    ):
        merged.needs = other_section.needs.copy() if other_section.needs else []

    for name, other_var in other_section.variables.items():
        base_var = merged.variables.get(name)
        if base_var is None:
            # Brand-new variable: clone it and tag its origin.
            merged.variables[name] = other_var.clone(update={"origin": origin})
            continue

        # Variable exists on both sides: build an update dict carrying
        # ONLY the fields explicitly provided on the other side.
        update: Dict[str, Any] = {"origin": origin}

        candidates = {
            "type": other_var.type,
            "description": other_var.description,
            "prompt": other_var.prompt,
            "options": other_var.options,
            "sensitive": other_var.sensitive,
            "extra": other_var.extra,
        }
        # Copy when explicitly provided, even if the value is falsy/empty.
        for field, value in candidates.items():
            if field in other_var._explicit_fields:
                update[field] = value

        # Boolean flags: copying an implicit False would wipe a real True.
        for flag in ("optional", "autogenerated", "required"):
            if flag in other_var._explicit_fields:
                update[flag] = getattr(other_var, flag)

        # value/default: an explicit null is allowed to clear the value.
        if (
            "value" in other_var._explicit_fields
            or "default" in other_var._explicit_fields
        ):
            update["value"] = other_var.value

        merged.variables[name] = base_var.clone(update=update)

    return merged
+
def filter_to_used(
    self, used_variables: Set[str], keep_sensitive: bool = True
) -> "VariableCollection":
    """Build a new collection limited to used (and optionally sensitive) vars.

    Sections that end up with no variables are dropped entirely; the
    original collection is left untouched.

    Args:
        used_variables: Set of variable names that are actually used
        keep_sensitive: If True, also keep sensitive variables even if not in used set

    Returns:
        New VariableCollection with filtered variables
    """
    # Assemble the result without running __init__ (no spec to parse).
    result = VariableCollection.__new__(VariableCollection)
    result._sections = {}
    result._variable_map = {}

    for key, section in self._sections.items():
        # Rebuild an empty section shell with identical metadata.
        shell = VariableSection(
            {
                "key": section.key,
                "title": section.title,
                "description": section.description,
                "toggle": section.toggle,
                "required": section.required,
                "needs": section.needs.copy() if section.needs else None,
            }
        )

        for name, variable in section.variables.items():
            # Keep when used, or when sensitive and keep_sensitive is set.
            keep = name in used_variables or (keep_sensitive and variable.sensitive)
            if keep:
                shell.variables[name] = variable.clone()

        if not shell.variables:
            continue  # drop sections with nothing left in them

        result._sections[key] = shell
        for name, variable in shell.variables.items():
            result._variable_map[name] = variable

    return result
+
def get_all_variable_names(self) -> Set[str]:
    """Get set of all variable names across all sections.

    Returns:
        Set of all variable names
    """
    # Iterating a dict yields its keys; a set comprehension collects them.
    return {name for name in self._variable_map}

文件差異過大導致無法顯示
+ 271 - 201
cli/core/config.py


文件差異過大導致無法顯示
+ 340 - 216
cli/core/display.py


+ 32 - 14
cli/core/exceptions.py

@@ -9,27 +9,31 @@ from typing import Optional, List, Dict
 
 
 class BoilerplatesError(Exception):
 class BoilerplatesError(Exception):
     """Base exception for all boilerplates CLI errors."""
     """Base exception for all boilerplates CLI errors."""
+
     pass
     pass
 
 
 
 
 class ConfigError(BoilerplatesError):
 class ConfigError(BoilerplatesError):
     """Raised when configuration operations fail."""
     """Raised when configuration operations fail."""
+
     pass
     pass
 
 
 
 
 class ConfigValidationError(ConfigError):
 class ConfigValidationError(ConfigError):
     """Raised when configuration validation fails."""
     """Raised when configuration validation fails."""
+
     pass
     pass
 
 
 
 
 class TemplateError(BoilerplatesError):
 class TemplateError(BoilerplatesError):
     """Base exception for template-related errors."""
     """Base exception for template-related errors."""
+
     pass
     pass
 
 
 
 
 class TemplateNotFoundError(TemplateError):
 class TemplateNotFoundError(TemplateError):
     """Raised when a template cannot be found."""
     """Raised when a template cannot be found."""
-    
+
     def __init__(self, template_id: str, module_name: Optional[str] = None):
     def __init__(self, template_id: str, module_name: Optional[str] = None):
         self.template_id = template_id
         self.template_id = template_id
         self.module_name = module_name
         self.module_name = module_name
@@ -41,7 +45,7 @@ class TemplateNotFoundError(TemplateError):
 
 
 class DuplicateTemplateError(TemplateError):
 class DuplicateTemplateError(TemplateError):
     """Raised when duplicate template IDs are found within the same library."""
     """Raised when duplicate template IDs are found within the same library."""
-    
+
     def __init__(self, template_id: str, library_name: str):
     def __init__(self, template_id: str, library_name: str):
         self.template_id = template_id
         self.template_id = template_id
         self.library_name = library_name
         self.library_name = library_name
@@ -53,12 +57,13 @@ class DuplicateTemplateError(TemplateError):
 
 
 class TemplateLoadError(TemplateError):
 class TemplateLoadError(TemplateError):
     """Raised when a template fails to load."""
     """Raised when a template fails to load."""
+
     pass
     pass
 
 
 
 
 class TemplateSyntaxError(TemplateError):
 class TemplateSyntaxError(TemplateError):
     """Raised when a Jinja2 template has syntax errors."""
     """Raised when a Jinja2 template has syntax errors."""
-    
+
     def __init__(self, template_id: str, errors: List[str]):
     def __init__(self, template_id: str, errors: List[str]):
         self.template_id = template_id
         self.template_id = template_id
         self.errors = errors
         self.errors = errors
@@ -68,13 +73,20 @@ class TemplateSyntaxError(TemplateError):
 
 
 class TemplateValidationError(TemplateError):
 class TemplateValidationError(TemplateError):
     """Raised when template validation fails."""
     """Raised when template validation fails."""
+
     pass
     pass
 
 
 
 
 class IncompatibleSchemaVersionError(TemplateError):
 class IncompatibleSchemaVersionError(TemplateError):
     """Raised when a template uses a schema version not supported by the module."""
     """Raised when a template uses a schema version not supported by the module."""
-    
-    def __init__(self, template_id: str, template_schema: str, module_schema: str, module_name: str):
+
+    def __init__(
+        self,
+        template_id: str,
+        template_schema: str,
+        module_schema: str,
+        module_name: str,
+    ):
         self.template_id = template_id
         self.template_id = template_id
         self.template_schema = template_schema
         self.template_schema = template_schema
         self.module_schema = module_schema
         self.module_schema = module_schema
@@ -91,7 +103,7 @@ class IncompatibleSchemaVersionError(TemplateError):
 
 
 class TemplateRenderError(TemplateError):
 class TemplateRenderError(TemplateError):
     """Raised when template rendering fails."""
     """Raised when template rendering fails."""
-    
+
     def __init__(
     def __init__(
         self,
         self,
         message: str,
         message: str,
@@ -101,7 +113,7 @@ class TemplateRenderError(TemplateError):
         context_lines: Optional[List[str]] = None,
         context_lines: Optional[List[str]] = None,
         variable_context: Optional[Dict[str, str]] = None,
         variable_context: Optional[Dict[str, str]] = None,
         suggestions: Optional[List[str]] = None,
         suggestions: Optional[List[str]] = None,
-        original_error: Optional[Exception] = None
+        original_error: Optional[Exception] = None,
     ):
     ):
         self.file_path = file_path
         self.file_path = file_path
         self.line_number = line_number
         self.line_number = line_number
@@ -110,10 +122,10 @@ class TemplateRenderError(TemplateError):
         self.variable_context = variable_context or {}
         self.variable_context = variable_context or {}
         self.suggestions = suggestions or []
         self.suggestions = suggestions or []
         self.original_error = original_error
         self.original_error = original_error
-        
+
         # Build enhanced error message
         # Build enhanced error message
         parts = [message]
         parts = [message]
-        
+
         if file_path:
         if file_path:
             location = f"File: {file_path}"
             location = f"File: {file_path}"
             if line_number:
             if line_number:
@@ -121,18 +133,19 @@ class TemplateRenderError(TemplateError):
                 if column:
                 if column:
                     location += f", Column: {column}"
                     location += f", Column: {column}"
             parts.append(location)
             parts.append(location)
-        
+
         super().__init__("\n".join(parts))
         super().__init__("\n".join(parts))
 
 
 
 
 class VariableError(BoilerplatesError):
 class VariableError(BoilerplatesError):
     """Base exception for variable-related errors."""
     """Base exception for variable-related errors."""
+
     pass
     pass
 
 
 
 
 class VariableValidationError(VariableError):
 class VariableValidationError(VariableError):
     """Raised when variable validation fails."""
     """Raised when variable validation fails."""
-    
+
     def __init__(self, variable_name: str, message: str):
     def __init__(self, variable_name: str, message: str):
         self.variable_name = variable_name
         self.variable_name = variable_name
         msg = f"Validation error for variable '{variable_name}': {message}"
         msg = f"Validation error for variable '{variable_name}': {message}"
@@ -141,7 +154,7 @@ class VariableValidationError(VariableError):
 
 
 class VariableTypeError(VariableError):
 class VariableTypeError(VariableError):
     """Raised when a variable has an incorrect type."""
     """Raised when a variable has an incorrect type."""
-    
+
     def __init__(self, variable_name: str, expected_type: str, actual_type: str):
     def __init__(self, variable_name: str, expected_type: str, actual_type: str):
         self.variable_name = variable_name
         self.variable_name = variable_name
         self.expected_type = expected_type
         self.expected_type = expected_type
@@ -152,17 +165,19 @@ class VariableTypeError(VariableError):
 
 
 class LibraryError(BoilerplatesError):
 class LibraryError(BoilerplatesError):
     """Raised when library operations fail."""
     """Raised when library operations fail."""
+
     pass
     pass
 
 
 
 
 class ModuleError(BoilerplatesError):
 class ModuleError(BoilerplatesError):
     """Raised when module operations fail."""
     """Raised when module operations fail."""
+
     pass
     pass
 
 
 
 
 class ModuleNotFoundError(ModuleError):
 class ModuleNotFoundError(ModuleError):
     """Raised when a module cannot be found."""
     """Raised when a module cannot be found."""
-    
+
     def __init__(self, module_name: str):
     def __init__(self, module_name: str):
         self.module_name = module_name
         self.module_name = module_name
         msg = f"Module '{module_name}' not found"
         msg = f"Module '{module_name}' not found"
@@ -171,22 +186,25 @@ class ModuleNotFoundError(ModuleError):
 
 
 class ModuleLoadError(ModuleError):
 class ModuleLoadError(ModuleError):
     """Raised when a module fails to load."""
     """Raised when a module fails to load."""
+
     pass
     pass
 
 
 
 
 class FileOperationError(BoilerplatesError):
 class FileOperationError(BoilerplatesError):
     """Raised when file operations fail."""
     """Raised when file operations fail."""
+
     pass
     pass
 
 
 
 
 class RenderError(BoilerplatesError):
 class RenderError(BoilerplatesError):
     """Raised when rendering operations fail."""
     """Raised when rendering operations fail."""
+
     pass
     pass
 
 
 
 
 class YAMLParseError(BoilerplatesError):
 class YAMLParseError(BoilerplatesError):
     """Raised when YAML parsing fails."""
     """Raised when YAML parsing fails."""
-    
+
     def __init__(self, file_path: str, original_error: Exception):
     def __init__(self, file_path: str, original_error: Exception):
         self.file_path = file_path
         self.file_path = file_path
         self.original_error = original_error
         self.original_error = original_error

+ 362 - 307
cli/core/library.py

@@ -5,321 +5,376 @@ import logging
 from typing import Optional
 from typing import Optional
 import yaml
 import yaml
 
 
-from .exceptions import LibraryError, TemplateNotFoundError, YAMLParseError, DuplicateTemplateError
+from .exceptions import LibraryError, TemplateNotFoundError, DuplicateTemplateError
 
 
 logger = logging.getLogger(__name__)
 logger = logging.getLogger(__name__)
 
 
 
 
 class Library:
 class Library:
-  """Represents a single library with a specific path."""
-  
-  def __init__(self, name: str, path: Path, priority: int = 0, library_type: str = "git") -> None:
-    """Initialize a library instance.
-    
-    Args:
-      name: Display name for the library
-      path: Path to the library directory
-      priority: Priority for library lookup (higher = checked first)
-      library_type: Type of library ("git" or "static")
-    """
-    if library_type not in ("git", "static"):
-      raise ValueError(f"Invalid library type: {library_type}. Must be 'git' or 'static'.")
-    
-    self.name = name
-    self.path = path
-    self.priority = priority  # Higher priority = checked first
-    self.library_type = library_type
-  
-  def _is_template_draft(self, template_path: Path) -> bool:
-    """Check if a template is marked as draft."""
-    # Find the template file
-    for filename in ("template.yaml", "template.yml"):
-      template_file = template_path / filename
-      if template_file.exists():
-        break
-    else:
-      return False
-    
-    try:
-      with open(template_file, "r", encoding="utf-8") as f:
-        docs = [doc for doc in yaml.safe_load_all(f) if doc]
-        return docs[0].get("metadata", {}).get("draft", False) if docs else False
-    except (yaml.YAMLError, IOError, OSError) as e:
-      logger.warning(f"Error checking draft status for {template_path}: {e}")
-      return False
-
-  def find_by_id(self, module_name: str, template_id: str) -> tuple[Path, str]:
-    """Find a template by its ID in this library.
-    
-    Args:
-        module_name: The module name (e.g., 'compose', 'terraform')
-        template_id: The template ID to find
-    
-    Returns:
-        Path to the template directory if found
-        
-    Raises:
-        FileNotFoundError: If the template ID is not found in this library or is marked as draft
-    """
-    logger.debug(f"Looking for template '{template_id}' in module '{module_name}' in library '{self.name}'")
-    
-    # Build the path to the specific template directory
-    template_path = self.path / module_name / template_id
-    
-    # Check if template directory exists with a template file
-    has_template = template_path.is_dir() and any(
-      (template_path / f).exists() for f in ("template.yaml", "template.yml")
-    )
-    
-    if not has_template or self._is_template_draft(template_path):
-      raise TemplateNotFoundError(template_id, module_name)
-    
-    logger.debug(f"Found template '{template_id}' at: {template_path}")
-    return template_path, self.name
-
-
-  def find(self, module_name: str, sort_results: bool = False) -> list[tuple[Path, str]]:
-    """Find templates in this library for a specific module.
-    
-    Excludes templates marked as draft.
-    
-    Args:
-        module_name: The module name (e.g., 'compose', 'terraform')
-        sort_results: Whether to return results sorted alphabetically
-    
-    Returns:
-        List of Path objects representing template directories (excluding drafts)
-        
-    Raises:
-        FileNotFoundError: If the module directory is not found in this library
-    """
-    logger.debug(f"Looking for templates in module '{module_name}' in library '{self.name}'")
-    
-    # Build the path to the module directory
-    module_path = self.path / module_name
-    
-    # Check if the module directory exists
-    if not module_path.is_dir():
-      raise LibraryError(f"Module '{module_name}' not found in library '{self.name}'")
-    
-    # Track seen IDs to detect duplicates within this library
-    seen_ids = {}
-    template_dirs = []
-    try:
-      for item in module_path.iterdir():
-        has_template = item.is_dir() and any((item / f).exists() for f in ("template.yaml", "template.yml"))
-        if has_template and not self._is_template_draft(item):
-          template_id = item.name
-          
-          # Check for duplicate within same library
-          if template_id in seen_ids:
-            raise DuplicateTemplateError(template_id, self.name)
-          
-          seen_ids[template_id] = True
-          template_dirs.append((item, self.name))
-        elif has_template:
-          logger.debug(f"Skipping draft template: {item.name}")
-    except PermissionError as e:
-      raise LibraryError(f"Permission denied accessing module '{module_name}' in library '{self.name}': {e}")
-    
-    # Sort if requested
-    if sort_results:
-      template_dirs.sort(key=lambda x: x[0].name.lower())
-    
-    logger.debug(f"Found {len(template_dirs)} templates in module '{module_name}'")
-    return template_dirs
+    """Represents a single library with a specific path."""
 
 
-class LibraryManager:
-  """Manages multiple libraries and provides methods to find templates."""
-  
-  def __init__(self) -> None:
-    """Initialize LibraryManager with git-based libraries from config."""
-    from .config import ConfigManager
-    
-    self.config = ConfigManager()
-    self.libraries = self._load_libraries_from_config()
-  
-  def _load_libraries_from_config(self) -> list[Library]:
-    """Load libraries from configuration.
-    
-    Returns:
-        List of Library instances
-    """
-    libraries = []
-    libraries_path = self.config.get_libraries_path()
-    
-    # Get library configurations from config
-    library_configs = self.config.get_libraries()
-    
-    for i, lib_config in enumerate(library_configs):
-      # Skip disabled libraries
-      if not lib_config.get("enabled", True):
-        logger.debug(f"Skipping disabled library: {lib_config.get('name')}")
-        continue
-      
-      name = lib_config.get("name")
-      lib_type = lib_config.get("type", "git")  # Default to "git" for backward compat
-      
-      # Handle library type-specific path resolution
-      if lib_type == "git":
-        # Existing git logic
-        directory = lib_config.get("directory", ".")
-        
-        # Build path to library: ~/.config/boilerplates/libraries/{name}/{directory}/
-        # For sparse-checkout, files remain in the specified directory
-        library_base = libraries_path / name
-        if directory and directory != ".":
-          library_path = library_base / directory
-        else:
-          library_path = library_base
-      
-      elif lib_type == "static":
-        # New static logic - use path directly
-        path_str = lib_config.get("path")
-        if not path_str:
-          logger.warning(f"Static library '{name}' has no path configured")
-          continue
-        
-        # Expand ~ and resolve relative paths
-        library_path = Path(path_str).expanduser()
-        if not library_path.is_absolute():
-          # Resolve relative to config directory
-          library_path = (self.config.config_path.parent / library_path).resolve()
-      
-      else:
-        logger.warning(f"Unknown library type '{lib_type}' for library '{name}'")
-        continue
-      
-      # Check if library path exists
-      if not library_path.exists():
-        if lib_type == "git":
-          logger.warning(
-            f"Library '{name}' not found at {library_path}. "
-            f"Run 'repo update' to sync libraries."
-          )
+    def __init__(
+        self, name: str, path: Path, priority: int = 0, library_type: str = "git"
+    ) -> None:
+        """Initialize a library instance.
+
+        Args:
+          name: Display name for the library
+          path: Path to the library directory
+          priority: Priority for library lookup (higher = checked first)
+          library_type: Type of library ("git" or "static")
+        """
+        if library_type not in ("git", "static"):
+            raise ValueError(
+                f"Invalid library type: {library_type}. Must be 'git' or 'static'."
+            )
+
+        self.name = name
+        self.path = path
+        self.priority = priority  # Higher priority = checked first
+        self.library_type = library_type
+
+    def _is_template_draft(self, template_path: Path) -> bool:
+        """Check if a template is marked as draft."""
+        # Find the template file
+        for filename in ("template.yaml", "template.yml"):
+            template_file = template_path / filename
+            if template_file.exists():
+                break
         else:
         else:
-          logger.warning(f"Static library '{name}' not found at {library_path}")
-        continue
-      
-      # Create Library instance with type and priority based on order (first = highest priority)
-      priority = len(library_configs) - i
-      libraries.append(
-        Library(name=name, path=library_path, priority=priority, library_type=lib_type)
-      )
-      logger.debug(f"Loaded {lib_type} library '{name}' from {library_path} with priority {priority}")
-    
-    if not libraries:
-      logger.warning("No libraries loaded. Run 'repo update' to sync libraries.")
-    
-    return libraries
-
-  def find_by_id(self, module_name: str, template_id: str) -> Optional[tuple[Path, str]]:
-    """Find a template by its ID across all libraries.
-    
-    Supports both simple IDs and qualified IDs (template.library format).
-    
-    Args:
-        module_name: The module name (e.g., 'compose', 'terraform')
-        template_id: The template ID to find (simple or qualified)
-    
-    Returns:
-        Tuple of (template_path, library_name) if found, None otherwise
-    """
-    logger.debug(f"Searching for template '{template_id}' in module '{module_name}' across all libraries")
-    
-    # Check if this is a qualified ID (contains '.')
-    if '.' in template_id:
-      parts = template_id.rsplit('.', 1)
-      if len(parts) == 2:
-        base_id, requested_lib = parts
-        logger.debug(f"Parsing qualified ID: base='{base_id}', library='{requested_lib}'")
-        
-        # Try to find in the specific library
-        for library in self.libraries:
-          if library.name == requested_lib:
+            return False
+
+        try:
+            with open(template_file, "r", encoding="utf-8") as f:
+                docs = [doc for doc in yaml.safe_load_all(f) if doc]
+                return (
+                    docs[0].get("metadata", {}).get("draft", False) if docs else False
+                )
+        except (yaml.YAMLError, IOError, OSError) as e:
+            logger.warning(f"Error checking draft status for {template_path}: {e}")
+            return False
+
+    def find_by_id(self, module_name: str, template_id: str) -> tuple[Path, str]:
+        """Find a template by its ID in this library.
+
+        Args:
+            module_name: The module name (e.g., 'compose', 'terraform')
+            template_id: The template ID to find
+
+        Returns:
+            Path to the template directory if found
+
+        Raises:
+            FileNotFoundError: If the template ID is not found in this library or is marked as draft
+        """
+        logger.debug(
+            f"Looking for template '{template_id}' in module '{module_name}' in library '{self.name}'"
+        )
+
+        # Build the path to the specific template directory
+        template_path = self.path / module_name / template_id
+
+        # Check if template directory exists with a template file
+        has_template = template_path.is_dir() and any(
+            (template_path / f).exists() for f in ("template.yaml", "template.yml")
+        )
+
+        if not has_template or self._is_template_draft(template_path):
+            raise TemplateNotFoundError(template_id, module_name)
+
+        logger.debug(f"Found template '{template_id}' at: {template_path}")
+        return template_path, self.name
+
+    def find(
+        self, module_name: str, sort_results: bool = False
+    ) -> list[tuple[Path, str]]:
+        """Find templates in this library for a specific module.
+
+        Excludes templates marked as draft.
+
+        Args:
+            module_name: The module name (e.g., 'compose', 'terraform')
+            sort_results: Whether to return results sorted alphabetically
+
+        Returns:
+            List of Path objects representing template directories (excluding drafts)
+
+        Raises:
+            FileNotFoundError: If the module directory is not found in this library
+        """
+        logger.debug(
+            f"Looking for templates in module '{module_name}' in library '{self.name}'"
+        )
+
+        # Build the path to the module directory
+        module_path = self.path / module_name
+
+        # Check if the module directory exists
+        if not module_path.is_dir():
+            raise LibraryError(
+                f"Module '{module_name}' not found in library '{self.name}'"
+            )
+
+        # Track seen IDs to detect duplicates within this library
+        seen_ids = {}
+        template_dirs = []
+        try:
+            for item in module_path.iterdir():
+                has_template = item.is_dir() and any(
+                    (item / f).exists() for f in ("template.yaml", "template.yml")
+                )
+                if has_template and not self._is_template_draft(item):
+                    template_id = item.name
+
+                    # Check for duplicate within same library
+                    if template_id in seen_ids:
+                        raise DuplicateTemplateError(template_id, self.name)
+
+                    seen_ids[template_id] = True
+                    template_dirs.append((item, self.name))
+                elif has_template:
+                    logger.debug(f"Skipping draft template: {item.name}")
+        except PermissionError as e:
+            raise LibraryError(
+                f"Permission denied accessing module '{module_name}' in library '{self.name}': {e}"
+            )
+
+        # Sort if requested
+        if sort_results:
+            template_dirs.sort(key=lambda x: x[0].name.lower())
+
+        logger.debug(f"Found {len(template_dirs)} templates in module '{module_name}'")
+        return template_dirs
+
+
+class LibraryManager:
+    """Manages multiple libraries and provides methods to find templates."""
+
+    def __init__(self) -> None:
+        """Initialize LibraryManager with git-based libraries from config."""
+        from .config import ConfigManager
+
+        self.config = ConfigManager()
+        self.libraries = self._load_libraries_from_config()
+
+    def _load_libraries_from_config(self) -> list[Library]:
+        """Load libraries from configuration.
+
+        Returns:
+            List of Library instances
+        """
+        libraries = []
+        libraries_path = self.config.get_libraries_path()
+
+        # Get library configurations from config
+        library_configs = self.config.get_libraries()
+
+        for i, lib_config in enumerate(library_configs):
+            # Skip disabled libraries
+            if not lib_config.get("enabled", True):
+                logger.debug(f"Skipping disabled library: {lib_config.get('name')}")
+                continue
+
+            name = lib_config.get("name")
+            lib_type = lib_config.get(
+                "type", "git"
+            )  # Default to "git" for backward compat
+
+            # Handle library type-specific path resolution
+            if lib_type == "git":
+                # Existing git logic
+                directory = lib_config.get("directory", ".")
+
+                # Build path to library: ~/.config/boilerplates/libraries/{name}/{directory}/
+                # For sparse-checkout, files remain in the specified directory
+                library_base = libraries_path / name
+                if directory and directory != ".":
+                    library_path = library_base / directory
+                else:
+                    library_path = library_base
+
+            elif lib_type == "static":
+                # New static logic - use path directly
+                path_str = lib_config.get("path")
+                if not path_str:
+                    logger.warning(f"Static library '{name}' has no path configured")
+                    continue
+
+                # Expand ~ and resolve relative paths
+                library_path = Path(path_str).expanduser()
+                if not library_path.is_absolute():
+                    # Resolve relative to config directory
+                    library_path = (
+                        self.config.config_path.parent / library_path
+                    ).resolve()
+
+            else:
+                logger.warning(
+                    f"Unknown library type '{lib_type}' for library '{name}'"
+                )
+                continue
+
+            # Check if library path exists
+            if not library_path.exists():
+                if lib_type == "git":
+                    logger.warning(
+                        f"Library '{name}' not found at {library_path}. "
+                        f"Run 'repo update' to sync libraries."
+                    )
+                else:
+                    logger.warning(
+                        f"Static library '{name}' not found at {library_path}"
+                    )
+                continue
+
+            # Create Library instance with type and priority based on order (first = highest priority)
+            priority = len(library_configs) - i
+            libraries.append(
+                Library(
+                    name=name,
+                    path=library_path,
+                    priority=priority,
+                    library_type=lib_type,
+                )
+            )
+            logger.debug(
+                f"Loaded {lib_type} library '{name}' from {library_path} with priority {priority}"
+            )
+
+        if not libraries:
+            logger.warning("No libraries loaded. Run 'repo update' to sync libraries.")
+
+        return libraries
+
+    def find_by_id(
+        self, module_name: str, template_id: str
+    ) -> Optional[tuple[Path, str]]:
+        """Find a template by its ID across all libraries.
+
+        Supports both simple IDs and qualified IDs (template.library format).
+
+        Args:
+            module_name: The module name (e.g., 'compose', 'terraform')
+            template_id: The template ID to find (simple or qualified)
+
+        Returns:
+            Tuple of (template_path, library_name) if found, None otherwise
+        """
+        logger.debug(
+            f"Searching for template '{template_id}' in module '{module_name}' across all libraries"
+        )
+
+        # Check if this is a qualified ID (contains '.')
+        if "." in template_id:
+            parts = template_id.rsplit(".", 1)
+            if len(parts) == 2:
+                base_id, requested_lib = parts
+                logger.debug(
+                    f"Parsing qualified ID: base='{base_id}', library='{requested_lib}'"
+                )
+
+                # Try to find in the specific library
+                for library in self.libraries:
+                    if library.name == requested_lib:
+                        try:
+                            template_path, lib_name = library.find_by_id(
+                                module_name, base_id
+                            )
+                            logger.debug(
+                                f"Found template '{base_id}' in library '{requested_lib}'"
+                            )
+                            return template_path, lib_name
+                        except TemplateNotFoundError:
+                            logger.debug(
+                                f"Template '{base_id}' not found in library '{requested_lib}'"
+                            )
+                            return None
+
+                logger.debug(f"Library '{requested_lib}' not found")
+                return None
+
+        # Simple ID - search by priority
+        for library in sorted(self.libraries, key=lambda x: x.priority, reverse=True):
             try:
             try:
-              template_path, lib_name = library.find_by_id(module_name, base_id)
-              logger.debug(f"Found template '{base_id}' in library '{requested_lib}'")
-              return template_path, lib_name
+                template_path, lib_name = library.find_by_id(module_name, template_id)
+                logger.debug(
+                    f"Found template '{template_id}' in library '{library.name}'"
+                )
+                return template_path, lib_name
             except TemplateNotFoundError:
             except TemplateNotFoundError:
-              logger.debug(f"Template '{base_id}' not found in library '{requested_lib}'")
-              return None
-        
-        logger.debug(f"Library '{requested_lib}' not found")
+                # Continue searching in next library
+                continue
+
+        logger.debug(f"Template '{template_id}' not found in any library")
         return None
         return None
-    
-    # Simple ID - search by priority
-    for library in sorted(self.libraries, key=lambda x: x.priority, reverse=True):
-      try:
-        template_path, lib_name = library.find_by_id(module_name, template_id)
-        logger.debug(f"Found template '{template_id}' in library '{library.name}'")
-        return template_path, lib_name
-      except TemplateNotFoundError:
-        # Continue searching in next library
-        continue
-    
-    logger.debug(f"Template '{template_id}' not found in any library")
-    return None
-  
-  def find(self, module_name: str, sort_results: bool = False) -> list[tuple[Path, str, bool]]:
-    """Find templates across all libraries for a specific module.
-    
-    Handles duplicates by qualifying IDs with library names when needed.
-    
-    Args:
-        module_name: The module name (e.g., 'compose', 'terraform')
-        sort_results: Whether to return results sorted alphabetically
-    
-    Returns:
-        List of tuples (template_path, library_name, needs_qualification)
-        where needs_qualification is True if the template ID appears in multiple libraries
-    """
-    logger.debug(f"Searching for templates in module '{module_name}' across all libraries")
-    
-    all_templates = []
-    
-    # Collect templates from all libraries
-    for library in sorted(self.libraries, key=lambda x: x.priority, reverse=True):
-      try:
-        templates = library.find(module_name, sort_results=False)
-        all_templates.extend(templates)
-        logger.debug(f"Found {len(templates)} templates in library '{library.name}'")
-      except (LibraryError, DuplicateTemplateError) as e:
-        # DuplicateTemplateError from library.find() should propagate up
-        if isinstance(e, DuplicateTemplateError):
-          raise
-        logger.debug(f"Module '{module_name}' not found in library '{library.name}'")
-        continue
-    
-    # Track template IDs and their libraries to detect cross-library duplicates
-    id_to_occurrences = {}
-    for template_path, library_name in all_templates:
-      template_id = template_path.name
-      if template_id not in id_to_occurrences:
-        id_to_occurrences[template_id] = []
-      id_to_occurrences[template_id].append((template_path, library_name))
-    
-    # Build result with qualification markers for duplicates
-    result = []
-    for template_id, occurrences in id_to_occurrences.items():
-      if len(occurrences) > 1:
-        # Duplicate across libraries - mark for qualified IDs
-        lib_names = ', '.join(lib for _, lib in occurrences)
-        logger.info(
-          f"Template '{template_id}' found in multiple libraries: {lib_names}. "
-          f"Using qualified IDs."
+
+    def find(
+        self, module_name: str, sort_results: bool = False
+    ) -> list[tuple[Path, str, bool]]:
+        """Find templates across all libraries for a specific module.
+
+        Handles duplicates by qualifying IDs with library names when needed.
+
+        Args:
+            module_name: The module name (e.g., 'compose', 'terraform')
+            sort_results: Whether to return results sorted alphabetically
+
+        Returns:
+            List of tuples (template_path, library_name, needs_qualification)
+            where needs_qualification is True if the template ID appears in multiple libraries
+        """
+        logger.debug(
+            f"Searching for templates in module '{module_name}' across all libraries"
         )
         )
-        for template_path, library_name in occurrences:
-          # Mark that this ID needs qualification
-          result.append((template_path, library_name, True))
-      else:
-        # Unique template - no qualification needed
-        template_path, library_name = occurrences[0]
-        result.append((template_path, library_name, False))
-    
-    # Sort if requested
-    if sort_results:
-      result.sort(key=lambda x: x[0].name.lower())
-    
-    logger.debug(f"Found {len(result)} templates total")
-    return result
+
+        all_templates = []
+
+        # Collect templates from all libraries
+        for library in sorted(self.libraries, key=lambda x: x.priority, reverse=True):
+            try:
+                templates = library.find(module_name, sort_results=False)
+                all_templates.extend(templates)
+                logger.debug(
+                    f"Found {len(templates)} templates in library '{library.name}'"
+                )
+            except (LibraryError, DuplicateTemplateError) as e:
+                # DuplicateTemplateError from library.find() should propagate up
+                if isinstance(e, DuplicateTemplateError):
+                    raise
+                logger.debug(
+                    f"Module '{module_name}' not found in library '{library.name}'"
+                )
+                continue
+
+        # Track template IDs and their libraries to detect cross-library duplicates
+        id_to_occurrences = {}
+        for template_path, library_name in all_templates:
+            template_id = template_path.name
+            if template_id not in id_to_occurrences:
+                id_to_occurrences[template_id] = []
+            id_to_occurrences[template_id].append((template_path, library_name))
+
+        # Build result with qualification markers for duplicates
+        result = []
+        for template_id, occurrences in id_to_occurrences.items():
+            if len(occurrences) > 1:
+                # Duplicate across libraries - mark for qualified IDs
+                lib_names = ", ".join(lib for _, lib in occurrences)
+                logger.info(
+                    f"Template '{template_id}' found in multiple libraries: {lib_names}. "
+                    f"Using qualified IDs."
+                )
+                for template_path, library_name in occurrences:
+                    # Mark that this ID needs qualification
+                    result.append((template_path, library_name, True))
+            else:
+                # Unique template - no qualification needed
+                template_path, library_name = occurrences[0]
+                result.append((template_path, library_name, False))
+
+        # Sort if requested
+        if sort_results:
+            result.sort(key=lambda x: x[0].name.lower())
+
+        logger.debug(f"Found {len(result)} templates total")
+        return result

+ 1263 - 1024
cli/core/module.py

@@ -1,21 +1,20 @@
 from __future__ import annotations
 from __future__ import annotations
 
 
 import logging
 import logging
-import sys
 from abc import ABC
 from abc import ABC
 from pathlib import Path
 from pathlib import Path
-from typing import Any, Optional, List, Dict, Tuple
+from typing import Any, Optional, List, Dict
 
 
 from rich.console import Console
 from rich.console import Console
 from rich.panel import Panel
 from rich.panel import Panel
 from rich.prompt import Confirm
 from rich.prompt import Confirm
-from typer import Argument, Context, Option, Typer, Exit
+from typer import Argument, Option, Typer, Exit
 
 
 from .display import DisplayManager
 from .display import DisplayManager
 from .exceptions import (
 from .exceptions import (
     TemplateRenderError,
     TemplateRenderError,
     TemplateSyntaxError,
     TemplateSyntaxError,
-    TemplateValidationError
+    TemplateValidationError,
 )
 )
 from .library import LibraryManager
 from .library import LibraryManager
 from .prompt import PromptHandler
 from .prompt import PromptHandler
@@ -27,1032 +26,1272 @@ console_err = Console(stderr=True)
 
 
 
 
 def parse_var_inputs(var_options: List[str], extra_args: List[str]) -> Dict[str, Any]:
 def parse_var_inputs(var_options: List[str], extra_args: List[str]) -> Dict[str, Any]:
-  """Parse variable inputs from --var options and extra args.
-  
-  Supports formats:
-    --var KEY=VALUE
-    --var KEY VALUE
-    
-  Args:
-    var_options: List of variable options from CLI
-    extra_args: Additional arguments that may contain values
-    
-  Returns:
-    Dictionary of parsed variables
-  """
-  variables = {}
-  
-  # Parse --var KEY=VALUE format
-  for var_option in var_options:
-    if '=' in var_option:
-      key, value = var_option.split('=', 1)
-      variables[key] = value
-    else:
-      # --var KEY VALUE format - value should be in extra_args
-      if extra_args:
-        variables[var_option] = extra_args.pop(0)
-      else:
-        logger.warning(f"No value provided for variable '{var_option}'")
-  
-  return variables
+    """Parse variable inputs from --var options and extra args.
+
+    Supports formats:
+      --var KEY=VALUE
+      --var KEY VALUE
 
 
-class Module(ABC):
-  """Streamlined base module that auto-detects variables from templates."""
-  
-  # Schema version supported by this module (override in subclasses)
-  schema_version: str = "1.0"
-
-  def __init__(self) -> None:
-    if not all([self.name, self.description]):
-      raise ValueError(
-        f"Module {self.__class__.__name__} must define name and description"
-      )
-    
-    logger.info(f"Initializing module '{self.name}'")
-    logger.debug(f"Module '{self.name}' configuration: description='{self.description}'")
-    self.libraries = LibraryManager()
-    self.display = DisplayManager()
-
-  def list(
-    self,
-    raw: bool = Option(False, "--raw", help="Output raw list format instead of rich table")
-  ) -> list[Template]:
-    """List all templates."""
-    logger.debug(f"Listing templates for module '{self.name}'")
-    templates = []
-
-    entries = self.libraries.find(self.name, sort_results=True)
-    for entry in entries:
-      # Unpack entry - now returns (path, library_name, needs_qualification)
-      template_dir = entry[0]
-      library_name = entry[1]
-      needs_qualification = entry[2] if len(entry) > 2 else False
-      
-      try:
-        # Get library object to determine type
-        library = next((lib for lib in self.libraries.libraries if lib.name == library_name), None)
-        library_type = library.library_type if library else "git"
-        
-        template = Template(template_dir, library_name=library_name, library_type=library_type)
-        
-        # Validate schema version compatibility
-        template._validate_schema_version(self.schema_version, self.name)
-        
-        # If template ID needs qualification, set qualified ID
-        if needs_qualification:
-          template.set_qualified_id()
-        
-        templates.append(template)
-      except Exception as exc:
-        logger.error(f"Failed to load template from {template_dir}: {exc}")
-        continue
-    
-    filtered_templates = templates
-    
-    if filtered_templates:
-      if raw:
-        # Output raw format (tab-separated values for easy filtering with awk/sed/cut)
-        # Format: ID\tNAME\tTAGS\tVERSION\tLIBRARY
-        for template in filtered_templates:
-          name = template.metadata.name or "Unnamed Template"
-          tags_list = template.metadata.tags or []
-          tags = ",".join(tags_list) if tags_list else "-"
-          version = str(template.metadata.version) if template.metadata.version else "-"
-          library = template.metadata.library or "-"
-          print(f"{template.id}\t{name}\t{tags}\t{version}\t{library}")
-      else:
-        # Output rich table format
-        self.display.display_templates_table(
-          filtered_templates,
-          self.name,
-          f"{self.name.capitalize()} templates"
-        )
-    else:
-      logger.info(f"No templates found for module '{self.name}'")
-
-    return filtered_templates
-
-  def search(
-    self,
-    query: str = Argument(..., help="Search string to filter templates by ID")
-  ) -> list[Template]:
-    """Search for templates by ID containing the search string."""
-    logger.debug(f"Searching templates for module '{self.name}' with query='{query}'")
-    templates = []
-
-    entries = self.libraries.find(self.name, sort_results=True)
-    for entry in entries:
-      # Unpack entry - now returns (path, library_name, needs_qualification)
-      template_dir = entry[0]
-      library_name = entry[1]
-      needs_qualification = entry[2] if len(entry) > 2 else False
-      
-      try:
-        # Get library object to determine type
-        library = next((lib for lib in self.libraries.libraries if lib.name == library_name), None)
-        library_type = library.library_type if library else "git"
-        
-        template = Template(template_dir, library_name=library_name, library_type=library_type)
-        
-        # Validate schema version compatibility
-        template._validate_schema_version(self.schema_version, self.name)
-        
-        # If template ID needs qualification, set qualified ID
-        if needs_qualification:
-          template.set_qualified_id()
-        
-        templates.append(template)
-      except Exception as exc:
-        logger.error(f"Failed to load template from {template_dir}: {exc}")
-        continue
-    
-    # Apply search filtering
-    filtered_templates = [t for t in templates if query.lower() in t.id.lower()]
-    
-    if filtered_templates:
-      logger.info(f"Found {len(filtered_templates)} templates matching '{query}' for module '{self.name}'")
-      self.display.display_templates_table(
-        filtered_templates,
-        self.name,
-        f"{self.name.capitalize()} templates matching '{query}'"
-      )
-    else:
-      logger.info(f"No templates found matching '{query}' for module '{self.name}'")
-      self.display.display_warning(f"No templates found matching '{query}'", context=f"module '{self.name}'")
-
-    return filtered_templates
-
-
-  def show(
-    self,
-    id: str,
-    all_vars: bool = Option(False, "--all", help="Show all variables/sections, even those with unsatisfied needs"),
-  ) -> None:
-    """Show template details."""
-    logger.debug(f"Showing template '{id}' from module '{self.name}'")
-    template = self._load_template_by_id(id)
-
-    if not template:
-      self.display.display_error(f"Template '{id}' not found", context=f"module '{self.name}'")
-      return
-    
-    # Apply config defaults (same as in generate)
-    # This ensures the display shows the actual defaults that will be used
-    if template.variables:
-      from .config import ConfigManager
-      config = ConfigManager()
-      config_defaults = config.get_defaults(self.name)
-      
-      if config_defaults:
-        logger.debug(f"Loading config defaults for module '{self.name}'")
-        # Apply config defaults (this respects the variable types and validation)
-        successful = template.variables.apply_defaults(config_defaults, "config")
-        if successful:
-          logger.debug(f"Applied config defaults for: {', '.join(successful)}")
-      
-      # Re-sort sections after applying config (toggle values may have changed)
-      template.variables.sort_sections()
-    
-    self._display_template_details(template, id, show_all=all_vars)
-
-  def _apply_variable_defaults(self, template: Template) -> None:
-    """Apply config defaults and CLI overrides to template variables.
-    
-    Args:
-        template: Template instance with variables to configure
-    """
-    if not template.variables:
-      return
-    
-    from .config import ConfigManager
-    config = ConfigManager()
-    config_defaults = config.get_defaults(self.name)
-    
-    if config_defaults:
-      logger.info(f"Loading config defaults for module '{self.name}'")
-      successful = template.variables.apply_defaults(config_defaults, "config")
-      if successful:
-        logger.debug(f"Applied config defaults for: {', '.join(successful)}")
-
-  def _apply_cli_overrides(self, template: Template, var: Optional[List[str]], ctx=None) -> None:
-    """Apply CLI variable overrides to template.
-    
-    Args:
-        template: Template instance to apply overrides to
-        var: List of variable override strings from --var flags
-        ctx: Context object containing extra args (optional, will get current context if None)
-    """
-    if not template.variables:
-      return
-    
-    # Get context if not provided (compatible with all Typer versions)
-    if ctx is None:
-      import click
-      try:
-        ctx = click.get_current_context()
-      except RuntimeError:
-        ctx = None
-    
-    extra_args = list(ctx.args) if ctx and hasattr(ctx, "args") else []
-    cli_overrides = parse_var_inputs(var or [], extra_args)
-    
-    if cli_overrides:
-      logger.info(f"Received {len(cli_overrides)} variable overrides from CLI")
-      successful_overrides = template.variables.apply_defaults(cli_overrides, "cli")
-      if successful_overrides:
-        logger.debug(f"Applied CLI overrides for: {', '.join(successful_overrides)}")
-
-  def _collect_variable_values(self, template: Template, interactive: bool) -> Dict[str, Any]:
-    """Collect variable values from user prompts and template defaults.
-    
-    Args:
-        template: Template instance with variables
-        interactive: Whether to prompt user for values interactively
-        
-    Returns:
-        Dictionary of variable names to values
-    """
-    variable_values = {}
-    
-    # Collect values interactively if enabled
-    if interactive and template.variables:
-      prompt_handler = PromptHandler()
-      collected_values = prompt_handler.collect_variables(template.variables)
-      if collected_values:
-        variable_values.update(collected_values)
-        logger.info(f"Collected {len(collected_values)} variable values from user input")
-    
-    # Add satisfied variable values (respects dependencies and toggles)
-    if template.variables:
-      variable_values.update(template.variables.get_satisfied_values())
-    
-    return variable_values
-  def _check_output_directory(self, output_dir: Path, rendered_files: Dict[str, str], 
-                              interactive: bool) -> Optional[List[Path]]:
-    """Check output directory for conflicts and get user confirmation if needed.
-    
-    Args:
-        output_dir: Directory where files will be written
-        rendered_files: Dictionary of file paths to rendered content
-        interactive: Whether to prompt user for confirmation
-        
-    Returns:
-        List of existing files that will be overwritten, or None to cancel
-    """
-    dir_exists = output_dir.exists()
-    dir_not_empty = dir_exists and any(output_dir.iterdir())
-    
-    # Check which files already exist
-    existing_files = []
-    if dir_exists:
-      for file_path in rendered_files.keys():
-        full_path = output_dir / file_path
-        if full_path.exists():
-          existing_files.append(full_path)
-    
-    # Warn if directory is not empty
-    if dir_not_empty:
-      if interactive:
-        details = []
-        if existing_files:
-          details.append(f"{len(existing_files)} file(s) will be overwritten.")
-        
-        if not self.display.display_warning_with_confirmation(
-          f"Directory '{output_dir}' is not empty.",
-          details if details else None,
-          default=False
-        ):
-          self.display.display_info("Generation cancelled")
-          return None
-      else:
-        # Non-interactive mode: show warning but continue
-        logger.warning(f"Directory '{output_dir}' is not empty")
-        if existing_files:
-          logger.warning(f"{len(existing_files)} file(s) will be overwritten")
-    
-    return existing_files
-
-  def _get_generation_confirmation(self, output_dir: Path, rendered_files: Dict[str, str], 
-                                    existing_files: Optional[List[Path]], dir_not_empty: bool, 
-                                    dry_run: bool, interactive: bool) -> bool:
-    """Display file generation confirmation and get user approval.
-    
     Args:
     Args:
-        output_dir: Output directory path
-        rendered_files: Dictionary of file paths to content
-        existing_files: List of existing files that will be overwritten
-        dir_not_empty: Whether output directory already contains files
-        dry_run: Whether this is a dry run
-        interactive: Whether to prompt for confirmation
-        
+      var_options: List of variable options from CLI
+      extra_args: Additional arguments that may contain values
+
     Returns:
     Returns:
-        True if user confirms generation, False to cancel
+      Dictionary of parsed variables
     """
     """
-    if not interactive:
-      return True
-    
-    self.display.display_file_generation_confirmation(
-      output_dir, 
-      rendered_files, 
-      existing_files if existing_files else None
-    )
-    
-    # Final confirmation (only if we didn't already ask about overwriting)
-    if not dir_not_empty and not dry_run:
-      if not Confirm.ask("Generate these files?", default=True):
-        self.display.display_info("Generation cancelled")
-        return False
-    
-    return True
-
-  def _execute_dry_run(self, id: str, output_dir: Path, rendered_files: Dict[str, str], show_files: bool) -> None:
-    """Execute dry run mode with comprehensive simulation.
-    
-    Simulates all filesystem operations that would occur during actual generation,
-    including directory creation, file writing, and permission checks.
-    
-    Args:
-        id: Template ID
-        output_dir: Directory where files would be written
-        rendered_files: Dictionary of file paths to rendered content
-        show_files: Whether to display file contents
-    """
-    import os
-    
-    console.print()
-    console.print("[bold cyan]Dry Run Mode - Simulating File Generation[/bold cyan]")
-    console.print()
-    
-    # Simulate directory creation
-    self.display.display_heading("Directory Operations", icon_type="folder")
-    
-    # Check if output directory exists
-    if output_dir.exists():
-      self.display.display_success(f"Output directory exists: [cyan]{output_dir}[/cyan]")
-      # Check if we have write permissions
-      if os.access(output_dir, os.W_OK):
-        self.display.display_success("Write permission verified")
-      else:
-        self.display.display_warning("Write permission may be denied")
-    else:
-      console.print(f"  [dim]→[/dim] Would create output directory: [cyan]{output_dir}[/cyan]")
-      # Check if parent directory exists and is writable
-      parent = output_dir.parent
-      if parent.exists() and os.access(parent, os.W_OK):
-        self.display.display_success("Parent directory writable")
-      else:
-        self.display.display_warning("Parent directory may not be writable")
-    
-    # Collect unique subdirectories that would be created
-    subdirs = set()
-    for file_path in rendered_files.keys():
-      parts = Path(file_path).parts
-      for i in range(1, len(parts)):
-        subdirs.add(Path(*parts[:i]))
-    
-    if subdirs:
-      console.print(f"  [dim]→[/dim] Would create {len(subdirs)} subdirectory(ies)")
-      for subdir in sorted(subdirs):
-        console.print(f"    [dim]📁[/dim] {subdir}/")
-    
-    console.print()
-    
-    # Display file operations in a table
-    self.display.display_heading("File Operations", icon_type="file")
-    
-    total_size = 0
-    new_files = 0
-    overwrite_files = 0
-    file_operations = []
-    
-    for file_path, content in sorted(rendered_files.items()):
-      full_path = output_dir / file_path
-      file_size = len(content.encode('utf-8'))
-      total_size += file_size
-      
-      # Determine status
-      if full_path.exists():
-        status = "Overwrite"
-        overwrite_files += 1
-      else:
-        status = "Create"
-        new_files += 1
-      
-      file_operations.append((file_path, file_size, status))
-    
-    self.display.display_file_operation_table(file_operations)
-    console.print()
-    
-    # Summary statistics
-    if total_size < 1024:
-      size_str = f"{total_size}B"
-    elif total_size < 1024 * 1024:
-      size_str = f"{total_size / 1024:.1f}KB"
-    else:
-      size_str = f"{total_size / (1024 * 1024):.1f}MB"
-    
-    summary_items = {
-      "Total files:": str(len(rendered_files)),
-      "New files:": str(new_files),
-      "Files to overwrite:": str(overwrite_files),
-      "Total size:": size_str
-    }
-    self.display.display_summary_table("Summary", summary_items)
-    console.print()
-    
-    # Show file contents if requested
-    if show_files:
-      console.print("[bold cyan]Generated File Contents:[/bold cyan]")
-      console.print()
-      for file_path, content in sorted(rendered_files.items()):
-        console.print(f"[cyan]File:[/cyan] {file_path}")
-        print(f"{'─'*80}")
-        print(content)
-        print()  # Add blank line after content
-      console.print()
-    
-    self.display.display_success("Dry run complete - no files were written")
-    console.print(f"[dim]Files would have been generated in '{output_dir}'[/dim]")
-    logger.info(f"Dry run completed for template '{id}' - {len(rendered_files)} files, {total_size} bytes")
-
-  def _write_generated_files(self, output_dir: Path, rendered_files: Dict[str, str], quiet: bool = False) -> None:
-    """Write rendered files to the output directory.
-    
-    Args:
-        output_dir: Directory to write files to
-        rendered_files: Dictionary of file paths to rendered content
-        quiet: Suppress output messages
-    """
-    output_dir.mkdir(parents=True, exist_ok=True)
-    
-    for file_path, content in rendered_files.items():
-      full_path = output_dir / file_path
-      full_path.parent.mkdir(parents=True, exist_ok=True)
-      with open(full_path, 'w', encoding='utf-8') as f:
-        f.write(content)
-      if not quiet:
-        console.print(f"[green]Generated file: {file_path}[/green]")  # Keep simple per-file output
-    
-    if not quiet:
-      self.display.display_success(f"Template generated successfully in '{output_dir}'")
-    logger.info(f"Template written to directory: {output_dir}")
-
-  def generate(
-    self,
-    id: str = Argument(..., help="Template ID"),
-    directory: Optional[str] = Argument(None, help="Output directory (defaults to template ID)"),
-    interactive: bool = Option(True, "--interactive/--no-interactive", "-i/-n", help="Enable interactive prompting for variables"),
-    var: Optional[list[str]] = Option(None, "--var", "-v", help="Variable override (repeatable). Supports: KEY=VALUE or KEY VALUE"),
-    dry_run: bool = Option(False, "--dry-run", help="Preview template generation without writing files"),
-    show_files: bool = Option(False, "--show-files", help="Display generated file contents in plain text (use with --dry-run)"),
-    quiet: bool = Option(False, "--quiet", "-q", help="Suppress all non-error output"),
-    all_vars: bool = Option(False, "--all", help="Show all variables/sections, even those with unsatisfied needs"),
-  ) -> None:
-    """Generate from template.
-    
-    Variable precedence chain (lowest to highest):
-    1. Module spec (defined in cli/modules/*.py)
-    2. Template spec (from template.yaml)
-    3. Config defaults (from ~/.config/boilerplates/config.yaml)
-    4. CLI overrides (--var flags)
-    
-    Examples:
-        # Generate to directory named after template
-        cli compose generate traefik
-        
-        # Generate to custom directory
-        cli compose generate traefik my-proxy
-        
-        # Generate with variables
-        cli compose generate traefik --var traefik_enabled=false
-        
-        # Preview without writing files (dry run)
-        cli compose generate traefik --dry-run
-        
-        # Preview and show generated file contents
-        cli compose generate traefik --dry-run --show-files
-    """
-    logger.info(f"Starting generation for template '{id}' from module '{self.name}'")
-    
-    # Create a display manager with quiet mode if needed
-    display = DisplayManager(quiet=quiet) if quiet else self.display
-    
-    template = self._load_template_by_id(id)
-
-    # Apply defaults and overrides
-    self._apply_variable_defaults(template)
-    self._apply_cli_overrides(template, var)
-    
-    # Re-sort sections after all overrides (toggle values may have changed)
-    if template.variables:
-      template.variables.sort_sections()
-
-    if not quiet:
-      self._display_template_details(template, id, show_all=all_vars)
-      console.print()
-
-    # Collect variable values
-    variable_values = self._collect_variable_values(template, interactive)
-
-    try:
-      # Validate and render template
-      if template.variables:
-        template.variables.validate_all()
-      
-      # Check if we're in debug mode (logger level is DEBUG)
-      debug_mode = logger.isEnabledFor(logging.DEBUG)
-      
-      rendered_files, variable_values = template.render(template.variables, debug=debug_mode)
-      
-      if not rendered_files:
-        display.display_error("Template rendering returned no files", context="template generation")
-        raise Exit(code=1)
-      
-      logger.info(f"Successfully rendered template '{id}'")
-      
-      # Determine output directory
-      if directory:
-        output_dir = Path(directory)
-        # Check if path looks like an absolute path but is missing the leading slash
-        # This handles cases like "Users/username/path" which should be "/Users/username/path"
-        if not output_dir.is_absolute() and str(output_dir).startswith(("Users/", "home/", "usr/", "opt/", "var/", "tmp/")):
-          output_dir = Path("/") / output_dir
-          logger.debug(f"Normalized relative-looking absolute path to: {output_dir}")
-      else:
-        output_dir = Path(id)
-      
-      # Check for conflicts and get confirmation (skip in quiet mode)
-      if not quiet:
-        existing_files = self._check_output_directory(output_dir, rendered_files, interactive)
-        if existing_files is None:
-          return  # User cancelled
-        
-        # Get final confirmation for generation
-        dir_not_empty = output_dir.exists() and any(output_dir.iterdir())
-        if not self._get_generation_confirmation(output_dir, rendered_files, existing_files, 
-                                                 dir_not_empty, dry_run, interactive):
-          return  # User cancelled
-      else:
-        # In quiet mode, just check for existing files without prompts
+    variables = {}
+
+    # Parse --var KEY=VALUE format
+    for var_option in var_options:
+        if "=" in var_option:
+            key, value = var_option.split("=", 1)
+            variables[key] = value
+        else:
+            # --var KEY VALUE format - value should be in extra_args
+            if extra_args:
+                variables[var_option] = extra_args.pop(0)
+            else:
+                logger.warning(f"No value provided for variable '{var_option}'")
+
+    return variables
+
+
+class Module(ABC):
+    """Streamlined base module that auto-detects variables from templates."""
+
+    # Schema version supported by this module (override in subclasses)
+    schema_version: str = "1.0"
+
+    def __init__(self) -> None:
+        if not all([self.name, self.description]):
+            raise ValueError(
+                f"Module {self.__class__.__name__} must define name and description"
+            )
+
+        logger.info(f"Initializing module '{self.name}'")
+        logger.debug(
+            f"Module '{self.name}' configuration: description='{self.description}'"
+        )
+        self.libraries = LibraryManager()
+        self.display = DisplayManager()
+
+    def list(
+        self,
+        raw: bool = Option(
+            False, "--raw", help="Output raw list format instead of rich table"
+        ),
+    ) -> list[Template]:
+        """List all templates."""
+        logger.debug(f"Listing templates for module '{self.name}'")
+        templates = []
+
+        entries = self.libraries.find(self.name, sort_results=True)
+        for entry in entries:
+            # Unpack entry - now returns (path, library_name, needs_qualification)
+            template_dir = entry[0]
+            library_name = entry[1]
+            needs_qualification = entry[2] if len(entry) > 2 else False
+
+            try:
+                # Get library object to determine type
+                library = next(
+                    (
+                        lib
+                        for lib in self.libraries.libraries
+                        if lib.name == library_name
+                    ),
+                    None,
+                )
+                library_type = library.library_type if library else "git"
+
+                template = Template(
+                    template_dir, library_name=library_name, library_type=library_type
+                )
+
+                # Validate schema version compatibility
+                template._validate_schema_version(self.schema_version, self.name)
+
+                # If template ID needs qualification, set qualified ID
+                if needs_qualification:
+                    template.set_qualified_id()
+
+                templates.append(template)
+            except Exception as exc:
+                logger.error(f"Failed to load template from {template_dir}: {exc}")
+                continue
+
+        filtered_templates = templates
+
+        if filtered_templates:
+            if raw:
+                # Output raw format (tab-separated values for easy filtering with awk/sed/cut)
+                # Format: ID\tNAME\tTAGS\tVERSION\tLIBRARY
+                for template in filtered_templates:
+                    name = template.metadata.name or "Unnamed Template"
+                    tags_list = template.metadata.tags or []
+                    tags = ",".join(tags_list) if tags_list else "-"
+                    version = (
+                        str(template.metadata.version)
+                        if template.metadata.version
+                        else "-"
+                    )
+                    library = template.metadata.library or "-"
+                    print(f"{template.id}\t{name}\t{tags}\t{version}\t{library}")
+            else:
+                # Output rich table format
+                self.display.display_templates_table(
+                    filtered_templates, self.name, f"{self.name.capitalize()} templates"
+                )
+        else:
+            logger.info(f"No templates found for module '{self.name}'")
+
+        return filtered_templates
+
+    def search(
+        self, query: str = Argument(..., help="Search string to filter templates by ID")
+    ) -> list[Template]:
+        """Search for templates by ID containing the search string."""
+        logger.debug(
+            f"Searching templates for module '{self.name}' with query='{query}'"
+        )
+        templates = []
+
+        entries = self.libraries.find(self.name, sort_results=True)
+        for entry in entries:
+            # Unpack entry - now returns (path, library_name, needs_qualification)
+            template_dir = entry[0]
+            library_name = entry[1]
+            needs_qualification = entry[2] if len(entry) > 2 else False
+
+            try:
+                # Get library object to determine type
+                library = next(
+                    (
+                        lib
+                        for lib in self.libraries.libraries
+                        if lib.name == library_name
+                    ),
+                    None,
+                )
+                library_type = library.library_type if library else "git"
+
+                template = Template(
+                    template_dir, library_name=library_name, library_type=library_type
+                )
+
+                # Validate schema version compatibility
+                template._validate_schema_version(self.schema_version, self.name)
+
+                # If template ID needs qualification, set qualified ID
+                if needs_qualification:
+                    template.set_qualified_id()
+
+                templates.append(template)
+            except Exception as exc:
+                logger.error(f"Failed to load template from {template_dir}: {exc}")
+                continue
+
+        # Apply search filtering
+        filtered_templates = [t for t in templates if query.lower() in t.id.lower()]
+
+        if filtered_templates:
+            logger.info(
+                f"Found {len(filtered_templates)} templates matching '{query}' for module '{self.name}'"
+            )
+            self.display.display_templates_table(
+                filtered_templates,
+                self.name,
+                f"{self.name.capitalize()} templates matching '{query}'",
+            )
+        else:
+            logger.info(
+                f"No templates found matching '{query}' for module '{self.name}'"
+            )
+            self.display.display_warning(
+                f"No templates found matching '{query}'",
+                context=f"module '{self.name}'",
+            )
+
+        return filtered_templates
+
+    def show(
+        self,
+        id: str,
+        all_vars: bool = Option(
+            False,
+            "--all",
+            help="Show all variables/sections, even those with unsatisfied needs",
+        ),
+    ) -> None:
+        """Show template details."""
+        logger.debug(f"Showing template '{id}' from module '{self.name}'")
+        template = self._load_template_by_id(id)
+
+        if not template:
+            self.display.display_error(
+                f"Template '{id}' not found", context=f"module '{self.name}'"
+            )
+            return
+
+        # Apply config defaults (same as in generate)
+        # This ensures the display shows the actual defaults that will be used
+        if template.variables:
+            from .config import ConfigManager
+
+            config = ConfigManager()
+            config_defaults = config.get_defaults(self.name)
+
+            if config_defaults:
+                logger.debug(f"Loading config defaults for module '{self.name}'")
+                # Apply config defaults (this respects the variable types and validation)
+                successful = template.variables.apply_defaults(
+                    config_defaults, "config"
+                )
+                if successful:
+                    logger.debug(
+                        f"Applied config defaults for: {', '.join(successful)}"
+                    )
+
+            # Re-sort sections after applying config (toggle values may have changed)
+            template.variables.sort_sections()
+
+        self._display_template_details(template, id, show_all=all_vars)
+
+    def _apply_variable_defaults(self, template: Template) -> None:
+        """Apply config defaults and CLI overrides to template variables.
+
+        Args:
+            template: Template instance with variables to configure
+        """
+        if not template.variables:
+            return
+
+        from .config import ConfigManager
+
+        config = ConfigManager()
+        config_defaults = config.get_defaults(self.name)
+
+        if config_defaults:
+            logger.info(f"Loading config defaults for module '{self.name}'")
+            successful = template.variables.apply_defaults(config_defaults, "config")
+            if successful:
+                logger.debug(f"Applied config defaults for: {', '.join(successful)}")
+
+    def _apply_cli_overrides(
+        self, template: Template, var: Optional[List[str]], ctx=None
+    ) -> None:
+        """Apply CLI variable overrides to template.
+
+        Args:
+            template: Template instance to apply overrides to
+            var: List of variable override strings from --var flags
+            ctx: Context object containing extra args (optional, will get current context if None)
+        """
+        if not template.variables:
+            return
+
+        # Get context if not provided (compatible with all Typer versions)
+        if ctx is None:
+            import click
+
+            try:
+                ctx = click.get_current_context()
+            except RuntimeError:
+                ctx = None
+
+        extra_args = list(ctx.args) if ctx and hasattr(ctx, "args") else []
+        cli_overrides = parse_var_inputs(var or [], extra_args)
+
+        if cli_overrides:
+            logger.info(f"Received {len(cli_overrides)} variable overrides from CLI")
+            successful_overrides = template.variables.apply_defaults(
+                cli_overrides, "cli"
+            )
+            if successful_overrides:
+                logger.debug(
+                    f"Applied CLI overrides for: {', '.join(successful_overrides)}"
+                )
+
+    def _collect_variable_values(
+        self, template: Template, interactive: bool
+    ) -> Dict[str, Any]:
+        """Collect variable values from user prompts and template defaults.
+
+        Args:
+            template: Template instance with variables
+            interactive: Whether to prompt user for values interactively
+
+        Returns:
+            Dictionary of variable names to values
+        """
+        variable_values = {}
+
+        # Collect values interactively if enabled
+        if interactive and template.variables:
+            prompt_handler = PromptHandler()
+            collected_values = prompt_handler.collect_variables(template.variables)
+            if collected_values:
+                variable_values.update(collected_values)
+                logger.info(
+                    f"Collected {len(collected_values)} variable values from user input"
+                )
+
+        # Add satisfied variable values (respects dependencies and toggles)
+        if template.variables:
+            variable_values.update(template.variables.get_satisfied_values())
+
+        return variable_values
+
+    def _check_output_directory(
+        self, output_dir: Path, rendered_files: Dict[str, str], interactive: bool
+    ) -> Optional[List[Path]]:
+        """Check output directory for conflicts and get user confirmation if needed.
+
+        Args:
+            output_dir: Directory where files will be written
+            rendered_files: Dictionary of file paths to rendered content
+            interactive: Whether to prompt user for confirmation
+
+        Returns:
+            List of existing files that will be overwritten, or None to cancel
+        """
+        dir_exists = output_dir.exists()
+        dir_not_empty = dir_exists and any(output_dir.iterdir())
+
+        # Check which files already exist
         existing_files = []
         existing_files = []
-      
-      # Execute generation (dry run or actual)
-      if dry_run:
-        if not quiet:
-          self._execute_dry_run(id, output_dir, rendered_files, show_files)
-      else:
-        self._write_generated_files(output_dir, rendered_files, quiet=quiet)
-      
-      # Display next steps (not in quiet mode)
-      if template.metadata.next_steps and not quiet:
-        display.display_next_steps(template.metadata.next_steps, variable_values)
-
-    except TemplateRenderError as e:
-      # Display enhanced error information for template rendering errors (always show errors)
-      display.display_template_render_error(e, context=f"template '{id}'")
-      raise Exit(code=1)
-    except Exception as e:
-      display.display_error(str(e), context=f"generating template '{id}'")
-      raise Exit(code=1)
-
-  def config_get(
-    self,
-    var_name: Optional[str] = Argument(None, help="Variable name to get (omit to show all defaults)"),
-  ) -> None:
-    """Get default value(s) for this module.
-    
-    Examples:
-        # Get all defaults for module
-        cli compose defaults get
-        
-        # Get specific variable default
-        cli compose defaults get service_name
-    """
-    from .config import ConfigManager
-    config = ConfigManager()
-    
-    if var_name:
-      # Get specific variable default
-      value = config.get_default_value(self.name, var_name)
-      if value is not None:
-        console.print(f"[green]{var_name}[/green] = [yellow]{value}[/yellow]")
-      else:
-        self.display.display_warning(f"No default set for variable '{var_name}'", context=f"module '{self.name}'")
-    else:
-      # Show all defaults (flat list)
-      defaults = config.get_defaults(self.name)
-      if defaults:
-        console.print(f"[bold]Config defaults for module '{self.name}':[/bold]\n")
-        for var_name, var_value in defaults.items():
-          console.print(f"  [green]{var_name}[/green] = [yellow]{var_value}[/yellow]")
-      else:
-        console.print(f"[yellow]No defaults configured for module '{self.name}'[/yellow]")
-
-  def config_set(
-    self,
-    var_name: str = Argument(..., help="Variable name or var=value format"),
-    value: Optional[str] = Argument(None, help="Default value (not needed if using var=value format)"),
-  ) -> None:
-    """Set a default value for a variable.
-    
-    This only sets the DEFAULT VALUE, not the variable spec.
-    The variable must be defined in the module or template spec.
-    
-    Supports both formats:
-      - var_name value
-      - var_name=value
-    
-    Examples:
-        # Set default value (format 1)
-        cli compose defaults set service_name my-awesome-app
-        
-        # Set default value (format 2)
-        cli compose defaults set service_name=my-awesome-app
-        
-        # Set author for all compose templates
-        cli compose defaults set author "Christian Lempa"
-    """
-    from .config import ConfigManager
-    config = ConfigManager()
-    
-    # Parse var_name and value - support both "var value" and "var=value" formats
-    if '=' in var_name and value is None:
-      # Format: var_name=value
-      parts = var_name.split('=', 1)
-      actual_var_name = parts[0]
-      actual_value = parts[1]
-    elif value is not None:
-      # Format: var_name value
-      actual_var_name = var_name
-      actual_value = value
-    else:
-      self.display.display_error(f"Missing value for variable '{var_name}'", context="config set")
-      console.print(f"[dim]Usage: defaults set VAR_NAME VALUE or defaults set VAR_NAME=VALUE[/dim]")
-      raise Exit(code=1)
-    
-    # Set the default value
-    config.set_default_value(self.name, actual_var_name, actual_value)
-    self.display.display_success(f"Set default: [cyan]{actual_var_name}[/cyan] = [yellow]{actual_value}[/yellow]")
-    console.print(f"\n[dim]This will be used as the default value when generating templates with this module.[/dim]")
-
-  def config_remove(
-    self,
-    var_name: str = Argument(..., help="Variable name to remove"),
-  ) -> None:
-    """Remove a specific default variable value.
-    
-    Examples:
-        # Remove a default value
-        cli compose defaults rm service_name
-    """
-    from .config import ConfigManager
-    config = ConfigManager()
-    defaults = config.get_defaults(self.name)
-    
-    if not defaults:
-      console.print(f"[yellow]No defaults configured for module '{self.name}'[/yellow]")
-      return
-    
-    if var_name in defaults:
-      del defaults[var_name]
-      config.set_defaults(self.name, defaults)
-      self.display.display_success(f"Removed default for '{var_name}'")
-    else:
-      self.display.display_error(f"No default found for variable '{var_name}'")
-
-  def config_clear(
-    self,
-    var_name: Optional[str] = Argument(None, help="Variable name to clear (omit to clear all defaults)"),
-    force: bool = Option(False, "--force", "-f", help="Skip confirmation prompt"),
-  ) -> None:
-    """Clear default value(s) for this module.
-    
-    Examples:
-        # Clear specific variable default
-        cli compose defaults clear service_name
-        
-        # Clear all defaults for module
-        cli compose defaults clear --force
-    """
-    from .config import ConfigManager
-    config = ConfigManager()
-    defaults = config.get_defaults(self.name)
-    
-    if not defaults:
-      console.print(f"[yellow]No defaults configured for module '{self.name}'[/yellow]")
-      return
-    
-    if var_name:
-      # Clear specific variable
-      if var_name in defaults:
-        del defaults[var_name]
-        config.set_defaults(self.name, defaults)
-        self.display.display_success(f"Cleared default for '{var_name}'")
-      else:
-        self.display.display_error(f"No default found for variable '{var_name}'")
-    else:
-      # Clear all defaults
-      if not force:
-        detail_lines = [f"This will clear ALL defaults for module '{self.name}':", ""]
-        for var_name, var_value in defaults.items():
-          detail_lines.append(f"  [green]{var_name}[/green] = [yellow]{var_value}[/yellow]")
-        
-        self.display.display_warning("Warning: This will clear ALL defaults")
+        if dir_exists:
+            for file_path in rendered_files.keys():
+                full_path = output_dir / file_path
+                if full_path.exists():
+                    existing_files.append(full_path)
+
+        # Warn if directory is not empty
+        if dir_not_empty:
+            if interactive:
+                details = []
+                if existing_files:
+                    details.append(
+                        f"{len(existing_files)} file(s) will be overwritten."
+                    )
+
+                if not self.display.display_warning_with_confirmation(
+                    f"Directory '{output_dir}' is not empty.",
+                    details if details else None,
+                    default=False,
+                ):
+                    self.display.display_info("Generation cancelled")
+                    return None
+            else:
+                # Non-interactive mode: show warning but continue
+                logger.warning(f"Directory '{output_dir}' is not empty")
+                if existing_files:
+                    logger.warning(f"{len(existing_files)} file(s) will be overwritten")
+
+        return existing_files
+
+    def _get_generation_confirmation(
+        self,
+        output_dir: Path,
+        rendered_files: Dict[str, str],
+        existing_files: Optional[List[Path]],
+        dir_not_empty: bool,
+        dry_run: bool,
+        interactive: bool,
+    ) -> bool:
+        """Display file generation confirmation and get user approval.
+
+        Args:
+            output_dir: Output directory path
+            rendered_files: Dictionary of file paths to content
+            existing_files: List of existing files that will be overwritten
+            dir_not_empty: Whether output directory already contains files
+            dry_run: Whether this is a dry run
+            interactive: Whether to prompt for confirmation
+
+        Returns:
+            True if user confirms generation, False to cancel
+        """
+        if not interactive:
+            return True
+
+        self.display.display_file_generation_confirmation(
+            output_dir, rendered_files, existing_files if existing_files else None
+        )
+
+        # Final confirmation (only if we didn't already ask about overwriting)
+        if not dir_not_empty and not dry_run:
+            if not Confirm.ask("Generate these files?", default=True):
+                self.display.display_info("Generation cancelled")
+                return False
+
+        return True
+
+    def _execute_dry_run(
+        self,
+        id: str,
+        output_dir: Path,
+        rendered_files: Dict[str, str],
+        show_files: bool,
+    ) -> None:
+        """Execute dry run mode with comprehensive simulation.
+
+        Simulates all filesystem operations that would occur during actual generation,
+        including directory creation, file writing, and permission checks.
+
+        Args:
+            id: Template ID
+            output_dir: Directory where files would be written
+            rendered_files: Dictionary of file paths to rendered content
+            show_files: Whether to display file contents
+        """
+        import os
+
         console.print()
         console.print()
-        for line in detail_lines:
-          console.print(line)
+        console.print(
+            "[bold cyan]Dry Run Mode - Simulating File Generation[/bold cyan]"
+        )
         console.print()
         console.print()
-        if not Confirm.ask(f"[bold red]Are you sure?[/bold red]", default=False):
-          console.print("[green]Operation cancelled.[/green]")
-          return
-      
-      config.clear_defaults(self.name)
-      self.display.display_success(f"Cleared all defaults for module '{self.name}'")
-
-  def config_list(self) -> None:
-    """Display the defaults for this specific module in YAML format.
-    
-    Examples:
-        # Show the defaults for the current module
-        cli compose defaults list
-    """
-    from .config import ConfigManager
-    import yaml
-    
-    config = ConfigManager()
-    
-    # Get only the defaults for this module
-    defaults = config.get_defaults(self.name)
-    
-    if not defaults:
-      console.print(f"[yellow]No configuration found for module '{self.name}'[/yellow]")
-      console.print(f"\n[dim]Config file location: {config.get_config_path()}[/dim]")
-      return
-    
-    # Create a minimal config structure with only this module's defaults
-    module_config = {
-      "defaults": {
-        self.name: defaults
-      }
-    }
-    
-    # Convert config to YAML string
-    yaml_output = yaml.dump(module_config, default_flow_style=False, sort_keys=False)
-    
-    console.print(f"[bold]Configuration for module:[/bold] [cyan]{self.name}[/cyan]")
-    console.print(f"[dim]Config file: {config.get_config_path()}[/dim]\n")
-    console.print(Panel(yaml_output, title=f"{self.name.capitalize()} Config", border_style="blue"))
-
-  def validate(
-    self,
-    template_id: str = Argument(None, help="Template ID to validate (if omitted, validates all templates)"),
-    path: Optional[str] = Option(None, "--path", "-p", help="Validate a template from a specific directory path"),
-    verbose: bool = Option(False, "--verbose", "-v", help="Show detailed validation information"),
-    semantic: bool = Option(True, "--semantic/--no-semantic", help="Enable semantic validation (Docker Compose schema, etc.)")
-  ) -> None:
-    """Validate templates for Jinja2 syntax, undefined variables, and semantic correctness.
-    
-    Validation includes:
-    - Jinja2 syntax checking
-    - Variable definition checking
-    - Semantic validation (when --semantic is enabled):
-      - Docker Compose file structure
-      - YAML syntax
-      - Configuration best practices
-    
-    Examples:
-        # Validate all templates in this module
-        cli compose validate
-        
-        # Validate a specific template
-        cli compose validate gitlab
-        
-        # Validate a template from a specific path
-        cli compose validate --path /path/to/template
-        
-        # Validate with verbose output
-        cli compose validate --verbose
-        
-        # Skip semantic validation (only Jinja2)
-        cli compose validate --no-semantic
-    """
-    from rich.table import Table
-    from .validators import get_validator_registry
-    
-    # Validate from path takes precedence
-    if path:
-      try:
-        template_path = Path(path).resolve()
-        if not template_path.exists():
-          self.display.display_error(f"Path does not exist: {path}")
-          raise Exit(code=1)
-        if not template_path.is_dir():
-          self.display.display_error(f"Path is not a directory: {path}")
-          raise Exit(code=1)
-        
-        console.print(f"[bold]Validating template from path:[/bold] [cyan]{template_path}[/cyan]\n")
-        template = Template(template_path, library_name="local")
-        template_id = template.id
-      except Exception as e:
-        self.display.display_error(f"Failed to load template from path '{path}': {e}")
-        raise Exit(code=1)
-    elif template_id:
-      # Validate a specific template by ID
-      try:
-        template = self._load_template_by_id(template_id)
-        console.print(f"[bold]Validating template:[/bold] [cyan]{template_id}[/cyan]\n")
-      except Exception as e:
-        self.display.display_error(f"Failed to load template '{template_id}': {e}")
-        raise Exit(code=1)
-    else:
-      # Validate all templates - handled separately below
-      template = None
-    
-    # Single template validation
-    if template:
-      try:
-        # Trigger validation by accessing used_variables
-        _ = template.used_variables
-        # Trigger variable definition validation by accessing variables
-        _ = template.variables
-        self.display.display_success("Jinja2 validation passed")
-        
-        # Semantic validation
-        if semantic:
-          console.print(f"\n[bold cyan]Running semantic validation...[/bold cyan]")
-          registry = get_validator_registry()
-          has_semantic_errors = False
-          
-          # Render template with default values for validation
-          debug_mode = logger.isEnabledFor(logging.DEBUG)
-          rendered_files, _ = template.render(template.variables, debug=debug_mode)
-          
-          for file_path, content in rendered_files.items():
-            result = registry.validate_file(content, file_path)
-            
-            if result.errors or result.warnings or (verbose and result.info):
-              console.print(f"\n[cyan]File:[/cyan] {file_path}")
-              result.display(f"{file_path}")
-              
-              if result.errors:
-                has_semantic_errors = True
-          
-          if not has_semantic_errors:
-            self.display.display_success("Semantic validation passed")
-          else:
-            self.display.display_error("Semantic validation found errors")
-            raise Exit(code=1)
-        
-        if verbose:
-          console.print(f"\n[dim]Template path: {template.template_dir}[/dim]")
-          console.print(f"[dim]Found {len(template.used_variables)} variables[/dim]")
-          if semantic:
-            console.print(f"[dim]Generated {len(rendered_files)} files[/dim]")
-      
-      except TemplateRenderError as e:
-        # Display enhanced error information for template rendering errors
-        self.display.display_template_render_error(e, context=f"template '{template_id}'")
-        raise Exit(code=1)
-      except (TemplateSyntaxError, TemplateValidationError, ValueError) as e:
-        self.display.display_error(f"Validation failed for '{template_id}':")
-        console.print(f"\n{e}")
-        raise Exit(code=1)
-      except Exception as e:
-        self.display.display_error(f"Unexpected error validating '{template_id}': {e}")
-        raise Exit(code=1)
-      
-      return
-    else:
-      # Validate all templates
-      console.print(f"[bold]Validating all {self.name} templates...[/bold]\n")
-      
-      entries = self.libraries.find(self.name, sort_results=True)
-      total = len(entries)
-      valid_count = 0
-      invalid_count = 0
-      errors = []
-      
-      for template_dir, library_name in entries:
-        template_id = template_dir.name
+
+        # Simulate directory creation
+        self.display.display_heading("Directory Operations", icon_type="folder")
+
+        # Check if output directory exists
+        if output_dir.exists():
+            self.display.display_success(
+                f"Output directory exists: [cyan]{output_dir}[/cyan]"
+            )
+            # Check if we have write permissions
+            if os.access(output_dir, os.W_OK):
+                self.display.display_success("Write permission verified")
+            else:
+                self.display.display_warning("Write permission may be denied")
+        else:
+            console.print(
+                f"  [dim]→[/dim] Would create output directory: [cyan]{output_dir}[/cyan]"
+            )
+            # Check if parent directory exists and is writable
+            parent = output_dir.parent
+            if parent.exists() and os.access(parent, os.W_OK):
+                self.display.display_success("Parent directory writable")
+            else:
+                self.display.display_warning("Parent directory may not be writable")
+
+        # Collect unique subdirectories that would be created
+        subdirs = set()
+        for file_path in rendered_files.keys():
+            parts = Path(file_path).parts
+            for i in range(1, len(parts)):
+                subdirs.add(Path(*parts[:i]))
+
+        if subdirs:
+            console.print(
+                f"  [dim]→[/dim] Would create {len(subdirs)} subdirectory(ies)"
+            )
+            for subdir in sorted(subdirs):
+                console.print(f"    [dim]📁[/dim] {subdir}/")
+
+        console.print()
+
+        # Display file operations in a table
+        self.display.display_heading("File Operations", icon_type="file")
+
+        total_size = 0
+        new_files = 0
+        overwrite_files = 0
+        file_operations = []
+
+        for file_path, content in sorted(rendered_files.items()):
+            full_path = output_dir / file_path
+            file_size = len(content.encode("utf-8"))
+            total_size += file_size
+
+            # Determine status
+            if full_path.exists():
+                status = "Overwrite"
+                overwrite_files += 1
+            else:
+                status = "Create"
+                new_files += 1
+
+            file_operations.append((file_path, file_size, status))
+
+        self.display.display_file_operation_table(file_operations)
+        console.print()
+
+        # Summary statistics
+        if total_size < 1024:
+            size_str = f"{total_size}B"
+        elif total_size < 1024 * 1024:
+            size_str = f"{total_size / 1024:.1f}KB"
+        else:
+            size_str = f"{total_size / (1024 * 1024):.1f}MB"
+
+        summary_items = {
+            "Total files:": str(len(rendered_files)),
+            "New files:": str(new_files),
+            "Files to overwrite:": str(overwrite_files),
+            "Total size:": size_str,
+        }
+        self.display.display_summary_table("Summary", summary_items)
+        console.print()
+
+        # Show file contents if requested
+        if show_files:
+            console.print("[bold cyan]Generated File Contents:[/bold cyan]")
+            console.print()
+            for file_path, content in sorted(rendered_files.items()):
+                console.print(f"[cyan]File:[/cyan] {file_path}")
+                print(f"{'─' * 80}")
+                print(content)
+                print()  # Add blank line after content
+            console.print()
+
+        self.display.display_success("Dry run complete - no files were written")
+        console.print(f"[dim]Files would have been generated in '{output_dir}'[/dim]")
+        logger.info(
+            f"Dry run completed for template '{id}' - {len(rendered_files)} files, {total_size} bytes"
+        )
+
+    def _write_generated_files(
+        self, output_dir: Path, rendered_files: Dict[str, str], quiet: bool = False
+    ) -> None:
+        """Write rendered files to the output directory.
+
+        Args:
+            output_dir: Directory to write files to
+            rendered_files: Dictionary of file paths to rendered content
+            quiet: Suppress output messages
+        """
+        output_dir.mkdir(parents=True, exist_ok=True)
+
+        for file_path, content in rendered_files.items():
+            full_path = output_dir / file_path
+            full_path.parent.mkdir(parents=True, exist_ok=True)
+            with open(full_path, "w", encoding="utf-8") as f:
+                f.write(content)
+            if not quiet:
+                console.print(
+                    f"[green]Generated file: {file_path}[/green]"
+                )  # Keep simple per-file output
+
+        if not quiet:
+            self.display.display_success(
+                f"Template generated successfully in '{output_dir}'"
+            )
+        logger.info(f"Template written to directory: {output_dir}")
+
+    def generate(
+        self,
+        id: str = Argument(..., help="Template ID"),
+        directory: Optional[str] = Argument(
+            None, help="Output directory (defaults to template ID)"
+        ),
+        interactive: bool = Option(
+            True,
+            "--interactive/--no-interactive",
+            "-i/-n",
+            help="Enable interactive prompting for variables",
+        ),
+        var: Optional[list[str]] = Option(
+            None,
+            "--var",
+            "-v",
+            help="Variable override (repeatable). Supports: KEY=VALUE or KEY VALUE",
+        ),
+        dry_run: bool = Option(
+            False, "--dry-run", help="Preview template generation without writing files"
+        ),
+        show_files: bool = Option(
+            False,
+            "--show-files",
+            help="Display generated file contents in plain text (use with --dry-run)",
+        ),
+        quiet: bool = Option(
+            False, "--quiet", "-q", help="Suppress all non-error output"
+        ),
+        all_vars: bool = Option(
+            False,
+            "--all",
+            help="Show all variables/sections, even those with unsatisfied needs",
+        ),
+    ) -> None:
+        """Generate from template.
+
+        Variable precedence chain (lowest to highest):
+        1. Module spec (defined in cli/modules/*.py)
+        2. Template spec (from template.yaml)
+        3. Config defaults (from ~/.config/boilerplates/config.yaml)
+        4. CLI overrides (--var flags)
+
+        Examples:
+            # Generate to directory named after template
+            cli compose generate traefik
+
+            # Generate to custom directory
+            cli compose generate traefik my-proxy
+
+            # Generate with variables
+            cli compose generate traefik --var traefik_enabled=false
+
+            # Preview without writing files (dry run)
+            cli compose generate traefik --dry-run
+
+            # Preview and show generated file contents
+            cli compose generate traefik --dry-run --show-files
+        """
+        logger.info(
+            f"Starting generation for template '{id}' from module '{self.name}'"
+        )
+
+        # Create a display manager with quiet mode if needed
+        display = DisplayManager(quiet=quiet) if quiet else self.display
+
+        template = self._load_template_by_id(id)
+
+        # Apply defaults and overrides
+        self._apply_variable_defaults(template)
+        self._apply_cli_overrides(template, var)
+
+        # Re-sort sections after all overrides (toggle values may have changed)
+        if template.variables:
+            template.variables.sort_sections()
+
+        if not quiet:
+            self._display_template_details(template, id, show_all=all_vars)
+            console.print()
+
+        # Collect variable values
+        variable_values = self._collect_variable_values(template, interactive)
+
         try:
         try:
-          template = Template(template_dir, library_name=library_name)
-          # Trigger validation
-          _ = template.used_variables
-          _ = template.variables
-          valid_count += 1
-          if verbose:
-            self.display.display_success(template_id)
-        except ValueError as e:
-          invalid_count += 1
-          errors.append((template_id, str(e)))
-          if verbose:
-            self.display.display_error(template_id)
+            # Validate and render template
+            if template.variables:
+                template.variables.validate_all()
+
+            # Check if we're in debug mode (logger level is DEBUG)
+            debug_mode = logger.isEnabledFor(logging.DEBUG)
+
+            rendered_files, variable_values = template.render(
+                template.variables, debug=debug_mode
+            )
+
+            if not rendered_files:
+                display.display_error(
+                    "Template rendering returned no files",
+                    context="template generation",
+                )
+                raise Exit(code=1)
+
+            logger.info(f"Successfully rendered template '{id}'")
+
+            # Determine output directory
+            if directory:
+                output_dir = Path(directory)
+                # Check if path looks like an absolute path but is missing the leading slash
+                # This handles cases like "Users/username/path" which should be "/Users/username/path"
+                if not output_dir.is_absolute() and str(output_dir).startswith(
+                    ("Users/", "home/", "usr/", "opt/", "var/", "tmp/")
+                ):
+                    output_dir = Path("/") / output_dir
+                    logger.debug(
+                        f"Normalized relative-looking absolute path to: {output_dir}"
+                    )
+            else:
+                output_dir = Path(id)
+
+            # Check for conflicts and get confirmation (skip in quiet mode)
+            if not quiet:
+                existing_files = self._check_output_directory(
+                    output_dir, rendered_files, interactive
+                )
+                if existing_files is None:
+                    return  # User cancelled
+
+                # Get final confirmation for generation
+                dir_not_empty = output_dir.exists() and any(output_dir.iterdir())
+                if not self._get_generation_confirmation(
+                    output_dir,
+                    rendered_files,
+                    existing_files,
+                    dir_not_empty,
+                    dry_run,
+                    interactive,
+                ):
+                    return  # User cancelled
+            else:
+                # In quiet mode, just check for existing files without prompts
+                existing_files = []
+
+            # Execute generation (dry run or actual)
+            if dry_run:
+                if not quiet:
+                    self._execute_dry_run(id, output_dir, rendered_files, show_files)
+            else:
+                self._write_generated_files(output_dir, rendered_files, quiet=quiet)
+
+            # Display next steps (not in quiet mode)
+            if template.metadata.next_steps and not quiet:
+                display.display_next_steps(
+                    template.metadata.next_steps, variable_values
+                )
+
+        except TemplateRenderError as e:
+            # Display enhanced error information for template rendering errors (always show errors)
+            display.display_template_render_error(e, context=f"template '{id}'")
+            raise Exit(code=1)
         except Exception as e:
         except Exception as e:
-          invalid_count += 1
-          errors.append((template_id, f"Load error: {e}"))
-          if verbose:
-            self.display.display_warning(template_id)
-      
-      # Summary
-      summary_items = {
-        "Total templates:": str(total),
-        "[green]Valid:[/green]": str(valid_count),
-        "[red]Invalid:[/red]": str(invalid_count)
-      }
-      self.display.display_summary_table("Validation Summary", summary_items)
-      
-      # Show errors if any
-      if errors:
-        console.print(f"\n[bold red]Validation Errors:[/bold red]")
-        for template_id, error_msg in errors:
-          console.print(f"\n[yellow]Template:[/yellow] [cyan]{template_id}[/cyan]")
-          console.print(f"[dim]{error_msg}[/dim]")
-        raise Exit(code=1)
-      else:
-        self.display.display_success("All templates are valid!")
-
-  @classmethod
-  def register_cli(cls, app: Typer) -> None:
-    """Register module commands with the main app."""
-    logger.debug(f"Registering CLI commands for module '{cls.name}'")
-    
-    module_instance = cls()
-    
-    module_app = Typer(help=cls.description)
-    
-    module_app.command("list")(module_instance.list)
-    module_app.command("search")(module_instance.search)
-    module_app.command("show")(module_instance.show)
-    module_app.command("validate")(module_instance.validate)
-    
-    module_app.command(
-      "generate", 
-      context_settings={"allow_extra_args": True, "ignore_unknown_options": True}
-    )(module_instance.generate)
-    
-    # Add defaults commands (simplified - only manage default values)
-    defaults_app = Typer(help="Manage default values for template variables")
-    defaults_app.command("get", help="Get default value(s)")(module_instance.config_get)
-    defaults_app.command("set", help="Set a default value")(module_instance.config_set)
-    defaults_app.command("rm", help="Remove a specific default value")(module_instance.config_remove)
-    defaults_app.command("clear", help="Clear default value(s)")(module_instance.config_clear)
-    defaults_app.command("list", help="Display the config for this module in YAML format")(module_instance.config_list)
-    module_app.add_typer(defaults_app, name="defaults")
-    
-    app.add_typer(module_app, name=cls.name, help=cls.description)
-    logger.info(f"Module '{cls.name}' CLI commands registered")
-
-  def _load_template_by_id(self, id: str) -> Template:
-    """Load a template by its ID, supporting qualified IDs.
-    
-    Supports both formats:
-    - Simple: "alloy" (uses priority system)
-    - Qualified: "alloy.default" (loads from specific library)
-    
-    Args:
-        id: Template ID (simple or qualified)
-    
-    Returns:
-        Template instance
-    
-    Raises:
-        FileNotFoundError: If template is not found
-    """
-    logger.debug(f"Loading template with ID '{id}' from module '{self.name}'")
-    
-    # find_by_id now handles both simple and qualified IDs
-    result = self.libraries.find_by_id(self.name, id)
-    
-    if not result:
-      raise FileNotFoundError(f"Template '{id}' not found in module '{self.name}'")
-    
-    template_dir, library_name = result
-    
-    # Get library type
-    library = next((lib for lib in self.libraries.libraries if lib.name == library_name), None)
-    library_type = library.library_type if library else "git"
-    
-    try:
-      template = Template(template_dir, library_name=library_name, library_type=library_type)
-      
-      # Validate schema version compatibility
-      template._validate_schema_version(self.schema_version, self.name)
-      
-      # If the original ID was qualified, preserve it
-      if '.' in id:
-        template.id = id
-      
-      return template
-    except Exception as exc:
-      logger.error(f"Failed to load template '{id}': {exc}")
-      raise FileNotFoundError(f"Template '{id}' could not be loaded: {exc}") from exc
-
-  def _display_template_details(self, template: Template, id: str, show_all: bool = False) -> None:
-    """Display template information panel and variables table.
-    
-    Args:
-        template: Template instance to display
-        id: Template ID
-        show_all: If True, show all variables/sections regardless of needs satisfaction
-    """
-    self.display.display_template_details(template, id, show_all=show_all)
+            display.display_error(str(e), context=f"generating template '{id}'")
+            raise Exit(code=1)
+
+    def config_get(
+        self,
+        var_name: Optional[str] = Argument(
+            None, help="Variable name to get (omit to show all defaults)"
+        ),
+    ) -> None:
+        """Get default value(s) for this module.
+
+        Examples:
+            # Get all defaults for module
+            cli compose defaults get
+
+            # Get specific variable default
+            cli compose defaults get service_name
+        """
+        from .config import ConfigManager
+
+        config = ConfigManager()
+
+        if var_name:
+            # Get specific variable default
+            value = config.get_default_value(self.name, var_name)
+            if value is not None:
+                console.print(f"[green]{var_name}[/green] = [yellow]{value}[/yellow]")
+            else:
+                self.display.display_warning(
+                    f"No default set for variable '{var_name}'",
+                    context=f"module '{self.name}'",
+                )
+        else:
+            # Show all defaults (flat list)
+            defaults = config.get_defaults(self.name)
+            if defaults:
+                console.print(
+                    f"[bold]Config defaults for module '{self.name}':[/bold]\n"
+                )
+                for var_name, var_value in defaults.items():
+                    console.print(
+                        f"  [green]{var_name}[/green] = [yellow]{var_value}[/yellow]"
+                    )
+            else:
+                console.print(
+                    f"[yellow]No defaults configured for module '{self.name}'[/yellow]"
+                )
+
+    def config_set(
+        self,
+        var_name: str = Argument(..., help="Variable name or var=value format"),
+        value: Optional[str] = Argument(
+            None, help="Default value (not needed if using var=value format)"
+        ),
+    ) -> None:
+        """Set a default value for a variable.
+
+        This only sets the DEFAULT VALUE, not the variable spec.
+        The variable must be defined in the module or template spec.
+
+        Supports both formats:
+          - var_name value
+          - var_name=value
+
+        Examples:
+            # Set default value (format 1)
+            cli compose defaults set service_name my-awesome-app
+
+            # Set default value (format 2)
+            cli compose defaults set service_name=my-awesome-app
+
+            # Set author for all compose templates
+            cli compose defaults set author "Christian Lempa"
+        """
+        from .config import ConfigManager
+
+        config = ConfigManager()
+
+        # Parse var_name and value - support both "var value" and "var=value" formats
+        if "=" in var_name and value is None:
+            # Format: var_name=value
+            parts = var_name.split("=", 1)
+            actual_var_name = parts[0]
+            actual_value = parts[1]
+        elif value is not None:
+            # Format: var_name value
+            actual_var_name = var_name
+            actual_value = value
+        else:
+            self.display.display_error(
+                f"Missing value for variable '{var_name}'", context="config set"
+            )
+            console.print(
+                "[dim]Usage: defaults set VAR_NAME VALUE or defaults set VAR_NAME=VALUE[/dim]"
+            )
+            raise Exit(code=1)
+
+        # Set the default value
+        config.set_default_value(self.name, actual_var_name, actual_value)
+        self.display.display_success(
+            f"Set default: [cyan]{actual_var_name}[/cyan] = [yellow]{actual_value}[/yellow]"
+        )
+        console.print(
+            "\n[dim]This will be used as the default value when generating templates with this module.[/dim]"
+        )
+
+    def config_remove(
+        self,
+        var_name: str = Argument(..., help="Variable name to remove"),
+    ) -> None:
+        """Remove a specific default variable value.
+
+        Examples:
+            # Remove a default value
+            cli compose defaults rm service_name
+        """
+        from .config import ConfigManager
+
+        config = ConfigManager()
+        defaults = config.get_defaults(self.name)
+
+        if not defaults:
+            console.print(
+                f"[yellow]No defaults configured for module '{self.name}'[/yellow]"
+            )
+            return
+
+        if var_name in defaults:
+            del defaults[var_name]
+            config.set_defaults(self.name, defaults)
+            self.display.display_success(f"Removed default for '{var_name}'")
+        else:
+            self.display.display_error(f"No default found for variable '{var_name}'")
+
+    def config_clear(
+        self,
+        var_name: Optional[str] = Argument(
+            None, help="Variable name to clear (omit to clear all defaults)"
+        ),
+        force: bool = Option(False, "--force", "-f", help="Skip confirmation prompt"),
+    ) -> None:
+        """Clear default value(s) for this module.
+
+        Examples:
+            # Clear specific variable default
+            cli compose defaults clear service_name
+
+            # Clear all defaults for module
+            cli compose defaults clear --force
+        """
+        from .config import ConfigManager
+
+        config = ConfigManager()
+        defaults = config.get_defaults(self.name)
+
+        if not defaults:
+            console.print(
+                f"[yellow]No defaults configured for module '{self.name}'[/yellow]"
+            )
+            return
+
+        if var_name:
+            # Clear specific variable
+            if var_name in defaults:
+                del defaults[var_name]
+                config.set_defaults(self.name, defaults)
+                self.display.display_success(f"Cleared default for '{var_name}'")
+            else:
+                self.display.display_error(
+                    f"No default found for variable '{var_name}'"
+                )
+        else:
+            # Clear all defaults
+            if not force:
+                detail_lines = [
+                    f"This will clear ALL defaults for module '{self.name}':",
+                    "",
+                ]
+                for var_name, var_value in defaults.items():
+                    detail_lines.append(
+                        f"  [green]{var_name}[/green] = [yellow]{var_value}[/yellow]"
+                    )
+
+                self.display.display_warning("Warning: This will clear ALL defaults")
+                console.print()
+                for line in detail_lines:
+                    console.print(line)
+                console.print()
+                if not Confirm.ask("[bold red]Are you sure?[/bold red]", default=False):
+                    console.print("[green]Operation cancelled.[/green]")
+                    return
+
+            config.clear_defaults(self.name)
+            self.display.display_success(
+                f"Cleared all defaults for module '{self.name}'"
+            )
+
+    def config_list(self) -> None:
+        """Display the defaults for this specific module in YAML format.
+
+        Examples:
+            # Show the defaults for the current module
+            cli compose defaults list
+        """
+        from .config import ConfigManager
+        import yaml
+
+        config = ConfigManager()
+
+        # Get only the defaults for this module
+        defaults = config.get_defaults(self.name)
+
+        if not defaults:
+            console.print(
+                f"[yellow]No configuration found for module '{self.name}'[/yellow]"
+            )
+            console.print(
+                f"\n[dim]Config file location: {config.get_config_path()}[/dim]"
+            )
+            return
+
+        # Create a minimal config structure with only this module's defaults
+        module_config = {"defaults": {self.name: defaults}}
+
+        # Convert config to YAML string
+        yaml_output = yaml.dump(
+            module_config, default_flow_style=False, sort_keys=False
+        )
+
+        console.print(
+            f"[bold]Configuration for module:[/bold] [cyan]{self.name}[/cyan]"
+        )
+        console.print(f"[dim]Config file: {config.get_config_path()}[/dim]\n")
+        console.print(
+            Panel(
+                yaml_output,
+                title=f"{self.name.capitalize()} Config",
+                border_style="blue",
+            )
+        )
+
+    def validate(
+        self,
+        template_id: str = Argument(
+            None, help="Template ID to validate (if omitted, validates all templates)"
+        ),
+        path: Optional[str] = Option(
+            None,
+            "--path",
+            "-p",
+            help="Validate a template from a specific directory path",
+        ),
+        verbose: bool = Option(
+            False, "--verbose", "-v", help="Show detailed validation information"
+        ),
+        semantic: bool = Option(
+            True,
+            "--semantic/--no-semantic",
+            help="Enable semantic validation (Docker Compose schema, etc.)",
+        ),
+    ) -> None:
+        """Validate templates for Jinja2 syntax, undefined variables, and semantic correctness.
+
+        Validation includes:
+        - Jinja2 syntax checking
+        - Variable definition checking
+        - Semantic validation (when --semantic is enabled):
+          - Docker Compose file structure
+          - YAML syntax
+          - Configuration best practices
+
+        Examples:
+            # Validate all templates in this module
+            cli compose validate
+
+            # Validate a specific template
+            cli compose validate gitlab
+
+            # Validate a template from a specific path
+            cli compose validate --path /path/to/template
+
+            # Validate with verbose output
+            cli compose validate --verbose
+
+            # Skip semantic validation (only Jinja2)
+            cli compose validate --no-semantic
+        """
+        from .validators import get_validator_registry
+
+        # Validate from path takes precedence
+        if path:
+            try:
+                template_path = Path(path).resolve()
+                if not template_path.exists():
+                    self.display.display_error(f"Path does not exist: {path}")
+                    raise Exit(code=1)
+                if not template_path.is_dir():
+                    self.display.display_error(f"Path is not a directory: {path}")
+                    raise Exit(code=1)
+
+                console.print(
+                    f"[bold]Validating template from path:[/bold] [cyan]{template_path}[/cyan]\n"
+                )
+                template = Template(template_path, library_name="local")
+                template_id = template.id
+            except Exception as e:
+                self.display.display_error(
+                    f"Failed to load template from path '{path}': {e}"
+                )
+                raise Exit(code=1)
+        elif template_id:
+            # Validate a specific template by ID
+            try:
+                template = self._load_template_by_id(template_id)
+                console.print(
+                    f"[bold]Validating template:[/bold] [cyan]{template_id}[/cyan]\n"
+                )
+            except Exception as e:
+                self.display.display_error(
+                    f"Failed to load template '{template_id}': {e}"
+                )
+                raise Exit(code=1)
+        else:
+            # Validate all templates - handled separately below
+            template = None
+
+        # Single template validation
+        if template:
+            try:
+                # Trigger validation by accessing used_variables
+                _ = template.used_variables
+                # Trigger variable definition validation by accessing variables
+                _ = template.variables
+                self.display.display_success("Jinja2 validation passed")
+
+                # Semantic validation
+                if semantic:
+                    console.print(
+                        "\n[bold cyan]Running semantic validation...[/bold cyan]"
+                    )
+                    registry = get_validator_registry()
+                    has_semantic_errors = False
+
+                    # Render template with default values for validation
+                    debug_mode = logger.isEnabledFor(logging.DEBUG)
+                    rendered_files, _ = template.render(
+                        template.variables, debug=debug_mode
+                    )
+
+                    for file_path, content in rendered_files.items():
+                        result = registry.validate_file(content, file_path)
+
+                        if (
+                            result.errors
+                            or result.warnings
+                            or (verbose and result.info)
+                        ):
+                            console.print(f"\n[cyan]File:[/cyan] {file_path}")
+                            result.display(f"{file_path}")
+
+                            if result.errors:
+                                has_semantic_errors = True
+
+                    if not has_semantic_errors:
+                        self.display.display_success("Semantic validation passed")
+                    else:
+                        self.display.display_error("Semantic validation found errors")
+                        raise Exit(code=1)
+
+                if verbose:
+                    console.print(
+                        f"\n[dim]Template path: {template.template_dir}[/dim]"
+                    )
+                    console.print(
+                        f"[dim]Found {len(template.used_variables)} variables[/dim]"
+                    )
+                    if semantic:
+                        console.print(
+                            f"[dim]Generated {len(rendered_files)} files[/dim]"
+                        )
+
+            except TemplateRenderError as e:
+                # Display enhanced error information for template rendering errors
+                self.display.display_template_render_error(
+                    e, context=f"template '{template_id}'"
+                )
+                raise Exit(code=1)
+            except (TemplateSyntaxError, TemplateValidationError, ValueError) as e:
+                self.display.display_error(f"Validation failed for '{template_id}':")
+                console.print(f"\n{e}")
+                raise Exit(code=1)
+            except Exception as e:
+                self.display.display_error(
+                    f"Unexpected error validating '{template_id}': {e}"
+                )
+                raise Exit(code=1)
+
+            return
+        else:
+            # Validate all templates
+            console.print(f"[bold]Validating all {self.name} templates...[/bold]\n")
+
+            entries = self.libraries.find(self.name, sort_results=True)
+            total = len(entries)
+            valid_count = 0
+            invalid_count = 0
+            errors = []
+
+            for template_dir, library_name in entries:
+                template_id = template_dir.name
+                try:
+                    template = Template(template_dir, library_name=library_name)
+                    # Trigger validation
+                    _ = template.used_variables
+                    _ = template.variables
+                    valid_count += 1
+                    if verbose:
+                        self.display.display_success(template_id)
+                except ValueError as e:
+                    invalid_count += 1
+                    errors.append((template_id, str(e)))
+                    if verbose:
+                        self.display.display_error(template_id)
+                except Exception as e:
+                    invalid_count += 1
+                    errors.append((template_id, f"Load error: {e}"))
+                    if verbose:
+                        self.display.display_warning(template_id)
+
+            # Summary
+            summary_items = {
+                "Total templates:": str(total),
+                "[green]Valid:[/green]": str(valid_count),
+                "[red]Invalid:[/red]": str(invalid_count),
+            }
+            self.display.display_summary_table("Validation Summary", summary_items)
+
+            # Show errors if any
+            if errors:
+                console.print("\n[bold red]Validation Errors:[/bold red]")
+                for template_id, error_msg in errors:
+                    console.print(
+                        f"\n[yellow]Template:[/yellow] [cyan]{template_id}[/cyan]"
+                    )
+                    console.print(f"[dim]{error_msg}[/dim]")
+                raise Exit(code=1)
+            else:
+                self.display.display_success("All templates are valid!")
+
+    @classmethod
+    def register_cli(cls, app: Typer) -> None:
+        """Register module commands with the main app."""
+        logger.debug(f"Registering CLI commands for module '{cls.name}'")
+
+        module_instance = cls()
+
+        module_app = Typer(help=cls.description)
+
+        module_app.command("list")(module_instance.list)
+        module_app.command("search")(module_instance.search)
+        module_app.command("show")(module_instance.show)
+        module_app.command("validate")(module_instance.validate)
+
+        module_app.command(
+            "generate",
+            context_settings={"allow_extra_args": True, "ignore_unknown_options": True},
+        )(module_instance.generate)
+
+        # Add defaults commands (simplified - only manage default values)
+        defaults_app = Typer(help="Manage default values for template variables")
+        defaults_app.command("get", help="Get default value(s)")(
+            module_instance.config_get
+        )
+        defaults_app.command("set", help="Set a default value")(
+            module_instance.config_set
+        )
+        defaults_app.command("rm", help="Remove a specific default value")(
+            module_instance.config_remove
+        )
+        defaults_app.command("clear", help="Clear default value(s)")(
+            module_instance.config_clear
+        )
+        defaults_app.command(
+            "list", help="Display the config for this module in YAML format"
+        )(module_instance.config_list)
+        module_app.add_typer(defaults_app, name="defaults")
+
+        app.add_typer(module_app, name=cls.name, help=cls.description)
+        logger.info(f"Module '{cls.name}' CLI commands registered")
+
+    def _load_template_by_id(self, id: str) -> Template:
+        """Load a template by its ID, supporting qualified IDs.
+
+        Supports both formats:
+        - Simple: "alloy" (uses priority system)
+        - Qualified: "alloy.default" (loads from specific library)
+
+        Args:
+            id: Template ID (simple or qualified)
+
+        Returns:
+            Template instance
+
+        Raises:
+            FileNotFoundError: If template is not found
+        """
+        logger.debug(f"Loading template with ID '{id}' from module '{self.name}'")
+
+        # find_by_id now handles both simple and qualified IDs
+        result = self.libraries.find_by_id(self.name, id)
+
+        if not result:
+            raise FileNotFoundError(
+                f"Template '{id}' not found in module '{self.name}'"
+            )
+
+        template_dir, library_name = result
+
+        # Get library type
+        library = next(
+            (lib for lib in self.libraries.libraries if lib.name == library_name), None
+        )
+        library_type = library.library_type if library else "git"
+
+        try:
+            template = Template(
+                template_dir, library_name=library_name, library_type=library_type
+            )
+
+            # Validate schema version compatibility
+            template._validate_schema_version(self.schema_version, self.name)
+
+            # If the original ID was qualified, preserve it
+            if "." in id:
+                template.id = id
+
+            return template
+        except Exception as exc:
+            logger.error(f"Failed to load template '{id}': {exc}")
+            raise FileNotFoundError(
+                f"Template '{id}' could not be loaded: {exc}"
+            ) from exc
+
    def _display_template_details(
        self, template: Template, id: str, show_all: bool = False
    ) -> None:
        """Display template information panel and variables table.

        Thin wrapper: all rendering is delegated to the shared DisplayManager.

        Args:
            template: Template instance to display
            id: Template ID
            show_all: If True, show all variables/sections regardless of needs satisfaction
        """
        self.display.display_template_details(template, id, show_all=show_all)

+ 266 - 219
cli/core/prompt.py

@@ -1,10 +1,9 @@
 from __future__ import annotations
 from __future__ import annotations
 
 
-from typing import Dict, Any, List, Callable
+from typing import Dict, Any, Callable
 import logging
 import logging
 from rich.console import Console
 from rich.console import Console
 from rich.prompt import Prompt, Confirm, IntPrompt
 from rich.prompt import Prompt, Confirm, IntPrompt
-from rich.table import Table
 
 
 from .display import DisplayManager
 from .display import DisplayManager
 from .variable import Variable
 from .variable import Variable
@@ -14,222 +13,270 @@ logger = logging.getLogger(__name__)
 
 
 
 
 class PromptHandler:
 class PromptHandler:
-  """Simple interactive prompt handler for collecting template variables."""
-
-  def __init__(self) -> None:
-    self.console = Console()
-    self.display = DisplayManager()
-
-  def collect_variables(self, variables: VariableCollection) -> dict[str, Any]:
-    """Collect values for variables by iterating through sections.
-    
-    Args:
-        variables: VariableCollection with organized sections and variables
-        
-    Returns:
-        Dict of variable names to collected values
-    """
-    if not Confirm.ask("Customize any settings?", default=False):
-      logger.info("User opted to keep all default values")
-      return {}
-
-    collected: Dict[str, Any] = {}
-    prompted_variables: set[str] = set()  # Track which variables we've already prompted for
-
-    # Process each section
-    for section_key, section in variables.get_sections().items():
-      if not section.variables:
-        continue
-
-      # Check if dependencies are satisfied
-      if not variables.is_section_satisfied(section_key):
-        # Get list of unsatisfied dependencies for better user feedback
-        unsatisfied_keys = [dep for dep in section.needs if not variables.is_section_satisfied(dep)]
-        # Convert section keys to titles for user-friendly display
-        unsatisfied_titles = []
-        for dep_key in unsatisfied_keys:
-          dep_section = variables.get_section(dep_key)
-          if dep_section:
-            unsatisfied_titles.append(dep_section.title)
-          else:
-            unsatisfied_titles.append(dep_key)
-        dep_names = ", ".join(unsatisfied_titles) if unsatisfied_titles else "unknown"
-        self.display.display_skipped(section.title, f"requires {dep_names} to be enabled")
-        logger.debug(f"Skipping section '{section_key}' - dependencies not satisfied: {dep_names}")
-        continue
-
-      # Always show section header first
-      self.display.display_section_header(section.title, section.description)
-
-      # Track whether this section will be enabled
-      section_will_be_enabled = True
-      
-      # Handle section toggle - skip for required sections
-      if section.required:
-        # Required sections are always processed, no toggle prompt needed
-        logger.debug(f"Processing required section '{section.key}' without toggle prompt")
-      elif section.toggle:
-        toggle_var = section.variables.get(section.toggle)
-        if toggle_var:
-          # Use description for prompt if available, otherwise use title
-          prompt_text = section.description if section.description else f"Enable {section.title}?"
-          current_value = toggle_var.convert(toggle_var.value)
-          new_value = self._prompt_bool(prompt_text, current_value)
-          
-          if new_value != current_value:
-            collected[toggle_var.name] = new_value
-            toggle_var.value = new_value
-          
-          # Use section's native is_enabled() method
-          if not section.is_enabled():
-            section_will_be_enabled = False
-
-      # Collect variables in this section
-      for var_name, variable in section.variables.items():
-        # Skip toggle variable (already handled)
-        if section.toggle and var_name == section.toggle:
-          continue
-        
-        # Skip non-required variables if section is disabled
-        if not section_will_be_enabled and not variable.required:
-          logger.debug(f"Skipping non-required variable '{var_name}' from disabled section '{section_key}'")
-          continue
-        
-        # Prompt for the variable
-        current_value = variable.convert(variable.value)
-        # Pass section.required so _prompt_variable can enforce required inputs
-        new_value = self._prompt_variable(variable, required=section.required)
-        
-        # Track that we've prompted for this variable
-        prompted_variables.add(var_name)
-        
-        # For autogenerated variables, always update even if None (signals autogeneration)
-        if variable.autogenerated and new_value is None:
-          collected[var_name] = None
-          variable.value = None
-        elif new_value != current_value:
-          collected[var_name] = new_value
-          variable.value = new_value
-
-    logger.info(f"Variable collection completed. Collected {len(collected)} values")
-    return collected
-
-  def _prompt_variable(self, variable: Variable, required: bool = False) -> Any:
-    """Prompt for a single variable value based on its type.
-    
-    Args:
-        variable: The variable to prompt for
-        required: Whether the containing section is required (for context/display)
-        
-    Returns:
-        The validated value entered by the user
-    """
-    logger.debug(f"Prompting for variable '{variable.name}' (type: {variable.type})")
-    
-    # Use variable's native methods for prompt text and default value
-    prompt_text = variable.get_prompt_text()
-    default_value = variable.get_normalized_default()
-
-    # Add lock icon before default value for sensitive or autogenerated variables
-    if variable.sensitive or variable.autogenerated:
-      # Format: "Prompt text 🔒 (default)"
-      # The lock icon goes between the text and the default value in parentheses
-      prompt_text = f"{prompt_text} {self.display.get_lock_icon()}"
-
-    # Check if this specific variable is required (has no default and not autogenerated)
-    var_is_required = variable.is_required()
-    
-    # If variable is required, mark it in the prompt
-    if var_is_required:
-      prompt_text = f"{prompt_text} [bold red]*required[/bold red]"
-
-    handler = self._get_prompt_handler(variable)
-
-    # Add validation hint (includes both extra text and enum options)
-    hint = variable.get_validation_hint()
-    if hint:
-      # Show options/extra inline inside parentheses, before the default
-      prompt_text = f"{prompt_text} [dim]({hint})[/dim]"
-
-    while True:
-      try:
-        raw = handler(prompt_text, default_value)
-        # Use Variable's centralized validation method that handles:
-        # - Type conversion
-        # - Autogenerated variable detection
-        # - Required field validation
-        converted = variable.validate_and_convert(raw, check_required=True)
-        
-        # Return the converted value (caller will update variable.value)
-        return converted
-      except ValueError as exc:
-        # Conversion/validation failed — show a consistent error message and retry
-        self._show_validation_error(str(exc))
-      except Exception as e:
-        # Unexpected error — log and retry using the stored (unconverted) value
-        logger.error(f"Error prompting for variable '{variable.name}': {str(e)}")
-        default_value = variable.value
+    """Simple interactive prompt handler for collecting template variables."""
+
+    def __init__(self) -> None:
+        self.console = Console()
+        self.display = DisplayManager()
+
+    def collect_variables(self, variables: VariableCollection) -> dict[str, Any]:
+        """Collect values for variables by iterating through sections.
+
+        Args:
+            variables: VariableCollection with organized sections and variables
+
+        Returns:
+            Dict of variable names to collected values
+        """
+        if not Confirm.ask("Customize any settings?", default=False):
+            logger.info("User opted to keep all default values")
+            return {}
+
+        collected: Dict[str, Any] = {}
+        prompted_variables: set[str] = (
+            set()
+        )  # Track which variables we've already prompted for
+
+        # Process each section
+        for section_key, section in variables.get_sections().items():
+            if not section.variables:
+                continue
+
+            # Check if dependencies are satisfied
+            if not variables.is_section_satisfied(section_key):
+                # Get list of unsatisfied dependencies for better user feedback
+                unsatisfied_keys = [
+                    dep
+                    for dep in section.needs
+                    if not variables.is_section_satisfied(dep)
+                ]
+                # Convert section keys to titles for user-friendly display
+                unsatisfied_titles = []
+                for dep_key in unsatisfied_keys:
+                    dep_section = variables.get_section(dep_key)
+                    if dep_section:
+                        unsatisfied_titles.append(dep_section.title)
+                    else:
+                        unsatisfied_titles.append(dep_key)
+                dep_names = (
+                    ", ".join(unsatisfied_titles) if unsatisfied_titles else "unknown"
+                )
+                self.display.display_skipped(
+                    section.title, f"requires {dep_names} to be enabled"
+                )
+                logger.debug(
+                    f"Skipping section '{section_key}' - dependencies not satisfied: {dep_names}"
+                )
+                continue
+
+            # Always show section header first
+            self.display.display_section_header(section.title, section.description)
+
+            # Track whether this section will be enabled
+            section_will_be_enabled = True
+
+            # Handle section toggle - skip for required sections
+            if section.required:
+                # Required sections are always processed, no toggle prompt needed
+                logger.debug(
+                    f"Processing required section '{section.key}' without toggle prompt"
+                )
+            elif section.toggle:
+                toggle_var = section.variables.get(section.toggle)
+                if toggle_var:
+                    # Use description for prompt if available, otherwise use title
+                    prompt_text = (
+                        section.description
+                        if section.description
+                        else f"Enable {section.title}?"
+                    )
+                    current_value = toggle_var.convert(toggle_var.value)
+                    new_value = self._prompt_bool(prompt_text, current_value)
+
+                    if new_value != current_value:
+                        collected[toggle_var.name] = new_value
+                        toggle_var.value = new_value
+
+                    # Use section's native is_enabled() method
+                    if not section.is_enabled():
+                        section_will_be_enabled = False
+
+            # Collect variables in this section
+            for var_name, variable in section.variables.items():
+                # Skip toggle variable (already handled)
+                if section.toggle and var_name == section.toggle:
+                    continue
+
+                # Skip non-required variables if section is disabled
+                if not section_will_be_enabled and not variable.required:
+                    logger.debug(
+                        f"Skipping non-required variable '{var_name}' from disabled section '{section_key}'"
+                    )
+                    continue
+
+                # Prompt for the variable
+                current_value = variable.convert(variable.value)
+                # Pass section.required so _prompt_variable can enforce required inputs
+                new_value = self._prompt_variable(variable, required=section.required)
+
+                # Track that we've prompted for this variable
+                prompted_variables.add(var_name)
+
+                # For autogenerated variables, always update even if None (signals autogeneration)
+                if variable.autogenerated and new_value is None:
+                    collected[var_name] = None
+                    variable.value = None
+                elif new_value != current_value:
+                    collected[var_name] = new_value
+                    variable.value = new_value
+
+        logger.info(f"Variable collection completed. Collected {len(collected)} values")
+        return collected
+
+    def _prompt_variable(self, variable: Variable, required: bool = False) -> Any:
+        """Prompt for a single variable value based on its type.
+
+        Args:
+            variable: The variable to prompt for
+            required: Whether the containing section is required (for context/display)
+
+        Returns:
+            The validated value entered by the user
+        """
+        logger.debug(
+            f"Prompting for variable '{variable.name}' (type: {variable.type})"
+        )
+
+        # Use variable's native methods for prompt text and default value
+        prompt_text = variable.get_prompt_text()
+        default_value = variable.get_normalized_default()
+
+        # Add lock icon before default value for sensitive or autogenerated variables
+        if variable.sensitive or variable.autogenerated:
+            # Format: "Prompt text 🔒 (default)"
+            # The lock icon goes between the text and the default value in parentheses
+            prompt_text = f"{prompt_text} {self.display.get_lock_icon()}"
+
+        # Check if this specific variable is required (has no default and not autogenerated)
+        var_is_required = variable.is_required()
+
+        # If variable is required, mark it in the prompt
+        if var_is_required:
+            prompt_text = f"{prompt_text} [bold red]*required[/bold red]"
+
         handler = self._get_prompt_handler(variable)
         handler = self._get_prompt_handler(variable)
 
 
-  def _get_prompt_handler(self, variable: Variable) -> Callable:
-    """Return the prompt function for a variable type."""
-    handlers = {
-      "bool": self._prompt_bool,
-      "int": self._prompt_int,
-      # For enum prompts we pass the variable.extra through so options and extra
-      # can be combined into a single inline hint.
-      "enum": lambda text, default: self._prompt_enum(text, variable.options or [], default, extra=getattr(variable, 'extra', None)),
-    }
-    return handlers.get(variable.type, lambda text, default: self._prompt_string(text, default, is_sensitive=variable.sensitive))
-
-  def _show_validation_error(self, message: str) -> None:
-    """Display validation feedback consistently."""
-    self.display.display_validation_error(message)
-
-  def _prompt_string(self, prompt_text: str, default: Any = None, is_sensitive: bool = False) -> str | None:
-    value = Prompt.ask(
-      prompt_text,
-      default=str(default) if default is not None else "",
-      show_default=True,
-      password=is_sensitive
-    )
-    stripped = value.strip() if value else None
-    return stripped if stripped else None
-
-  def _prompt_bool(self, prompt_text: str, default: Any = None) -> bool | None:
-    if default is None:
-      return Confirm.ask(prompt_text, default=None)
-    converted = default if isinstance(default, bool) else str(default).lower() in ("true", "1", "yes", "on")
-    return Confirm.ask(prompt_text, default=converted)
-
-  def _prompt_int(self, prompt_text: str, default: Any = None) -> int | None:
-    converted = None
-    if default is not None:
-      try:
-        converted = int(default)
-      except (ValueError, TypeError):
-        logger.warning(f"Invalid default integer value: {default}")
-    return IntPrompt.ask(prompt_text, default=converted)
-
-  def _prompt_enum(self, prompt_text: str, options: list[str], default: Any = None, extra: str | None = None) -> str:
-    """Prompt for enum selection with validation.
-    
-    Note: prompt_text should already include hint from variable.get_validation_hint()
-    but we keep this for backward compatibility and fallback.
-    """
-    if not options:
-      return self._prompt_string(prompt_text, default)
-
-    # Validate default is in options
-    if default and str(default) not in options:
-      default = options[0]
-
-    while True:
-      value = Prompt.ask(
-        prompt_text,
-        default=str(default) if default else options[0],
-        show_default=True,
-      )
-      if value in options:
-        return value
-      self.console.print(f"[red]Invalid choice. Select from: {', '.join(options)}[/red]")
+        # Add validation hint (includes both extra text and enum options)
+        hint = variable.get_validation_hint()
+        if hint:
+            # Show options/extra inline inside parentheses, before the default
+            prompt_text = f"{prompt_text} [dim]({hint})[/dim]"
+
+        while True:
+            try:
+                raw = handler(prompt_text, default_value)
+                # Use Variable's centralized validation method that handles:
+                # - Type conversion
+                # - Autogenerated variable detection
+                # - Required field validation
+                converted = variable.validate_and_convert(raw, check_required=True)
+
+                # Return the converted value (caller will update variable.value)
+                return converted
+            except ValueError as exc:
+                # Conversion/validation failed — show a consistent error message and retry
+                self._show_validation_error(str(exc))
+            except Exception as e:
+                # Unexpected error — log and retry using the stored (unconverted) value
+                logger.error(
+                    f"Error prompting for variable '{variable.name}': {str(e)}"
+                )
+                default_value = variable.value
+                handler = self._get_prompt_handler(variable)
+
+    def _get_prompt_handler(self, variable: Variable) -> Callable:
+        """Return the prompt function for a variable type."""
+        handlers = {
+            "bool": self._prompt_bool,
+            "int": self._prompt_int,
+            # For enum prompts we pass the variable.extra through so options and extra
+            # can be combined into a single inline hint.
+            "enum": lambda text, default: self._prompt_enum(
+                text,
+                variable.options or [],
+                default,
+                extra=getattr(variable, "extra", None),
+            ),
+        }
+        return handlers.get(
+            variable.type,
+            lambda text, default: self._prompt_string(
+                text, default, is_sensitive=variable.sensitive
+            ),
+        )
+
+    def _show_validation_error(self, message: str) -> None:
+        """Display validation feedback consistently."""
+        self.display.display_validation_error(message)
+
+    def _prompt_string(
+        self, prompt_text: str, default: Any = None, is_sensitive: bool = False
+    ) -> str | None:
+        value = Prompt.ask(
+            prompt_text,
+            default=str(default) if default is not None else "",
+            show_default=True,
+            password=is_sensitive,
+        )
+        stripped = value.strip() if value else None
+        return stripped if stripped else None
+
+    def _prompt_bool(self, prompt_text: str, default: Any = None) -> bool | None:
+        if default is None:
+            return Confirm.ask(prompt_text, default=None)
+        converted = (
+            default
+            if isinstance(default, bool)
+            else str(default).lower() in ("true", "1", "yes", "on")
+        )
+        return Confirm.ask(prompt_text, default=converted)
+
+    def _prompt_int(self, prompt_text: str, default: Any = None) -> int | None:
+        converted = None
+        if default is not None:
+            try:
+                converted = int(default)
+            except (ValueError, TypeError):
+                logger.warning(f"Invalid default integer value: {default}")
+        return IntPrompt.ask(prompt_text, default=converted)
+
+    def _prompt_enum(
+        self,
+        prompt_text: str,
+        options: list[str],
+        default: Any = None,
+        extra: str | None = None,
+    ) -> str:
+        """Prompt for enum selection with validation.
+
+        Note: prompt_text should already include hint from variable.get_validation_hint()
+        but we keep this for backward compatibility and fallback.
+        """
+        if not options:
+            return self._prompt_string(prompt_text, default)
+
+        # Validate default is in options
+        if default and str(default) not in options:
+            default = options[0]
+
+        while True:
+            value = Prompt.ask(
+                prompt_text,
+                default=str(default) if default else options[0],
+                show_default=True,
+            )
+            if value in options:
+                return value
+            self.console.print(
+                f"[red]Invalid choice. Select from: {', '.join(options)}[/red]"
+            )

+ 31 - 23
cli/core/registry.py

@@ -1,4 +1,5 @@
 """Module registry system."""
 """Module registry system."""
+
 from __future__ import annotations
 from __future__ import annotations
 
 
 import logging
 import logging
@@ -8,29 +9,36 @@ logger = logging.getLogger(__name__)
 
 
 
 
 class ModuleRegistry:
 class ModuleRegistry:
-  """Simple module registry without magic."""
-  
-  def __init__(self) -> None:
-    self._modules = {}
-    logger.debug("Initializing module registry")
-  
-  def register(self, module_class: Type) -> None:
-    """Register a module class."""
-    # Module class defines its own name attribute
-    logger.debug(f"Attempting to register module class '{module_class.name}'")
-    
-    if module_class.name in self._modules:
-      logger.warning(f"Module '{module_class.name}' already registered, replacing with new implementation")
-    
-    self._modules[module_class.name] = module_class
-    logger.info(f"Registered module '{module_class.name}' (total modules: {len(self._modules)})")
-    logger.debug(f"Module '{module_class.name}' details: description='{module_class.description}'")
-  
-  def iter_module_classes(self) -> Iterator[tuple[str, Type]]:
-    """Yield registered module classes without instantiating them."""
-    logger.debug(f"Iterating over {len(self._modules)} registered module classes")
-    for name in sorted(self._modules.keys()):
-      yield name, self._modules[name]
+    """Simple module registry without magic."""
+
+    def __init__(self) -> None:
+        self._modules = {}
+        logger.debug("Initializing module registry")
+
+    def register(self, module_class: Type) -> None:
+        """Register a module class."""
+        # Module class defines its own name attribute
+        logger.debug(f"Attempting to register module class '{module_class.name}'")
+
+        if module_class.name in self._modules:
+            logger.warning(
+                f"Module '{module_class.name}' already registered, replacing with new implementation"
+            )
+
+        self._modules[module_class.name] = module_class
+        logger.info(
+            f"Registered module '{module_class.name}' (total modules: {len(self._modules)})"
+        )
+        logger.debug(
+            f"Module '{module_class.name}' details: description='{module_class.description}'"
+        )
+
+    def iter_module_classes(self) -> Iterator[tuple[str, Type]]:
+        """Yield registered module classes without instantiating them."""
+        logger.debug(f"Iterating over {len(self._modules)} registered module classes")
+        for name in sorted(self._modules.keys()):
+            yield name, self._modules[name]
+
 
 
 # Global registry
 # Global registry
 registry = ModuleRegistry()
 registry = ModuleRegistry()

+ 141 - 99
cli/core/repo.py

@@ -1,4 +1,5 @@
 """Repository management module for syncing library repositories."""
 """Repository management module for syncing library repositories."""
+
 from __future__ import annotations
 from __future__ import annotations
 
 
 import logging
 import logging
@@ -7,7 +8,6 @@ from pathlib import Path
 from typing import Optional
 from typing import Optional
 
 
 from rich.console import Console
 from rich.console import Console
-from rich.panel import Panel
 from rich.progress import Progress, SpinnerColumn, TextColumn
 from rich.progress import Progress, SpinnerColumn, TextColumn
 from rich.table import Table
 from rich.table import Table
 from typer import Argument, Option, Typer
 from typer import Argument, Option, Typer
@@ -24,13 +24,15 @@ display = DisplayManager()
 app = Typer(help="Manage library repositories")
 app = Typer(help="Manage library repositories")
 
 
 
 
-def _run_git_command(args: list[str], cwd: Optional[Path] = None) -> tuple[bool, str, str]:
+def _run_git_command(
+    args: list[str], cwd: Optional[Path] = None
+) -> tuple[bool, str, str]:
     """Run a git command and return the result.
     """Run a git command and return the result.
-    
+
     Args:
     Args:
         args: Git command arguments (without 'git' prefix)
         args: Git command arguments (without 'git' prefix)
         cwd: Working directory for the command
         cwd: Working directory for the command
-        
+
     Returns:
     Returns:
         Tuple of (success, stdout, stderr)
         Tuple of (success, stdout, stderr)
     """
     """
@@ -40,7 +42,7 @@ def _run_git_command(args: list[str], cwd: Optional[Path] = None) -> tuple[bool,
             cwd=cwd,
             cwd=cwd,
             capture_output=True,
             capture_output=True,
             text=True,
             text=True,
-            timeout=300  # 5 minute timeout
+            timeout=300,  # 5 minute timeout
         )
         )
         return result.returncode == 0, result.stdout, result.stderr
         return result.returncode == 0, result.stdout, result.stderr
     except subprocess.TimeoutExpired:
     except subprocess.TimeoutExpired:
@@ -51,32 +53,37 @@ def _run_git_command(args: list[str], cwd: Optional[Path] = None) -> tuple[bool,
         return False, "", str(e)
         return False, "", str(e)
 
 
 
 
-def _clone_or_pull_repo(name: str, url: str, target_path: Path, branch: Optional[str] = None, sparse_dir: Optional[str] = None) -> tuple[bool, str]:
+def _clone_or_pull_repo(
+    name: str,
+    url: str,
+    target_path: Path,
+    branch: Optional[str] = None,
+    sparse_dir: Optional[str] = None,
+) -> tuple[bool, str]:
     """Clone or pull a git repository with optional sparse-checkout.
     """Clone or pull a git repository with optional sparse-checkout.
-    
+
     Args:
     Args:
         name: Library name
         name: Library name
         url: Git repository URL
         url: Git repository URL
         target_path: Target directory for the repository
         target_path: Target directory for the repository
         branch: Git branch to clone/pull (optional)
         branch: Git branch to clone/pull (optional)
         sparse_dir: Directory to sparse-checkout (optional, use None or "." for full clone)
         sparse_dir: Directory to sparse-checkout (optional, use None or "." for full clone)
-        
+
     Returns:
     Returns:
         Tuple of (success, message)
         Tuple of (success, message)
     """
     """
     if target_path.exists() and (target_path / ".git").exists():
     if target_path.exists() and (target_path / ".git").exists():
         # Repository exists, pull updates
         # Repository exists, pull updates
         logger.debug(f"Pulling updates for library '{name}' at {target_path}")
         logger.debug(f"Pulling updates for library '{name}' at {target_path}")
-        
+
         # Determine which branch to pull
         # Determine which branch to pull
         pull_branch = branch if branch else "main"
         pull_branch = branch if branch else "main"
-        
+
         # Pull updates from specific branch
         # Pull updates from specific branch
         success, stdout, stderr = _run_git_command(
         success, stdout, stderr = _run_git_command(
-            ["pull", "--ff-only", "origin", pull_branch],
-            cwd=target_path
+            ["pull", "--ff-only", "origin", pull_branch], cwd=target_path
         )
         )
-        
+
         if success:
         if success:
             # Check if anything was updated
             # Check if anything was updated
             if "Already up to date" in stdout or "Already up-to-date" in stdout:
             if "Already up to date" in stdout or "Already up-to-date" in stdout:
@@ -90,69 +97,74 @@ def _clone_or_pull_repo(name: str, url: str, target_path: Path, branch: Optional
     else:
     else:
         # Repository doesn't exist, clone it
         # Repository doesn't exist, clone it
         logger.debug(f"Cloning library '{name}' from {url} to {target_path}")
         logger.debug(f"Cloning library '{name}' from {url} to {target_path}")
-        
+
         # Ensure parent directory exists
         # Ensure parent directory exists
         target_path.parent.mkdir(parents=True, exist_ok=True)
         target_path.parent.mkdir(parents=True, exist_ok=True)
-        
+
         # Determine if we should use sparse-checkout
         # Determine if we should use sparse-checkout
         use_sparse = sparse_dir and sparse_dir != "."
         use_sparse = sparse_dir and sparse_dir != "."
-        
+
         if use_sparse:
         if use_sparse:
             # Use sparse-checkout to clone only specific directory
             # Use sparse-checkout to clone only specific directory
             logger.debug(f"Using sparse-checkout for directory: {sparse_dir}")
             logger.debug(f"Using sparse-checkout for directory: {sparse_dir}")
-            
+
             # Initialize empty repo
             # Initialize empty repo
             success, stdout, stderr = _run_git_command(["init"], cwd=None)
             success, stdout, stderr = _run_git_command(["init"], cwd=None)
             if success:
             if success:
                 # Create target directory
                 # Create target directory
                 target_path.mkdir(parents=True, exist_ok=True)
                 target_path.mkdir(parents=True, exist_ok=True)
-                
+
                 # Initialize git repo
                 # Initialize git repo
                 success, stdout, stderr = _run_git_command(["init"], cwd=target_path)
                 success, stdout, stderr = _run_git_command(["init"], cwd=target_path)
                 if not success:
                 if not success:
                     return False, f"Failed to initialize repo: {stderr or stdout}"
                     return False, f"Failed to initialize repo: {stderr or stdout}"
-                
+
                 # Add remote
                 # Add remote
-                success, stdout, stderr = _run_git_command(["remote", "add", "origin", url], cwd=target_path)
+                success, stdout, stderr = _run_git_command(
+                    ["remote", "add", "origin", url], cwd=target_path
+                )
                 if not success:
                 if not success:
                     return False, f"Failed to add remote: {stderr or stdout}"
                     return False, f"Failed to add remote: {stderr or stdout}"
-                
+
                 # Enable sparse-checkout (non-cone mode to exclude root files)
                 # Enable sparse-checkout (non-cone mode to exclude root files)
                 success, stdout, stderr = _run_git_command(
                 success, stdout, stderr = _run_git_command(
-                    ["sparse-checkout", "init", "--no-cone"], 
-                    cwd=target_path
+                    ["sparse-checkout", "init", "--no-cone"], cwd=target_path
                 )
                 )
                 if not success:
                 if not success:
-                    return False, f"Failed to enable sparse-checkout: {stderr or stdout}"
-                
+                    return (
+                        False,
+                        f"Failed to enable sparse-checkout: {stderr or stdout}",
+                    )
+
                 # Set sparse-checkout to specific directory (non-cone uses patterns)
                 # Set sparse-checkout to specific directory (non-cone uses patterns)
                 success, stdout, stderr = _run_git_command(
                 success, stdout, stderr = _run_git_command(
-                    ["sparse-checkout", "set", f"{sparse_dir}/*"],
-                    cwd=target_path
+                    ["sparse-checkout", "set", f"{sparse_dir}/*"], cwd=target_path
                 )
                 )
                 if not success:
                 if not success:
-                    return False, f"Failed to set sparse-checkout directory: {stderr or stdout}"
-                
+                    return (
+                        False,
+                        f"Failed to set sparse-checkout directory: {stderr or stdout}",
+                    )
+
                 # Fetch specific branch (without attempting to update local ref)
                 # Fetch specific branch (without attempting to update local ref)
                 fetch_args = ["fetch", "--depth", "1", "origin"]
                 fetch_args = ["fetch", "--depth", "1", "origin"]
                 if branch:
                 if branch:
                     fetch_args.append(branch)
                     fetch_args.append(branch)
                 else:
                 else:
                     fetch_args.append("main")
                     fetch_args.append("main")
-                
+
                 success, stdout, stderr = _run_git_command(fetch_args, cwd=target_path)
                 success, stdout, stderr = _run_git_command(fetch_args, cwd=target_path)
                 if not success:
                 if not success:
                     return False, f"Fetch failed: {stderr or stdout}"
                     return False, f"Fetch failed: {stderr or stdout}"
-                
+
                 # Checkout the branch
                 # Checkout the branch
                 checkout_branch = branch if branch else "main"
                 checkout_branch = branch if branch else "main"
                 success, stdout, stderr = _run_git_command(
                 success, stdout, stderr = _run_git_command(
-                    ["checkout", checkout_branch],
-                    cwd=target_path
+                    ["checkout", checkout_branch], cwd=target_path
                 )
                 )
                 if not success:
                 if not success:
                     return False, f"Checkout failed: {stderr or stdout}"
                     return False, f"Checkout failed: {stderr or stdout}"
-                
+
                 # Done! Files are in target_path/sparse_dir/
                 # Done! Files are in target_path/sparse_dir/
                 return True, "Cloned successfully (sparse)"
                 return True, "Cloned successfully (sparse)"
             else:
             else:
@@ -163,9 +175,9 @@ def _clone_or_pull_repo(name: str, url: str, target_path: Path, branch: Optional
             if branch:
             if branch:
                 clone_args.extend(["--branch", branch])
                 clone_args.extend(["--branch", branch])
             clone_args.extend([url, str(target_path)])
             clone_args.extend([url, str(target_path)])
-            
+
             success, stdout, stderr = _run_git_command(clone_args)
             success, stdout, stderr = _run_git_command(clone_args)
-            
+
             if success:
             if success:
                 return True, "Cloned successfully"
                 return True, "Cloned successfully"
             else:
             else:
@@ -177,36 +189,39 @@ def _clone_or_pull_repo(name: str, url: str, target_path: Path, branch: Optional
 @app.command()
 @app.command()
 def update(
 def update(
     library_name: Optional[str] = Argument(
     library_name: Optional[str] = Argument(
-        None,
-        help="Name of specific library to update (updates all if not specified)"
+        None, help="Name of specific library to update (updates all if not specified)"
     ),
     ),
-    verbose: bool = Option(False, "--verbose", "-v", help="Show detailed output")
+    verbose: bool = Option(False, "--verbose", "-v", help="Show detailed output"),
 ) -> None:
 ) -> None:
     """Update library repositories by cloning or pulling from git.
     """Update library repositories by cloning or pulling from git.
-    
+
     This command syncs all configured libraries from their git repositories.
     This command syncs all configured libraries from their git repositories.
     If a library doesn't exist locally, it will be cloned. If it exists, it will be pulled.
     If a library doesn't exist locally, it will be cloned. If it exists, it will be pulled.
     """
     """
     config = ConfigManager()
     config = ConfigManager()
     libraries = config.get_libraries()
     libraries = config.get_libraries()
-    
+
     if not libraries:
     if not libraries:
         display.display_warning("No libraries configured")
         display.display_warning("No libraries configured")
-        console.print("Libraries are auto-configured on first run with a default library.")
+        console.print(
+            "Libraries are auto-configured on first run with a default library."
+        )
         return
         return
-    
+
     # Filter to specific library if requested
     # Filter to specific library if requested
     if library_name:
     if library_name:
         libraries = [lib for lib in libraries if lib.get("name") == library_name]
         libraries = [lib for lib in libraries if lib.get("name") == library_name]
         if not libraries:
         if not libraries:
-            console_err.print(f"[red]Error:[/red] Library '{library_name}' not found in configuration")
+            console_err.print(
+                f"[red]Error:[/red] Library '{library_name}' not found in configuration"
+            )
             return
             return
-    
+
     libraries_path = config.get_libraries_path()
     libraries_path = config.get_libraries_path()
-    
+
     # Create results table
     # Create results table
     results = []
     results = []
-    
+
     with Progress(
     with Progress(
         SpinnerColumn(),
         SpinnerColumn(),
         TextColumn("[progress.description]{task.description}"),
         TextColumn("[progress.description]{task.description}"),
@@ -216,60 +231,66 @@ def update(
             name = lib.get("name")
             name = lib.get("name")
             lib_type = lib.get("type", "git")
             lib_type = lib.get("type", "git")
             enabled = lib.get("enabled", True)
             enabled = lib.get("enabled", True)
-            
+
             if not enabled:
             if not enabled:
                 if verbose:
                 if verbose:
                     console.print(f"[dim]Skipping disabled library: {name}[/dim]")
                     console.print(f"[dim]Skipping disabled library: {name}[/dim]")
                 results.append((name, "Skipped (disabled)", False))
                 results.append((name, "Skipped (disabled)", False))
                 continue
                 continue
-            
+
             # Skip static libraries (no sync needed)
             # Skip static libraries (no sync needed)
             if lib_type == "static":
             if lib_type == "static":
                 if verbose:
                 if verbose:
-                    console.print(f"[dim]Skipping static library: {name} (no sync needed)[/dim]")
+                    console.print(
+                        f"[dim]Skipping static library: {name} (no sync needed)[/dim]"
+                    )
                 results.append((name, "N/A (static)", True))
                 results.append((name, "N/A (static)", True))
                 continue
                 continue
-            
+
             # Handle git libraries
             # Handle git libraries
             url = lib.get("url")
             url = lib.get("url")
             branch = lib.get("branch")
             branch = lib.get("branch")
             directory = lib.get("directory", "library")
             directory = lib.get("directory", "library")
-            
+
             task = progress.add_task(f"Updating {name}...", total=None)
             task = progress.add_task(f"Updating {name}...", total=None)
-            
+
             # Target path: ~/.config/boilerplates/libraries/{name}/
             # Target path: ~/.config/boilerplates/libraries/{name}/
             target_path = libraries_path / name
             target_path = libraries_path / name
-            
+
             # Clone or pull the repository with sparse-checkout if directory is specified
             # Clone or pull the repository with sparse-checkout if directory is specified
-            success, message = _clone_or_pull_repo(name, url, target_path, branch, directory)
-            
+            success, message = _clone_or_pull_repo(
+                name, url, target_path, branch, directory
+            )
+
             results.append((name, message, success))
             results.append((name, message, success))
             progress.remove_task(task)
             progress.remove_task(task)
-            
+
             if verbose:
             if verbose:
                 if success:
                 if success:
                     display.display_success(f"{name}: {message}")
                     display.display_success(f"{name}: {message}")
                 else:
                 else:
                     display.display_error(f"{name}: {message}")
                     display.display_error(f"{name}: {message}")
-    
+
     # Display summary table
     # Display summary table
     if not verbose:
     if not verbose:
         display.display_status_table(
         display.display_status_table(
-            "Library Update Summary",
-            results,
-            columns=("Library", "Status")
+            "Library Update Summary", results, columns=("Library", "Status")
         )
         )
-    
+
     # Summary
     # Summary
     total = len(results)
     total = len(results)
     successful = sum(1 for _, _, success in results if success)
     successful = sum(1 for _, _, success in results if success)
-    
+
     if successful == total:
     if successful == total:
-        console.print(f"\n[green]All libraries updated successfully ({successful}/{total})[/green]")
+        console.print(
+            f"\n[green]All libraries updated successfully ({successful}/{total})[/green]"
+        )
     elif successful > 0:
     elif successful > 0:
-        console.print(f"\n[yellow]Partially successful: {successful}/{total} libraries updated[/yellow]")
+        console.print(
+            f"\n[yellow]Partially successful: {successful}/{total} libraries updated[/yellow]"
+        )
     else:
     else:
-        console.print(f"\n[red]Failed to update libraries[/red]")
+        console.print("\n[red]Failed to update libraries[/red]")
 
 
 
 
 @app.command()
 @app.command()
@@ -277,11 +298,11 @@ def list() -> None:
     """List all configured libraries."""
     """List all configured libraries."""
     config = ConfigManager()
     config = ConfigManager()
     libraries = config.get_libraries()
     libraries = config.get_libraries()
-    
+
     if not libraries:
     if not libraries:
         console.print("[yellow]No libraries configured.[/yellow]")
         console.print("[yellow]No libraries configured.[/yellow]")
         return
         return
-    
+
     table = Table(title="Configured Libraries", show_header=True)
     table = Table(title="Configured Libraries", show_header=True)
     table.add_column("Name", style="cyan", no_wrap=True)
     table.add_column("Name", style="cyan", no_wrap=True)
     table.add_column("URL/Path", style="blue")
     table.add_column("URL/Path", style="blue")
@@ -289,19 +310,19 @@ def list() -> None:
     table.add_column("Directory", style="magenta")
     table.add_column("Directory", style="magenta")
     table.add_column("Type", style="cyan")
     table.add_column("Type", style="cyan")
     table.add_column("Status", style="green")
     table.add_column("Status", style="green")
-    
+
     libraries_path = config.get_libraries_path()
     libraries_path = config.get_libraries_path()
-    
+
     for lib in libraries:
     for lib in libraries:
         name = lib.get("name", "")
         name = lib.get("name", "")
         lib_type = lib.get("type", "git")
         lib_type = lib.get("type", "git")
         enabled = lib.get("enabled", True)
         enabled = lib.get("enabled", True)
-        
+
         if lib_type == "git":
         if lib_type == "git":
             url_or_path = lib.get("url", "")
             url_or_path = lib.get("url", "")
             branch = lib.get("branch", "main")
             branch = lib.get("branch", "main")
             directory = lib.get("directory", "library")
             directory = lib.get("directory", "library")
-            
+
             # Check if library exists locally
             # Check if library exists locally
             library_base = libraries_path / name
             library_base = libraries_path / name
             if directory and directory != ".":
             if directory and directory != ".":
@@ -309,28 +330,29 @@ def list() -> None:
             else:
             else:
                 library_path = library_base
                 library_path = library_base
             exists = library_path.exists()
             exists = library_path.exists()
-        
+
         elif lib_type == "static":
         elif lib_type == "static":
             url_or_path = lib.get("path", "")
             url_or_path = lib.get("path", "")
             branch = "-"
             branch = "-"
             directory = "-"
             directory = "-"
-            
+
             # Check if static path exists
             # Check if static path exists
             from pathlib import Path
             from pathlib import Path
+
             library_path = Path(url_or_path).expanduser()
             library_path = Path(url_or_path).expanduser()
             if not library_path.is_absolute():
             if not library_path.is_absolute():
                 library_path = (config.config_path.parent / library_path).resolve()
                 library_path = (config.config_path.parent / library_path).resolve()
             exists = library_path.exists()
             exists = library_path.exists()
-        
+
         else:
         else:
             # Unknown type
             # Unknown type
             url_or_path = "<unknown type>"
             url_or_path = "<unknown type>"
             branch = "-"
             branch = "-"
             directory = "-"
             directory = "-"
             exists = False
             exists = False
-        
+
         type_display = lib_type
         type_display = lib_type
-        
+
         status_parts = []
         status_parts = []
         if not enabled:
         if not enabled:
             status_parts.append("[dim]disabled[/dim]")
             status_parts.append("[dim]disabled[/dim]")
@@ -338,53 +360,72 @@ def list() -> None:
             status_parts.append("[green]available[/green]")
             status_parts.append("[green]available[/green]")
         else:
         else:
             status_parts.append("[yellow]not found[/yellow]")
             status_parts.append("[yellow]not found[/yellow]")
-        
+
         status = " ".join(status_parts)
         status = " ".join(status_parts)
-        
+
         table.add_row(name, url_or_path, branch, directory, type_display, status)
         table.add_row(name, url_or_path, branch, directory, type_display, status)
-    
+
     console.print(table)
     console.print(table)
 
 
 
 
 @app.command()
 @app.command()
 def add(
 def add(
     name: str = Argument(..., help="Unique name for the library"),
     name: str = Argument(..., help="Unique name for the library"),
-    library_type: str = Option("git", "--type", "-t", help="Library type (git or static)"),
-    url: Optional[str] = Option(None, "--url", "-u", help="Git repository URL (for git type)"),
+    library_type: str = Option(
+        "git", "--type", "-t", help="Library type (git or static)"
+    ),
+    url: Optional[str] = Option(
+        None, "--url", "-u", help="Git repository URL (for git type)"
+    ),
     branch: str = Option("main", "--branch", "-b", help="Git branch (for git type)"),
     branch: str = Option("main", "--branch", "-b", help="Git branch (for git type)"),
-    directory: str = Option("library", "--directory", "-d", help="Directory in repo (for git type)"),
-    path: Optional[str] = Option(None, "--path", "-p", help="Local path (for static type)"),
-    enabled: bool = Option(True, "--enabled/--disabled", help="Enable or disable the library"),
-    sync: bool = Option(True, "--sync/--no-sync", help="Sync after adding (git only)")
+    directory: str = Option(
+        "library", "--directory", "-d", help="Directory in repo (for git type)"
+    ),
+    path: Optional[str] = Option(
+        None, "--path", "-p", help="Local path (for static type)"
+    ),
+    enabled: bool = Option(
+        True, "--enabled/--disabled", help="Enable or disable the library"
+    ),
+    sync: bool = Option(True, "--sync/--no-sync", help="Sync after adding (git only)"),
 ) -> None:
 ) -> None:
     """Add a new library to the configuration.
     """Add a new library to the configuration.
-    
+
     Examples:
     Examples:
       # Add a git library
       # Add a git library
       repo add mylib --type git --url https://github.com/user/templates.git
       repo add mylib --type git --url https://github.com/user/templates.git
-      
+
       # Add a static library
       # Add a static library
       repo add local --type static --path ~/my-templates
       repo add local --type static --path ~/my-templates
     """
     """
     config = ConfigManager()
     config = ConfigManager()
-    
+
     try:
     try:
         if library_type == "git":
         if library_type == "git":
             if not url:
             if not url:
                 display.display_error("--url is required for git libraries")
                 display.display_error("--url is required for git libraries")
                 return
                 return
-            config.add_library(name, library_type="git", url=url, branch=branch, directory=directory, enabled=enabled)
+            config.add_library(
+                name,
+                library_type="git",
+                url=url,
+                branch=branch,
+                directory=directory,
+                enabled=enabled,
+            )
         elif library_type == "static":
         elif library_type == "static":
             if not path:
             if not path:
                 display.display_error("--path is required for static libraries")
                 display.display_error("--path is required for static libraries")
                 return
                 return
             config.add_library(name, library_type="static", path=path, enabled=enabled)
             config.add_library(name, library_type="static", path=path, enabled=enabled)
         else:
         else:
-            display.display_error(f"Invalid library type: {library_type}. Must be 'git' or 'static'.")
+            display.display_error(
+                f"Invalid library type: {library_type}. Must be 'git' or 'static'."
+            )
             return
             return
-        
+
         display.display_success(f"Added {library_type} library '{name}'")
         display.display_success(f"Added {library_type} library '{name}'")
-        
+
         if library_type == "git" and sync and enabled:
         if library_type == "git" and sync and enabled:
             console.print(f"\nSyncing library '{name}'...")
             console.print(f"\nSyncing library '{name}'...")
             update(library_name=name, verbose=True)
             update(library_name=name, verbose=True)
@@ -397,23 +438,26 @@ def add(
 @app.command()
 @app.command()
 def remove(
 def remove(
     name: str = Argument(..., help="Name of the library to remove"),
     name: str = Argument(..., help="Name of the library to remove"),
-    keep_files: bool = Option(False, "--keep-files", help="Keep the local library files (don't delete)")
+    keep_files: bool = Option(
+        False, "--keep-files", help="Keep the local library files (don't delete)"
+    ),
 ) -> None:
 ) -> None:
     """Remove a library from the configuration and delete its local files."""
     """Remove a library from the configuration and delete its local files."""
     config = ConfigManager()
     config = ConfigManager()
-    
+
     try:
     try:
         # Remove from config
         # Remove from config
         config.remove_library(name)
         config.remove_library(name)
         display.display_success(f"Removed library '{name}' from configuration")
         display.display_success(f"Removed library '{name}' from configuration")
-        
+
         # Delete local files unless --keep-files is specified
         # Delete local files unless --keep-files is specified
         if not keep_files:
         if not keep_files:
             libraries_path = config.get_libraries_path()
             libraries_path = config.get_libraries_path()
             library_path = libraries_path / name
             library_path = libraries_path / name
-            
+
             if library_path.exists():
             if library_path.exists():
                 import shutil
                 import shutil
+
                 shutil.rmtree(library_path)
                 shutil.rmtree(library_path)
                 display.display_success(f"Deleted local files at {library_path}")
                 display.display_success(f"Deleted local files at {library_path}")
             else:
             else:
@@ -422,8 +466,6 @@ def remove(
         display.display_error(str(e))
         display.display_error(str(e))
 
 
 
 
-
-
 # Register the repo command with the CLI
 # Register the repo command with the CLI
 def register_cli(parent_app: Typer) -> None:
 def register_cli(parent_app: Typer) -> None:
     """Register the repo command with the parent Typer app."""
     """Register the repo command with the parent Typer app."""

+ 114 - 106
cli/core/section.py

@@ -7,109 +7,117 @@ from .variable import Variable
 
 
 
 
 class VariableSection:
 class VariableSection:
-  """Groups variables together with shared metadata for presentation."""
-
-  def __init__(self, data: dict[str, Any]) -> None:
-    """Initialize VariableSection from a dictionary.
-    
-    Args:
-        data: Dictionary containing section specification with required 'key' and 'title' keys
-    """
-    if not isinstance(data, dict):
-      raise ValueError("VariableSection data must be a dictionary")
-    
-    if "key" not in data:
-      raise ValueError("VariableSection data must contain 'key'")
-    
-    if "title" not in data:
-      raise ValueError("VariableSection data must contain 'title'")
-    
-    self.key: str = data["key"]
-    self.title: str = data["title"]
-    self.variables: OrderedDict[str, Variable] = OrderedDict()
-    self.description: Optional[str] = data.get("description")
-    self.toggle: Optional[str] = data.get("toggle")
-    # Track which fields were explicitly provided (to support explicit clears)
-    self._explicit_fields: set[str] = set(data.keys())
-    # Default "general" section to required=True, all others to required=False
-    self.required: bool = data.get("required", data["key"] == "general")
-    # Section dependencies - can be string or list of strings
-    needs_value = data.get("needs")
-    if needs_value:
-      if isinstance(needs_value, str):
-        self.needs: List[str] = [needs_value]
-      elif isinstance(needs_value, list):
-        self.needs: List[str] = needs_value
-      else:
-        raise ValueError(f"Section '{self.key}' has invalid 'needs' value: must be string or list")
-    else:
-      self.needs: List[str] = []
-
-  def to_dict(self) -> Dict[str, Any]:
-    """Serialize VariableSection to a dictionary for storage."""
-    section_dict = {
-      'required': self.required,
-      'vars': {name: var.to_dict() for name, var in self.variables.items()}
-    }
-    
-    # Add optional fields if present
-    for field in ('title', 'description', 'toggle'):
-      if value := getattr(self, field):
-        section_dict[field] = value
-    
-    # Store dependencies (single value if only one, list otherwise)
-    if self.needs:
-      section_dict['needs'] = self.needs[0] if len(self.needs) == 1 else self.needs
-    
-    return section_dict
-  
-  def is_enabled(self) -> bool:
-    """Check if section is currently enabled based on toggle variable.
-    
-    Returns:
-        True if section is enabled (no toggle or toggle is True), False otherwise
-    """
-    if not self.toggle:
-      return True
-    
-    toggle_var = self.variables.get(self.toggle)
-    if not toggle_var:
-      return True
-    
-    try:
-      return bool(toggle_var.convert(toggle_var.value))
-    except Exception:
-      return False
-  
-  def clone(self, origin_update: Optional[str] = None) -> 'VariableSection':
-    """Create a deep copy of the section with all variables.
-    
-    This is more efficient than converting to dict and back when copying sections.
-    
-    Args:
-        origin_update: Optional origin string to apply to all cloned variables
-        
-    Returns:
-        New VariableSection instance with deep-copied variables
-        
-    Example:
-        section2 = section1.clone(origin_update='template')
-    """
-    # Create new section with same metadata
-    cloned = VariableSection({
-      'key': self.key,
-      'title': self.title,
-      'description': self.description,
-      'toggle': self.toggle,
-      'required': self.required,
-      'needs': self.needs.copy() if self.needs else None,
-    })
-    
-    # Deep copy all variables
-    for var_name, variable in self.variables.items():
-      if origin_update:
-        cloned.variables[var_name] = variable.clone(update={'origin': origin_update})
-      else:
-        cloned.variables[var_name] = variable.clone()
-    
-    return cloned
+    """Groups variables together with shared metadata for presentation."""
+
+    def __init__(self, data: dict[str, Any]) -> None:
+        """Initialize VariableSection from a dictionary.
+
+        Args:
+            data: Dictionary containing section specification with required 'key' and 'title' keys
+        """
+        if not isinstance(data, dict):
+            raise ValueError("VariableSection data must be a dictionary")
+
+        if "key" not in data:
+            raise ValueError("VariableSection data must contain 'key'")
+
+        if "title" not in data:
+            raise ValueError("VariableSection data must contain 'title'")
+
+        self.key: str = data["key"]
+        self.title: str = data["title"]
+        self.variables: OrderedDict[str, Variable] = OrderedDict()
+        self.description: Optional[str] = data.get("description")
+        self.toggle: Optional[str] = data.get("toggle")
+        # Track which fields were explicitly provided (to support explicit clears)
+        self._explicit_fields: set[str] = set(data.keys())
+        # Default "general" section to required=True, all others to required=False
+        self.required: bool = data.get("required", data["key"] == "general")
+        # Section dependencies - can be string or list of strings
+        needs_value = data.get("needs")
+        if needs_value:
+            if isinstance(needs_value, str):
+                self.needs: List[str] = [needs_value]
+            elif isinstance(needs_value, list):
+                self.needs: List[str] = needs_value
+            else:
+                raise ValueError(
+                    f"Section '{self.key}' has invalid 'needs' value: must be string or list"
+                )
+        else:
+            self.needs: List[str] = []
+
+    def to_dict(self) -> Dict[str, Any]:
+        """Serialize VariableSection to a dictionary for storage."""
+        section_dict = {
+            "required": self.required,
+            "vars": {name: var.to_dict() for name, var in self.variables.items()},
+        }
+
+        # Add optional fields if present
+        for field in ("title", "description", "toggle"):
+            if value := getattr(self, field):
+                section_dict[field] = value
+
+        # Store dependencies (single value if only one, list otherwise)
+        if self.needs:
+            section_dict["needs"] = (
+                self.needs[0] if len(self.needs) == 1 else self.needs
+            )
+
+        return section_dict
+
+    def is_enabled(self) -> bool:
+        """Check if section is currently enabled based on toggle variable.
+
+        Returns:
+            True if section is enabled (no toggle or toggle is True), False otherwise
+        """
+        if not self.toggle:
+            return True
+
+        toggle_var = self.variables.get(self.toggle)
+        if not toggle_var:
+            return True
+
+        try:
+            return bool(toggle_var.convert(toggle_var.value))
+        except Exception:
+            return False
+
+    def clone(self, origin_update: Optional[str] = None) -> "VariableSection":
+        """Create a deep copy of the section with all variables.
+
+        This is more efficient than converting to dict and back when copying sections.
+
+        Args:
+            origin_update: Optional origin string to apply to all cloned variables
+
+        Returns:
+            New VariableSection instance with deep-copied variables
+
+        Example:
+            section2 = section1.clone(origin_update='template')
+        """
+        # Create new section with same metadata
+        cloned = VariableSection(
+            {
+                "key": self.key,
+                "title": self.title,
+                "description": self.description,
+                "toggle": self.toggle,
+                "required": self.required,
+                "needs": self.needs.copy() if self.needs else None,
+            }
+        )
+
+        # Deep copy all variables
+        for var_name, variable in self.variables.items():
+            if origin_update:
+                cloned.variables[var_name] = variable.clone(
+                    update={"origin": origin_update}
+                )
+            else:
+                cloned.variables[var_name] = variable.clone()
+
+        return cloned

+ 888 - 771
cli/core/template.py

@@ -1,16 +1,13 @@
 from __future__ import annotations
 from __future__ import annotations
 
 
-from .variable import Variable
 from .collection import VariableCollection
 from .collection import VariableCollection
 from .exceptions import (
 from .exceptions import (
-    TemplateError,
     TemplateLoadError,
     TemplateLoadError,
     TemplateSyntaxError,
     TemplateSyntaxError,
     TemplateValidationError,
     TemplateValidationError,
     TemplateRenderError,
     TemplateRenderError,
     YAMLParseError,
     YAMLParseError,
-    ModuleLoadError,
-    IncompatibleSchemaVersionError
+    IncompatibleSchemaVersionError,
 )
 )
 from .version import is_compatible
 from .version import is_compatible
 from pathlib import Path
 from pathlib import Path
@@ -28,806 +25,926 @@ from jinja2.exceptions import (
     TemplateSyntaxError as Jinja2TemplateSyntaxError,
     TemplateSyntaxError as Jinja2TemplateSyntaxError,
     UndefinedError,
     UndefinedError,
     TemplateError as Jinja2TemplateError,
     TemplateError as Jinja2TemplateError,
-    TemplateNotFound as Jinja2TemplateNotFound
+    TemplateNotFound as Jinja2TemplateNotFound,
 )
 )
 
 
 logger = logging.getLogger(__name__)
 logger = logging.getLogger(__name__)
 
 
 
 
 def _extract_error_context(
 def _extract_error_context(
-    file_path: Path,
-    line_number: Optional[int],
-    context_size: int = 3
+    file_path: Path, line_number: Optional[int], context_size: int = 3
 ) -> List[str]:
 ) -> List[str]:
-  """Extract lines of context around an error location.
-  
-  Args:
-      file_path: Path to the file with the error
-      line_number: Line number where error occurred (1-indexed)
-      context_size: Number of lines to show before and after
-      
-  Returns:
-      List of context lines with line numbers
-  """
-  if not line_number or not file_path.exists():
-    return []
-  
-  try:
-    with open(file_path, 'r', encoding='utf-8') as f:
-      lines = f.readlines()
-    
-    start_line = max(0, line_number - context_size - 1)
-    end_line = min(len(lines), line_number + context_size)
-    
-    context = []
-    for i in range(start_line, end_line):
-      line_num = i + 1
-      marker = '>>>' if line_num == line_number else '   '
-      context.append(f"{marker} {line_num:4d} | {lines[i].rstrip()}")
-    
-    return context
-  except (IOError, OSError):
-    return []
+    """Extract lines of context around an error location.
+
+    Args:
+        file_path: Path to the file with the error
+        line_number: Line number where error occurred (1-indexed)
+        context_size: Number of lines to show before and after
+
+    Returns:
+        List of context lines with line numbers
+    """
+    if not line_number or not file_path.exists():
+        return []
+
+    try:
+        with open(file_path, "r", encoding="utf-8") as f:
+            lines = f.readlines()
+
+        start_line = max(0, line_number - context_size - 1)
+        end_line = min(len(lines), line_number + context_size)
+
+        context = []
+        for i in range(start_line, end_line):
+            line_num = i + 1
+            marker = ">>>" if line_num == line_number else "   "
+            context.append(f"{marker} {line_num:4d} | {lines[i].rstrip()}")
+
+        return context
+    except (IOError, OSError):
+        return []
 
 
 
 
 def _get_common_jinja_suggestions(error_msg: str, available_vars: set) -> List[str]:
 def _get_common_jinja_suggestions(error_msg: str, available_vars: set) -> List[str]:
-  """Generate helpful suggestions based on common Jinja2 errors.
-  
-  Args:
-      error_msg: The error message from Jinja2
-      available_vars: Set of available variable names
-      
-  Returns:
-      List of actionable suggestions
-  """
-  suggestions = []
-  error_lower = error_msg.lower()
-  
-  # Undefined variable errors
-  if 'undefined' in error_lower or 'is not defined' in error_lower:
-    # Try to extract variable name from error message
-    import re
-    var_match = re.search(r"'([^']+)'.*is undefined", error_msg)
-    if not var_match:
-      var_match = re.search(r"'([^']+)'.*is not defined", error_msg)
-    
-    if var_match:
-      undefined_var = var_match.group(1)
-      suggestions.append(f"Variable '{undefined_var}' is not defined in the template spec")
-      
-      # Suggest similar variable names (basic fuzzy matching)
-      similar = [v for v in available_vars if undefined_var.lower() in v.lower() or v.lower() in undefined_var.lower()]
-      if similar:
-        suggestions.append(f"Did you mean one of these? {', '.join(sorted(similar)[:5])}")
-      
-      suggestions.append(f"Add '{undefined_var}' to your template.yaml spec with a default value")
-      suggestions.append("Or use the Jinja2 default filter: {{ " + undefined_var + " | default('value') }}")
-    else:
-      suggestions.append("Check that all variables used in templates are defined in template.yaml")
-      suggestions.append("Use the Jinja2 default filter for optional variables: {{ var | default('value') }}")
-  
-  # Syntax errors
-  elif 'unexpected' in error_lower or 'expected' in error_lower:
-    suggestions.append("Check for syntax errors in your Jinja2 template")
-    suggestions.append("Common issues: missing {% endfor %}, {% endif %}, or {% endblock %}")
-    suggestions.append("Make sure all {{ }} and {% %} tags are properly closed")
-  
-  # Filter errors
-  elif 'filter' in error_lower:
-    suggestions.append("Check that the filter name is spelled correctly")
-    suggestions.append("Verify the filter exists in Jinja2 built-in filters")
-    suggestions.append("Make sure filter arguments are properly formatted")
-  
-  # Template not found
-  elif 'not found' in error_lower or 'does not exist' in error_lower:
-    suggestions.append("Check that the included/imported template file exists")
-    suggestions.append("Verify the template path is relative to the template directory")
-    suggestions.append("Make sure the file has the .j2 extension if it's a Jinja2 template")
-  
-  # Type errors
-  elif 'type' in error_lower and ('int' in error_lower or 'str' in error_lower or 'bool' in error_lower):
-    suggestions.append("Check that variable values have the correct type")
-    suggestions.append("Use Jinja2 filters to convert types: {{ var | int }}, {{ var | string }}")
-  
-  # Add generic helpful tip
-  if not suggestions:
-    suggestions.append("Check the Jinja2 template syntax and variable usage")
-    suggestions.append("Enable --debug mode for more detailed rendering information")
-  
-  return suggestions
+    """Generate helpful suggestions based on common Jinja2 errors.
+
+    Args:
+        error_msg: The error message from Jinja2
+        available_vars: Set of available variable names
+
+    Returns:
+        List of actionable suggestions
+    """
+    suggestions = []
+    error_lower = error_msg.lower()
+
+    # Undefined variable errors
+    if "undefined" in error_lower or "is not defined" in error_lower:
+        # Try to extract variable name from error message
+        import re
+
+        var_match = re.search(r"'([^']+)'.*is undefined", error_msg)
+        if not var_match:
+            var_match = re.search(r"'([^']+)'.*is not defined", error_msg)
+
+        if var_match:
+            undefined_var = var_match.group(1)
+            suggestions.append(
+                f"Variable '{undefined_var}' is not defined in the template spec"
+            )
+
+            # Suggest similar variable names (basic fuzzy matching)
+            similar = [
+                v
+                for v in available_vars
+                if undefined_var.lower() in v.lower()
+                or v.lower() in undefined_var.lower()
+            ]
+            if similar:
+                suggestions.append(
+                    f"Did you mean one of these? {', '.join(sorted(similar)[:5])}"
+                )
+
+            suggestions.append(
+                f"Add '{undefined_var}' to your template.yaml spec with a default value"
+            )
+            suggestions.append(
+                "Or use the Jinja2 default filter: {{ "
+                + undefined_var
+                + " | default('value') }}"
+            )
+        else:
+            suggestions.append(
+                "Check that all variables used in templates are defined in template.yaml"
+            )
+            suggestions.append(
+                "Use the Jinja2 default filter for optional variables: {{ var | default('value') }}"
+            )
+
+    # Syntax errors
+    elif "unexpected" in error_lower or "expected" in error_lower:
+        suggestions.append("Check for syntax errors in your Jinja2 template")
+        suggestions.append(
+            "Common issues: missing {% endfor %}, {% endif %}, or {% endblock %}"
+        )
+        suggestions.append("Make sure all {{ }} and {% %} tags are properly closed")
+
+    # Filter errors
+    elif "filter" in error_lower:
+        suggestions.append("Check that the filter name is spelled correctly")
+        suggestions.append("Verify the filter exists in Jinja2 built-in filters")
+        suggestions.append("Make sure filter arguments are properly formatted")
+
+    # Template not found
+    elif "not found" in error_lower or "does not exist" in error_lower:
+        suggestions.append("Check that the included/imported template file exists")
+        suggestions.append(
+            "Verify the template path is relative to the template directory"
+        )
+        suggestions.append(
+            "Make sure the file has the .j2 extension if it's a Jinja2 template"
+        )
+
+    # Type errors
+    elif "type" in error_lower and (
+        "int" in error_lower or "str" in error_lower or "bool" in error_lower
+    ):
+        suggestions.append("Check that variable values have the correct type")
+        suggestions.append(
+            "Use Jinja2 filters to convert types: {{ var | int }}, {{ var | string }}"
+        )
+
+    # Add generic helpful tip
+    if not suggestions:
+        suggestions.append("Check the Jinja2 template syntax and variable usage")
+        suggestions.append(
+            "Enable --debug mode for more detailed rendering information"
+        )
+
+    return suggestions
 
 
 
 
 def _parse_jinja_error(
 def _parse_jinja_error(
     error: Exception,
     error: Exception,
     template_file: TemplateFile,
     template_file: TemplateFile,
     template_dir: Path,
     template_dir: Path,
-    available_vars: set
+    available_vars: set,
 ) -> tuple[str, Optional[int], Optional[int], List[str], List[str]]:
 ) -> tuple[str, Optional[int], Optional[int], List[str], List[str]]:
-  """Parse a Jinja2 exception to extract detailed error information.
-  
-  Args:
-      error: The Jinja2 exception
-      template_file: The TemplateFile being rendered
-      template_dir: Template directory path
-      available_vars: Set of available variable names
-      
-  Returns:
-      Tuple of (error_message, line_number, column, context_lines, suggestions)
-  """
-  error_msg = str(error)
-  line_number = None
-  column = None
-  context_lines = []
-  suggestions = []
-  
-  # Extract line number from Jinja2 errors
-  if hasattr(error, 'lineno'):
-    line_number = error.lineno
-  
-  # Extract file path and get context
-  file_path = template_dir / template_file.relative_path
-  if line_number and file_path.exists():
-    context_lines = _extract_error_context(file_path, line_number)
-  
-  # Generate suggestions based on error type
-  if isinstance(error, UndefinedError):
-    error_msg = f"Undefined variable: {error}"
-    suggestions = _get_common_jinja_suggestions(str(error), available_vars)
-  elif isinstance(error, Jinja2TemplateSyntaxError):
-    error_msg = f"Template syntax error: {error}"
-    suggestions = _get_common_jinja_suggestions(str(error), available_vars)
-  elif isinstance(error, Jinja2TemplateNotFound):
-    error_msg = f"Template file not found: {error}"
-    suggestions = _get_common_jinja_suggestions(str(error), available_vars)
-  else:
-    # Generic Jinja2 error
-    suggestions = _get_common_jinja_suggestions(error_msg, available_vars)
-  
-  return error_msg, line_number, column, context_lines, suggestions
+    """Parse a Jinja2 exception to extract detailed error information.
+
+    Args:
+        error: The Jinja2 exception
+        template_file: The TemplateFile being rendered
+        template_dir: Template directory path
+        available_vars: Set of available variable names
+
+    Returns:
+        Tuple of (error_message, line_number, column, context_lines, suggestions)
+    """
+    error_msg = str(error)
+    line_number = None
+    column = None
+    context_lines = []
+    suggestions = []
+
+    # Extract line number from Jinja2 errors
+    if hasattr(error, "lineno"):
+        line_number = error.lineno
+
+    # Extract file path and get context
+    file_path = template_dir / template_file.relative_path
+    if line_number and file_path.exists():
+        context_lines = _extract_error_context(file_path, line_number)
+
+    # Generate suggestions based on error type
+    if isinstance(error, UndefinedError):
+        error_msg = f"Undefined variable: {error}"
+        suggestions = _get_common_jinja_suggestions(str(error), available_vars)
+    elif isinstance(error, Jinja2TemplateSyntaxError):
+        error_msg = f"Template syntax error: {error}"
+        suggestions = _get_common_jinja_suggestions(str(error), available_vars)
+    elif isinstance(error, Jinja2TemplateNotFound):
+        error_msg = f"Template file not found: {error}"
+        suggestions = _get_common_jinja_suggestions(str(error), available_vars)
+    else:
+        # Generic Jinja2 error
+        suggestions = _get_common_jinja_suggestions(error_msg, available_vars)
+
+    return error_msg, line_number, column, context_lines, suggestions
 
 
 
 
 @dataclass
 @dataclass
 class TemplateFile:
 class TemplateFile:
     """Represents a single file within a template directory."""
     """Represents a single file within a template directory."""
+
     relative_path: Path
     relative_path: Path
-    file_type: Literal['j2', 'static']
-    output_path: Path # The path it will have in the output directory
+    file_type: Literal["j2", "static"]
+    output_path: Path  # The path it will have in the output directory
+
 
 
 @dataclass
 @dataclass
 class TemplateMetadata:
 class TemplateMetadata:
-  """Represents template metadata with proper typing."""
-  name: str
-  description: str
-  author: str
-  date: str
-  version: str
-  module: str = ""
-  tags: List[str] = field(default_factory=list)
-  library: str = "unknown"
-  library_type: str = "git"  # Type of library ("git" or "static")
-  next_steps: str = ""
-  draft: bool = False
-
-  def __init__(self, template_data: dict, library_name: str | None = None, library_type: str = "git") -> None:
-    """Initialize TemplateMetadata from parsed YAML template data.
-    
-    Args:
-        template_data: Parsed YAML data from template.yaml
-        library_name: Name of the library this template belongs to
-    """
-    # Validate metadata format first
-    self._validate_metadata(template_data)
-    
-    # Extract metadata section
-    metadata_section = template_data.get("metadata", {})
-    
-    self.name = metadata_section.get("name", "")
-    # YAML block scalar (|) preserves a trailing newline. Remove only trailing newlines
-    # while preserving internal newlines/formatting.
-    raw_description = metadata_section.get("description", "")
-    if isinstance(raw_description, str):
-      description = raw_description.rstrip("\n")
-    else:
-      description = str(raw_description)
-    self.description = description or "No description available"
-    self.author = metadata_section.get("author", "")
-    self.date = metadata_section.get("date", "")
-    self.version = metadata_section.get("version", "")
-    self.module = metadata_section.get("module", "")
-    self.tags = metadata_section.get("tags", []) or []
-    self.library = library_name or "unknown"
-    self.library_type = library_type
-    self.draft = metadata_section.get("draft", False)
-    
-    # Extract next_steps (optional)
-    raw_next_steps = metadata_section.get("next_steps", "")
-    if isinstance(raw_next_steps, str):
-      next_steps = raw_next_steps.rstrip("\n")
-    else:
-      next_steps = str(raw_next_steps) if raw_next_steps else ""
-    self.next_steps = next_steps
+    """Represents template metadata with proper typing."""
+
+    name: str
+    description: str
+    author: str
+    date: str
+    version: str
+    module: str = ""
+    tags: List[str] = field(default_factory=list)
+    library: str = "unknown"
+    library_type: str = "git"  # Type of library ("git" or "static")
+    next_steps: str = ""
+    draft: bool = False
+
+    def __init__(
+        self,
+        template_data: dict,
+        library_name: str | None = None,
+        library_type: str = "git",
+    ) -> None:
+        """Initialize TemplateMetadata from parsed YAML template data.
+
+        Args:
+            template_data: Parsed YAML data from template.yaml
+            library_name: Name of the library this template belongs to
+        """
+        # Validate metadata format first
+        self._validate_metadata(template_data)
+
+        # Extract metadata section
+        metadata_section = template_data.get("metadata", {})
+
+        self.name = metadata_section.get("name", "")
+        # YAML block scalar (|) preserves a trailing newline. Remove only trailing newlines
+        # while preserving internal newlines/formatting.
+        raw_description = metadata_section.get("description", "")
+        if isinstance(raw_description, str):
+            description = raw_description.rstrip("\n")
+        else:
+            description = str(raw_description)
+        self.description = description or "No description available"
+        self.author = metadata_section.get("author", "")
+        self.date = metadata_section.get("date", "")
+        self.version = metadata_section.get("version", "")
+        self.module = metadata_section.get("module", "")
+        self.tags = metadata_section.get("tags", []) or []
+        self.library = library_name or "unknown"
+        self.library_type = library_type
+        self.draft = metadata_section.get("draft", False)
+
+        # Extract next_steps (optional)
+        raw_next_steps = metadata_section.get("next_steps", "")
+        if isinstance(raw_next_steps, str):
+            next_steps = raw_next_steps.rstrip("\n")
+        else:
+            next_steps = str(raw_next_steps) if raw_next_steps else ""
+        self.next_steps = next_steps
+
+    @staticmethod
+    def _validate_metadata(template_data: dict) -> None:
+        """Validate that template has required 'metadata' section with all required fields.
+
+        Args:
+            template_data: Parsed YAML data from template.yaml
+
+        Raises:
+            ValueError: If metadata section is missing or incomplete
+        """
+        metadata_section = template_data.get("metadata")
+        if metadata_section is None:
+            raise ValueError("Template format error: missing 'metadata' section")
+
+        # Validate that metadata section has all required fields
+        required_fields = ["name", "author", "version", "date", "description"]
+        missing_fields = [
+            field for field in required_fields if not metadata_section.get(field)
+        ]
+
+        if missing_fields:
+            raise ValueError(
+                f"Template format error: missing required metadata fields: {missing_fields}"
+            )
 
 
-  @staticmethod
-  def _validate_metadata(template_data: dict) -> None:
-    """Validate that template has required 'metadata' section with all required fields.
-    
-    Args:
-        template_data: Parsed YAML data from template.yaml
-        
-    Raises:
-        ValueError: If metadata section is missing or incomplete
-    """
-    metadata_section = template_data.get("metadata")
-    if metadata_section is None:
-      raise ValueError("Template format error: missing 'metadata' section")
-    
-    # Validate that metadata section has all required fields
-    required_fields = ["name", "author", "version", "date", "description"]
-    missing_fields = [field for field in required_fields if not metadata_section.get(field)]
-    
-    if missing_fields:
-      raise ValueError(f"Template format error: missing required metadata fields: {missing_fields}")
 
 
 @dataclass
 @dataclass
 class Template:
 class Template:
-  """Represents a template directory."""
-
-  def __init__(self, template_dir: Path, library_name: str, library_type: str = "git") -> None:
-    """Create a Template instance from a directory path.
-    
-    Args:
-        template_dir: Path to the template directory
-        library_name: Name of the library this template belongs to
-        library_type: Type of library ("git" or "static"), defaults to "git"
-    """
-    logger.debug(f"Loading template from directory: {template_dir}")
-    self.template_dir = template_dir
-    self.id = template_dir.name
-    self.original_id = template_dir.name  # Store the original ID
-    self.library_name = library_name
-    self.library_type = library_type
-
-    # Initialize caches for lazy loading
-    self.__module_specs: Optional[dict] = None
-    self.__merged_specs: Optional[dict] = None
-    self.__jinja_env: Optional[Environment] = None
-    self.__used_variables: Optional[Set[str]] = None
-    self.__variables: Optional[VariableCollection] = None
-    self.__template_files: Optional[List[TemplateFile]] = None # New attribute
+    """Represents a template directory."""
+
+    def __init__(
+        self, template_dir: Path, library_name: str, library_type: str = "git"
+    ) -> None:
+        """Create a Template instance from a directory path.
+
+        Args:
+            template_dir: Path to the template directory
+            library_name: Name of the library this template belongs to
+            library_type: Type of library ("git" or "static"), defaults to "git"
+        """
+        logger.debug(f"Loading template from directory: {template_dir}")
+        self.template_dir = template_dir
+        self.id = template_dir.name
+        self.original_id = template_dir.name  # Store the original ID
+        self.library_name = library_name
+        self.library_type = library_type
+
+        # Initialize caches for lazy loading
+        self.__module_specs: Optional[dict] = None
+        self.__merged_specs: Optional[dict] = None
+        self.__jinja_env: Optional[Environment] = None
+        self.__used_variables: Optional[Set[str]] = None
+        self.__variables: Optional[VariableCollection] = None
+        self.__template_files: Optional[List[TemplateFile]] = None  # New attribute
 
 
-    try:
-      # Find and parse the main template file (template.yaml or template.yml)
-      main_template_path = self._find_main_template_file()
-      with open(main_template_path, "r", encoding="utf-8") as f:
-        # Load all YAML documents (handles templates with empty lines before ---)
-        documents = list(yaml.safe_load_all(f))
-        
-        # Filter out None/empty documents and get the first non-empty one
-        valid_docs = [doc for doc in documents if doc is not None]
-        
-        if not valid_docs:
-          raise ValueError("Template file contains no valid YAML data")
-        
-        if len(valid_docs) > 1:
-          logger.warning(f"Template file contains multiple YAML documents, using the first one")
-        
-        self._template_data = valid_docs[0]
-      
-      # Validate template data
-      if not isinstance(self._template_data, dict):
-        raise ValueError("Template file must contain a valid YAML dictionary")
-
-      # Load metadata (always needed)
-      self.metadata = TemplateMetadata(self._template_data, library_name, library_type)
-      logger.debug(f"Loaded metadata: {self.metadata}")
-
-      # Validate 'kind' field (always needed)
-      self._validate_kind(self._template_data)
-      
-      # Extract schema version (default to 1.0 for backward compatibility)
-      self.schema_version = str(self._template_data.get("schema", "1.0"))
-      logger.debug(f"Template schema version: {self.schema_version}")
-      
-      # Note: Schema version validation is done by the module when loading templates
-
-      # NOTE: File collection is now lazy-loaded via the template_files property
-      # This significantly improves performance when listing many templates
-
-      logger.info(f"Loaded template '{self.id}' (v{self.metadata.version})")
-
-    except (ValueError, FileNotFoundError) as e:
-      logger.error(f"Error loading template from {template_dir}: {e}")
-      raise TemplateLoadError(f"Error loading template from {template_dir}: {e}")
-    except yaml.YAMLError as e:
-      logger.error(f"YAML parsing error in template {template_dir}: {e}")
-      raise YAMLParseError(str(template_dir / "template.y*ml"), e)
-    except (IOError, OSError) as e:
-      logger.error(f"File I/O error loading template {template_dir}: {e}")
-      raise TemplateLoadError(f"File I/O error loading template from {template_dir}: {e}")
-
-  def set_qualified_id(self, library_name: str | None = None) -> None:
-    """Set a qualified ID for this template (used when duplicates exist across libraries).
-    
-    Args:
-        library_name: Name of the library to qualify with. If None, uses self.library_name
-    """
-    lib_name = library_name or self.library_name
-    self.id = f"{self.original_id}.{lib_name}"
-    logger.debug(f"Template ID qualified: {self.original_id} -> {self.id}")
-
-  def _find_main_template_file(self) -> Path:
-    """Find the main template file (template.yaml or template.yml)."""
-    for filename in ["template.yaml", "template.yml"]:
-      path = self.template_dir / filename
-      if path.exists():
-        return path
-    raise FileNotFoundError(f"Main template file (template.yaml or template.yml) not found in {self.template_dir}")
-
-  @staticmethod
-  @lru_cache(maxsize=32)
-  def _load_module_specs(kind: str) -> dict:
-    """Load specifications from the corresponding module with caching.
-    
-    Uses LRU cache to avoid re-loading the same module spec multiple times.
-    This significantly improves performance when listing many templates of the same kind.
-    
-    Args:
-        kind: The module kind (e.g., 'compose', 'terraform')
-        
-    Returns:
-        Dictionary containing the module's spec, or empty dict if kind is empty
-        
-    Raises:
-        ValueError: If module cannot be loaded or spec is invalid
-    """
-    if not kind:
-      return {}
-    try:
-      import importlib
-      module = importlib.import_module(f"cli.modules.{kind}")
-      spec = getattr(module, 'spec', {})
-      logger.debug(f"Loaded and cached module spec for kind '{kind}'")
-      return spec
-    except Exception as e:
-      raise ValueError(f"Error loading module specifications for kind '{kind}': {e}")
-
-  def _merge_specs(self, module_specs: dict, template_specs: dict) -> dict:
-    """Deep merge template specs with module specs using VariableCollection.
-    
-    Uses VariableCollection's native merge() method for consistent merging logic.
-    Module specs are base, template specs override with origin tracking.
-    """
-    # Create VariableCollection from module specs (base)
-    module_collection = VariableCollection(module_specs) if module_specs else VariableCollection({})
-    
-    # Set origin for module variables
-    for section in module_collection.get_sections().values():
-      for variable in section.variables.values():
-        if not variable.origin:
-          variable.origin = "module"
-    
-    # Merge template specs into module specs (template overrides)
-    if template_specs:
-      merged_collection = module_collection.merge(template_specs, origin="template")
-    else:
-      merged_collection = module_collection
-    
-    # Convert back to dict format
-    merged_spec = {}
-    for section_key, section in merged_collection.get_sections().items():
-      merged_spec[section_key] = section.to_dict()
-    
-    return merged_spec
-
-  def _collect_template_files(self) -> None:
-    """Collects all TemplateFile objects in the template directory."""
-    template_files: List[TemplateFile] = []
-    
-    for root, _, files in os.walk(self.template_dir):
-      for filename in files:
-        file_path = Path(root) / filename
-        relative_path = file_path.relative_to(self.template_dir)
-        
-        # Skip the main template file
-        if filename in ["template.yaml", "template.yml"]:
-          continue
-        
-        if filename.endswith(".j2"):
-          file_type: Literal['j2', 'static'] = 'j2'
-          output_path = relative_path.with_suffix('') # Remove .j2 suffix
-        else:
-          file_type = 'static'
-          output_path = relative_path # Static files keep their name
-        
-        template_files.append(TemplateFile(relative_path=relative_path, file_type=file_type, output_path=output_path))
-          
-    self.__template_files = template_files
-
-  def _extract_all_used_variables(self) -> Set[str]:
-    """Extract all undeclared variables from all .j2 files in the template directory.
-    
-    Raises:
-        ValueError: If any Jinja2 template has syntax errors
-    """
-    used_variables: Set[str] = set()
-    syntax_errors = []
-    
-    for template_file in self.template_files: # Iterate over TemplateFile objects
-      if template_file.file_type == 'j2':
-        file_path = self.template_dir / template_file.relative_path
         try:
         try:
-          with open(file_path, "r", encoding="utf-8") as f:
-            content = f.read()
-            ast = self.jinja_env.parse(content) # Use lazy-loaded jinja_env
-            used_variables.update(meta.find_undeclared_variables(ast))
-        except (IOError, OSError) as e:
-          relative_path = file_path.relative_to(self.template_dir)
-          syntax_errors.append(f"  - {relative_path}: File I/O error: {e}")
-        except Exception as e:
-          # Collect syntax errors for Jinja2 issues
-          relative_path = file_path.relative_to(self.template_dir)
-          syntax_errors.append(f"  - {relative_path}: {e}")
-    
-    # Raise error if any syntax errors were found
-    if syntax_errors:
-      logger.error(f"Jinja2 syntax errors found in template '{self.id}'")
-      raise TemplateSyntaxError(self.id, syntax_errors)
-    
-    return used_variables
-
-  def _extract_jinja_default_values(self) -> dict[str, object]:
-    """Scan all .j2 files and extract literal arguments to the `default` filter.
-
-    Returns a mapping var_name -> literal_value for simple cases like
-    {{ var | default("value") }} or {{ var | default(123) }}.
-    This does not attempt to evaluate complex expressions.
-    """
-    defaults: dict[str, object] = {}
+            # Find and parse the main template file (template.yaml or template.yml)
+            main_template_path = self._find_main_template_file()
+            with open(main_template_path, "r", encoding="utf-8") as f:
+                # Load all YAML documents (handles templates with empty lines before ---)
+                documents = list(yaml.safe_load_all(f))
 
 
-    class _DefaultVisitor(NodeVisitor):
-      def __init__(self):
-        self.found: dict[str, object] = {}
+                # Filter out None/empty documents and get the first non-empty one
+                valid_docs = [doc for doc in documents if doc is not None]
 
 
-      def visit_Filter(self, node: nodes.Filter) -> None:  # type: ignore[override]
-        try:
-          if getattr(node, 'name', None) == 'default' and node.args:
-            # target variable name when filter is applied directly to a Name
-            target = None
-            if isinstance(node.node, nodes.Name):
-              target = node.node.name
-
-            # first arg literal
-            first = node.args[0]
-            if isinstance(first, nodes.Const) and target:
-              self.found[target] = first.value
-        except Exception:
-          # Be resilient to unexpected node shapes
-          pass
-        # continue traversal
-        self.generic_visit(node)
-
-    visitor = _DefaultVisitor()
-
-    for template_file in self.template_files:
-      if template_file.file_type != 'j2':
-        continue
-      file_path = self.template_dir / template_file.relative_path
-      try:
-        with open(file_path, 'r', encoding='utf-8') as f:
-          content = f.read()
-        ast = self.jinja_env.parse(content)
-        visitor.visit(ast)
-      except (IOError, OSError, yaml.YAMLError):
-        # Skip failures - this extraction is best-effort only
-        continue
-
-    return visitor.found
-
-  def _filter_specs_to_used(self, used_variables: set, merged_specs: dict, module_specs: dict, template_specs: dict) -> dict:
-    """Filter specs to only include variables used in templates using VariableCollection.
-    
-    Uses VariableCollection's native filter_to_used() method.
-    Keeps sensitive variables only if they're defined in the template spec or actually used.
-    """
-    # Build set of variables explicitly defined in template spec
-    template_defined_vars = set()
-    for section_data in (template_specs or {}).values():
-      if isinstance(section_data, dict) and 'vars' in section_data:
-        template_defined_vars.update(section_data['vars'].keys())
-    
-    # Create VariableCollection from merged specs
-    merged_collection = VariableCollection(merged_specs)
-    
-    # Filter to only used variables (and sensitive ones that are template-defined)
-    # We keep sensitive variables that are either:
-    # 1. Actually used in template files, OR
-    # 2. Explicitly defined in the template spec (even if not yet used)
-    variables_to_keep = used_variables | template_defined_vars
-    filtered_collection = merged_collection.filter_to_used(variables_to_keep, keep_sensitive=False)
-    
-    # Convert back to dict format
-    filtered_specs = {}
-    for section_key, section in filtered_collection.get_sections().items():
-      filtered_specs[section_key] = section.to_dict()
-    
-    return filtered_specs
-
-  def _validate_schema_version(self, module_schema: str, module_name: str) -> None:
-    """Validate that template schema version is supported by the module.
-    
-    Args:
-        module_schema: Schema version supported by the module
-        module_name: Name of the module (for error messages)
-    
-    Raises:
-        IncompatibleSchemaVersionError: If template schema > module schema
-    """
-    template_schema = self.schema_version
-    
-    # Compare schema versions
-    if not is_compatible(module_schema, template_schema):
-      logger.error(
-        f"Template '{self.id}' uses schema version {template_schema}, "
-        f"but module '{module_name}' only supports up to {module_schema}"
-      )
-      raise IncompatibleSchemaVersionError(
-        template_id=self.id,
-        template_schema=template_schema,
-        module_schema=module_schema,
-        module_name=module_name
-      )
-    
-    logger.debug(
-      f"Template '{self.id}' schema version compatible: "
-      f"template uses {template_schema}, module supports {module_schema}"
-    )
-  
-  @staticmethod
-  def _validate_kind(template_data: dict) -> None:
-    """Validate that template has required 'kind' field.
-    
-    Args:
-        template_data: Parsed YAML data from template.yaml
-        
-    Raises:
-        ValueError: If 'kind' field is missing
-    """
-    if not template_data.get("kind"):
-      raise TemplateValidationError("Template format error: missing 'kind' field")
-
-  def _validate_variable_definitions(self, used_variables: set[str], merged_specs: dict[str, Any]) -> None:
-    """Validate that all variables used in Jinja2 content are defined in the spec."""
-    defined_variables = set()
-    for section_data in merged_specs.values():
-      if "vars" in section_data and isinstance(section_data["vars"], dict):
-        defined_variables.update(section_data["vars"].keys())
-    
-    undefined_variables = used_variables - defined_variables
-    if undefined_variables:
-      undefined_list = sorted(undefined_variables)
-      error_msg = (
-          f"Template validation error in '{self.id}': "
-          f"Variables used in template content but not defined in spec: {undefined_list}\n\n"
-          f"Please add these variables to your template's template.yaml spec. "
-          f"Each variable must have a default value.\n\n"
-          f"Example:\n"
-          f"spec:\n"
-          f"  general:\n"
-          f"    vars:\n"
-      )
-      for var_name in undefined_list:
-          error_msg += (
-              f"      {var_name}:\n"
-              f"        type: str\n"
-              f"        description: Description for {var_name}\n"
-              f"        default: <your_default_value_here>\n"
-          )
-      logger.error(error_msg)
-      raise TemplateValidationError(error_msg)
-
-  @staticmethod
-  def _create_jinja_env(searchpath: Path) -> Environment:
-    """Create sandboxed Jinja2 environment for secure template processing.
-    
-    Uses SandboxedEnvironment to prevent code injection vulnerabilities
-    when processing untrusted templates. This restricts access to dangerous
-    operations while still allowing safe template rendering.
-    
-    Returns:
-        SandboxedEnvironment configured for template processing.
-    """
-    # NOTE Use SandboxedEnvironment for security - prevents arbitrary code execution
-    return SandboxedEnvironment(
-      loader=FileSystemLoader(searchpath),
-      trim_blocks=True,
-      lstrip_blocks=True,
-      keep_trailing_newline=False,
-    )
-
-  def render(self, variables: VariableCollection, debug: bool = False) -> tuple[Dict[str, str], Dict[str, Any]]:
-    """Render all .j2 files in the template directory.
-    
-    Args:
-        variables: VariableCollection with values to use for rendering
-        debug: Enable debug mode with verbose output
-        
-    Returns:
-        Tuple of (rendered_files, variable_values) where variable_values includes autogenerated values
-    """
-    # Use get_satisfied_values() to exclude variables from sections with unsatisfied dependencies
-    variable_values = variables.get_satisfied_values()
-    
-    # Auto-generate values for autogenerated variables that are empty
-    import secrets
-    import string
-    for section in variables.get_sections().values():
-      for var_name, variable in section.variables.items():
-        if variable.autogenerated and (variable.value is None or variable.value == ""):
-          # Generate a secure random string (32 characters by default)
-          alphabet = string.ascii_letters + string.digits
-          generated_value = ''.join(secrets.choice(alphabet) for _ in range(32))
-          variable_values[var_name] = generated_value
-          logger.debug(f"Auto-generated value for variable '{var_name}'")
-    
-    if debug:
-      logger.info(f"Rendering template '{self.id}' in debug mode")
-      logger.info(f"Available variables: {sorted(variable_values.keys())}")
-      logger.info(f"Variable values: {variable_values}")
-    else:
-      logger.debug(f"Rendering template '{self.id}' with variables: {variable_values}")
-    
-    rendered_files = {}
-    available_vars = set(variable_values.keys())
-    
-    for template_file in self.template_files: # Iterate over TemplateFile objects
-      if template_file.file_type == 'j2':
+                if not valid_docs:
+                    raise ValueError("Template file contains no valid YAML data")
+
+                if len(valid_docs) > 1:
+                    logger.warning(
+                        "Template file contains multiple YAML documents, using the first one"
+                    )
+
+                self._template_data = valid_docs[0]
+
+            # Validate template data
+            if not isinstance(self._template_data, dict):
+                raise ValueError("Template file must contain a valid YAML dictionary")
+
+            # Load metadata (always needed)
+            self.metadata = TemplateMetadata(
+                self._template_data, library_name, library_type
+            )
+            logger.debug(f"Loaded metadata: {self.metadata}")
+
+            # Validate 'kind' field (always needed)
+            self._validate_kind(self._template_data)
+
+            # Extract schema version (default to 1.0 for backward compatibility)
+            self.schema_version = str(self._template_data.get("schema", "1.0"))
+            logger.debug(f"Template schema version: {self.schema_version}")
+
+            # Note: Schema version validation is done by the module when loading templates
+
+            # NOTE: File collection is now lazy-loaded via the template_files property
+            # This significantly improves performance when listing many templates
+
+            logger.info(f"Loaded template '{self.id}' (v{self.metadata.version})")
+
+        except (ValueError, FileNotFoundError) as e:
+            logger.error(f"Error loading template from {template_dir}: {e}")
+            raise TemplateLoadError(f"Error loading template from {template_dir}: {e}")
+        except yaml.YAMLError as e:
+            logger.error(f"YAML parsing error in template {template_dir}: {e}")
+            raise YAMLParseError(str(template_dir / "template.y*ml"), e)
+        except (IOError, OSError) as e:
+            logger.error(f"File I/O error loading template {template_dir}: {e}")
+            raise TemplateLoadError(
+                f"File I/O error loading template from {template_dir}: {e}"
+            )
+
+    def set_qualified_id(self, library_name: str | None = None) -> None:
+        """Set a qualified ID for this template (used when duplicates exist across libraries).
+
+        Args:
+            library_name: Name of the library to qualify with. If None, uses self.library_name
+        """
+        lib_name = library_name or self.library_name
+        self.id = f"{self.original_id}.{lib_name}"
+        logger.debug(f"Template ID qualified: {self.original_id} -> {self.id}")
+
+    def _find_main_template_file(self) -> Path:
+        """Find the main template file (template.yaml or template.yml)."""
+        for filename in ["template.yaml", "template.yml"]:
+            path = self.template_dir / filename
+            if path.exists():
+                return path
+        raise FileNotFoundError(
+            f"Main template file (template.yaml or template.yml) not found in {self.template_dir}"
+        )
+
+    @staticmethod
+    @lru_cache(maxsize=32)
+    def _load_module_specs(kind: str) -> dict:
+        """Load specifications from the corresponding module with caching.
+
+        Uses LRU cache to avoid re-loading the same module spec multiple times.
+        This significantly improves performance when listing many templates of the same kind.
+
+        Args:
+            kind: The module kind (e.g., 'compose', 'terraform')
+
+        Returns:
+            Dictionary containing the module's spec, or empty dict if kind is empty
+
+        Raises:
+            ValueError: If module cannot be loaded or spec is invalid
+        """
+        if not kind:
+            return {}
         try:
         try:
-          if debug:
-            logger.info(f"Rendering Jinja2 template: {template_file.relative_path}")
-          
-          template = self.jinja_env.get_template(str(template_file.relative_path)) # Use lazy-loaded jinja_env
-          rendered_content = template.render(**variable_values)
-          
-          # Sanitize the rendered content to remove excessive blank lines
-          rendered_content = self._sanitize_content(rendered_content, template_file.output_path)
-          rendered_files[str(template_file.output_path)] = rendered_content
-          
-          if debug:
-            logger.info(f"Successfully rendered: {template_file.relative_path} -> {template_file.output_path}")
-        
-        except (UndefinedError, Jinja2TemplateSyntaxError, Jinja2TemplateNotFound, Jinja2TemplateError) as e:
-          # Parse Jinja2 error to extract detailed information
-          error_msg, line_num, col, context_lines, suggestions = _parse_jinja_error(
-              e, template_file, self.template_dir, available_vars
-          )
-          
-          logger.error(f"Error rendering template file {template_file.relative_path}: {error_msg}")
-          
-          # Create enhanced TemplateRenderError with all context
-          raise TemplateRenderError(
-              message=error_msg,
-              file_path=str(template_file.relative_path),
-              line_number=line_num,
-              column=col,
-              context_lines=context_lines,
-              variable_context={k: str(v) for k, v in variable_values.items()} if debug else {},
-              suggestions=suggestions,
-              original_error=e
-          )
-        
+            import importlib
+
+            module = importlib.import_module(f"cli.modules.{kind}")
+            spec = getattr(module, "spec", {})
+            logger.debug(f"Loaded and cached module spec for kind '{kind}'")
+            return spec
         except Exception as e:
         except Exception as e:
-          # Catch any other unexpected errors
-          logger.error(f"Unexpected error rendering template file {template_file.relative_path}: {e}")
-          raise TemplateRenderError(
-              message=f"Unexpected rendering error: {e}",
-              file_path=str(template_file.relative_path),
-              suggestions=["This is an unexpected error. Please check the template for issues."],
-              original_error=e
-          )
-      
-      elif template_file.file_type == 'static':
-          # For static files, just read their content and add to rendered_files
-          # This ensures static files are also part of the output dictionary
-          file_path = self.template_dir / template_file.relative_path
-          try:
-              if debug:
-                logger.info(f"Copying static file: {template_file.relative_path}")
-              
-              with open(file_path, "r", encoding="utf-8") as f:
-                  content = f.read()
-                  rendered_files[str(template_file.output_path)] = content
-          except (IOError, OSError) as e:
-              logger.error(f"Error reading static file {file_path}: {e}")
-              raise TemplateRenderError(
-                  message=f"Error reading static file: {e}",
-                  file_path=str(template_file.relative_path),
-                  suggestions=["Check that the file exists and has read permissions"],
-                  original_error=e
-              )
-          
-    return rendered_files, variable_values
-  
-  def _sanitize_content(self, content: str, file_path: Path) -> str:
-    """Sanitize rendered content by removing excessive blank lines and trailing whitespace."""
-    if not content:
-      return content
-    
-    lines = [line.rstrip() for line in content.split('\n')]
-    sanitized = []
-    prev_blank = False
-    
-    for line in lines:
-      is_blank = not line
-      if is_blank and prev_blank:
-        continue  # Skip consecutive blank lines
-      sanitized.append(line)
-      prev_blank = is_blank
-    
-    # Remove leading blanks and ensure single trailing newline
-    return '\n'.join(sanitized).lstrip('\n').rstrip('\n') + '\n'
-
-  
-  @property
-  def template_files(self) -> List[TemplateFile]:
-      if self.__template_files is None:
-          self._collect_template_files() # Populate self.__template_files
-      return self.__template_files
-
-  @property
-  def template_specs(self) -> dict:
-      """Get the spec section from template YAML data."""
-      return self._template_data.get("spec", {})
-
-  @property
-  def module_specs(self) -> dict:
-      """Get the spec from the module definition."""
-      if self.__module_specs is None:
-          kind = self._template_data.get("kind")
-          self.__module_specs = self._load_module_specs(kind)
-      return self.__module_specs
-
-  @property
-  def merged_specs(self) -> dict:
-      if self.__merged_specs is None:
-          self.__merged_specs = self._merge_specs(self.module_specs, self.template_specs)
-      return self.__merged_specs
-
-  @property
-  def jinja_env(self) -> Environment:
-      if self.__jinja_env is None:
-          self.__jinja_env = self._create_jinja_env(self.template_dir)
-      return self.__jinja_env
-
-  @property
-  def used_variables(self) -> Set[str]:
-      if self.__used_variables is None:
-          self.__used_variables = self._extract_all_used_variables()
-      return self.__used_variables
-
-  @property
-  def variables(self) -> VariableCollection:
-      if self.__variables is None:
-          # Validate that all used variables are defined
-          self._validate_variable_definitions(self.used_variables, self.merged_specs)
-          # Filter specs to only used variables
-          filtered_specs = self._filter_specs_to_used(self.used_variables, self.merged_specs, self.module_specs, self.template_specs)
-
-          # Best-effort: extract literal defaults from Jinja `default()` filter and
-          # merge them into the filtered_specs when no default exists there.
-          try:
-            jinja_defaults = self._extract_jinja_default_values()
-            for section_key, section_data in filtered_specs.items():
-              # Guard against None from empty YAML sections
-              vars_dict = section_data.get('vars') or {}
-              for var_name, var_data in vars_dict.items():
-                if 'default' not in var_data or var_data.get('default') in (None, ''):
-                  if var_name in jinja_defaults:
-                    var_data['default'] = jinja_defaults[var_name]
-          except (KeyError, TypeError, AttributeError):
-            # Keep behavior stable on any extraction errors
-            pass
-
-          self.__variables = VariableCollection(filtered_specs)
-          # Sort sections: required first, then enabled, then disabled
-          self.__variables.sort_sections()
-      return self.__variables
+            raise ValueError(
+                f"Error loading module specifications for kind '{kind}': {e}"
+            )
+
+    def _merge_specs(self, module_specs: dict, template_specs: dict) -> dict:
+        """Deep merge template specs with module specs using VariableCollection.
+
+        Uses VariableCollection's native merge() method for consistent merging logic.
+        Module specs are base, template specs override with origin tracking.
+        """
+        # Create VariableCollection from module specs (base)
+        module_collection = (
+            VariableCollection(module_specs) if module_specs else VariableCollection({})
+        )
+
+        # Set origin for module variables
+        for section in module_collection.get_sections().values():
+            for variable in section.variables.values():
+                if not variable.origin:
+                    variable.origin = "module"
+
+        # Merge template specs into module specs (template overrides)
+        if template_specs:
+            merged_collection = module_collection.merge(
+                template_specs, origin="template"
+            )
+        else:
+            merged_collection = module_collection
+
+        # Convert back to dict format
+        merged_spec = {}
+        for section_key, section in merged_collection.get_sections().items():
+            merged_spec[section_key] = section.to_dict()
+
+        return merged_spec
+
+    def _collect_template_files(self) -> None:
+        """Collects all TemplateFile objects in the template directory."""
+        template_files: List[TemplateFile] = []
+
+        for root, _, files in os.walk(self.template_dir):
+            for filename in files:
+                file_path = Path(root) / filename
+                relative_path = file_path.relative_to(self.template_dir)
+
+                # Skip the main template file
+                if filename in ["template.yaml", "template.yml"]:
+                    continue
+
+                if filename.endswith(".j2"):
+                    file_type: Literal["j2", "static"] = "j2"
+                    output_path = relative_path.with_suffix("")  # Remove .j2 suffix
+                else:
+                    file_type = "static"
+                    output_path = relative_path  # Static files keep their name
+
+                template_files.append(
+                    TemplateFile(
+                        relative_path=relative_path,
+                        file_type=file_type,
+                        output_path=output_path,
+                    )
+                )
+
+        self.__template_files = template_files
+
+    def _extract_all_used_variables(self) -> Set[str]:
+        """Extract all undeclared variables from all .j2 files in the template directory.
+
+        Raises:
+            ValueError: If any Jinja2 template has syntax errors
+        """
+        used_variables: Set[str] = set()
+        syntax_errors = []
+
+        for template_file in self.template_files:  # Iterate over TemplateFile objects
+            if template_file.file_type == "j2":
+                file_path = self.template_dir / template_file.relative_path
+                try:
+                    with open(file_path, "r", encoding="utf-8") as f:
+                        content = f.read()
+                        ast = self.jinja_env.parse(content)  # Use lazy-loaded jinja_env
+                        used_variables.update(meta.find_undeclared_variables(ast))
+                except (IOError, OSError) as e:
+                    relative_path = file_path.relative_to(self.template_dir)
+                    syntax_errors.append(f"  - {relative_path}: File I/O error: {e}")
+                except Exception as e:
+                    # Collect syntax errors for Jinja2 issues
+                    relative_path = file_path.relative_to(self.template_dir)
+                    syntax_errors.append(f"  - {relative_path}: {e}")
+
+        # Raise error if any syntax errors were found
+        if syntax_errors:
+            logger.error(f"Jinja2 syntax errors found in template '{self.id}'")
+            raise TemplateSyntaxError(self.id, syntax_errors)
+
+        return used_variables
+
+    def _extract_jinja_default_values(self) -> dict[str, object]:
+        """Scan all .j2 files and extract literal arguments to the `default` filter.
+
+        Returns a mapping var_name -> literal_value for simple cases like
+        {{ var | default("value") }} or {{ var | default(123) }}.
+        This does not attempt to evaluate complex expressions.
+        """
+
+        class _DefaultVisitor(NodeVisitor):
+            def __init__(self):
+                self.found: dict[str, object] = {}
+
+            def visit_Filter(self, node: nodes.Filter) -> None:  # type: ignore[override]
+                try:
+                    if getattr(node, "name", None) == "default" and node.args:
+                        # target variable name when filter is applied directly to a Name
+                        target = None
+                        if isinstance(node.node, nodes.Name):
+                            target = node.node.name
+
+                        # first arg literal
+                        first = node.args[0]
+                        if isinstance(first, nodes.Const) and target:
+                            self.found[target] = first.value
+                except Exception:
+                    # Be resilient to unexpected node shapes
+                    pass
+                # continue traversal
+                self.generic_visit(node)
+
+        visitor = _DefaultVisitor()
+
+        for template_file in self.template_files:
+            if template_file.file_type != "j2":
+                continue
+            file_path = self.template_dir / template_file.relative_path
+            try:
+                with open(file_path, "r", encoding="utf-8") as f:
+                    content = f.read()
+                ast = self.jinja_env.parse(content)
+                visitor.visit(ast)
+            except (IOError, OSError, yaml.YAMLError):
+                # Skip failures - this extraction is best-effort only
+                continue
+
+        return visitor.found
+
+    def _filter_specs_to_used(
+        self,
+        used_variables: set,
+        merged_specs: dict,
+        module_specs: dict,
+        template_specs: dict,
+    ) -> dict:
+        """Filter specs to only include variables used in templates using VariableCollection.
+
+        Uses VariableCollection's native filter_to_used() method.
+        Keeps sensitive variables only if they're defined in the template spec or actually used.
+        """
+        # Build set of variables explicitly defined in template spec
+        template_defined_vars = set()
+        for section_data in (template_specs or {}).values():
+            if isinstance(section_data, dict) and "vars" in section_data:
+                template_defined_vars.update(section_data["vars"].keys())
+
+        # Create VariableCollection from merged specs
+        merged_collection = VariableCollection(merged_specs)
+
+        # Filter to only used variables (and sensitive ones that are template-defined)
+        # We keep sensitive variables that are either:
+        # 1. Actually used in template files, OR
+        # 2. Explicitly defined in the template spec (even if not yet used)
+        variables_to_keep = used_variables | template_defined_vars
+        filtered_collection = merged_collection.filter_to_used(
+            variables_to_keep, keep_sensitive=False
+        )
+
+        # Convert back to dict format
+        filtered_specs = {}
+        for section_key, section in filtered_collection.get_sections().items():
+            filtered_specs[section_key] = section.to_dict()
+
+        return filtered_specs
+
+    def _validate_schema_version(self, module_schema: str, module_name: str) -> None:
+        """Validate that template schema version is supported by the module.
+
+        Args:
+            module_schema: Schema version supported by the module
+            module_name: Name of the module (for error messages)
+
+        Raises:
+            IncompatibleSchemaVersionError: If template schema > module schema
+        """
+        template_schema = self.schema_version
+
+        # Compare schema versions
+        if not is_compatible(module_schema, template_schema):
+            logger.error(
+                f"Template '{self.id}' uses schema version {template_schema}, "
+                f"but module '{module_name}' only supports up to {module_schema}"
+            )
+            raise IncompatibleSchemaVersionError(
+                template_id=self.id,
+                template_schema=template_schema,
+                module_schema=module_schema,
+                module_name=module_name,
+            )
+
+        logger.debug(
+            f"Template '{self.id}' schema version compatible: "
+            f"template uses {template_schema}, module supports {module_schema}"
+        )
+
+    @staticmethod
+    def _validate_kind(template_data: dict) -> None:
+        """Validate that template has required 'kind' field.
+
+        Args:
+            template_data: Parsed YAML data from template.yaml
+
+        Raises:
+            ValueError: If 'kind' field is missing
+        """
+        if not template_data.get("kind"):
+            raise TemplateValidationError("Template format error: missing 'kind' field")
+
+    def _validate_variable_definitions(
+        self, used_variables: set[str], merged_specs: dict[str, Any]
+    ) -> None:
+        """Validate that all variables used in Jinja2 content are defined in the spec."""
+        defined_variables = set()
+        for section_data in merged_specs.values():
+            if "vars" in section_data and isinstance(section_data["vars"], dict):
+                defined_variables.update(section_data["vars"].keys())
+
+        undefined_variables = used_variables - defined_variables
+        if undefined_variables:
+            undefined_list = sorted(undefined_variables)
+            error_msg = (
+                f"Template validation error in '{self.id}': "
+                f"Variables used in template content but not defined in spec: {undefined_list}\n\n"
+                f"Please add these variables to your template's template.yaml spec. "
+                f"Each variable must have a default value.\n\n"
+                f"Example:\n"
+                f"spec:\n"
+                f"  general:\n"
+                f"    vars:\n"
+            )
+            for var_name in undefined_list:
+                error_msg += (
+                    f"      {var_name}:\n"
+                    f"        type: str\n"
+                    f"        description: Description for {var_name}\n"
+                    f"        default: <your_default_value_here>\n"
+                )
+            logger.error(error_msg)
+            raise TemplateValidationError(error_msg)
+
+    @staticmethod
+    def _create_jinja_env(searchpath: Path) -> Environment:
+        """Create sandboxed Jinja2 environment for secure template processing.
+
+        Uses SandboxedEnvironment to prevent code injection vulnerabilities
+        when processing untrusted templates. This restricts access to dangerous
+        operations while still allowing safe template rendering.
+
+        Returns:
+            SandboxedEnvironment configured for template processing.
+        """
+        # NOTE Use SandboxedEnvironment for security - prevents arbitrary code execution
+        return SandboxedEnvironment(
+            loader=FileSystemLoader(searchpath),
+            trim_blocks=True,
+            lstrip_blocks=True,
+            keep_trailing_newline=False,
+        )
+
+    def render(
+        self, variables: VariableCollection, debug: bool = False
+    ) -> tuple[Dict[str, str], Dict[str, Any]]:
+        """Render all .j2 files in the template directory.
+
+        Args:
+            variables: VariableCollection with values to use for rendering
+            debug: Enable debug mode with verbose output
+
+        Returns:
+            Tuple of (rendered_files, variable_values) where variable_values includes autogenerated values
+        """
+        # Use get_satisfied_values() to exclude variables from sections with unsatisfied dependencies
+        variable_values = variables.get_satisfied_values()
+
+        # Auto-generate values for autogenerated variables that are empty
+        import secrets
+        import string
+
+        for section in variables.get_sections().values():
+            for var_name, variable in section.variables.items():
+                if variable.autogenerated and (
+                    variable.value is None or variable.value == ""
+                ):
+                    # Generate a secure random string (32 characters by default)
+                    alphabet = string.ascii_letters + string.digits
+                    generated_value = "".join(
+                        secrets.choice(alphabet) for _ in range(32)
+                    )
+                    variable_values[var_name] = generated_value
+                    logger.debug(f"Auto-generated value for variable '{var_name}'")
+
+        if debug:
+            logger.info(f"Rendering template '{self.id}' in debug mode")
+            logger.info(f"Available variables: {sorted(variable_values.keys())}")
+            logger.info(f"Variable values: {variable_values}")
+        else:
+            logger.debug(
+                f"Rendering template '{self.id}' with variables: {variable_values}"
+            )
+
+        rendered_files = {}
+        available_vars = set(variable_values.keys())
+
+        for template_file in self.template_files:  # Iterate over TemplateFile objects
+            if template_file.file_type == "j2":
+                try:
+                    if debug:
+                        logger.info(
+                            f"Rendering Jinja2 template: {template_file.relative_path}"
+                        )
+
+                    template = self.jinja_env.get_template(
+                        str(template_file.relative_path)
+                    )  # Use lazy-loaded jinja_env
+                    rendered_content = template.render(**variable_values)
+
+                    # Sanitize the rendered content to remove excessive blank lines
+                    rendered_content = self._sanitize_content(
+                        rendered_content, template_file.output_path
+                    )
+                    rendered_files[str(template_file.output_path)] = rendered_content
+
+                    if debug:
+                        logger.info(
+                            f"Successfully rendered: {template_file.relative_path} -> {template_file.output_path}"
+                        )
+
+                except (
+                    UndefinedError,
+                    Jinja2TemplateSyntaxError,
+                    Jinja2TemplateNotFound,
+                    Jinja2TemplateError,
+                ) as e:
+                    # Parse Jinja2 error to extract detailed information
+                    error_msg, line_num, col, context_lines, suggestions = (
+                        _parse_jinja_error(
+                            e, template_file, self.template_dir, available_vars
+                        )
+                    )
+
+                    logger.error(
+                        f"Error rendering template file {template_file.relative_path}: {error_msg}"
+                    )
+
+                    # Create enhanced TemplateRenderError with all context
+                    raise TemplateRenderError(
+                        message=error_msg,
+                        file_path=str(template_file.relative_path),
+                        line_number=line_num,
+                        column=col,
+                        context_lines=context_lines,
+                        variable_context={k: str(v) for k, v in variable_values.items()}
+                        if debug
+                        else {},
+                        suggestions=suggestions,
+                        original_error=e,
+                    )
+
+                except Exception as e:
+                    # Catch any other unexpected errors
+                    logger.error(
+                        f"Unexpected error rendering template file {template_file.relative_path}: {e}"
+                    )
+                    raise TemplateRenderError(
+                        message=f"Unexpected rendering error: {e}",
+                        file_path=str(template_file.relative_path),
+                        suggestions=[
+                            "This is an unexpected error. Please check the template for issues."
+                        ],
+                        original_error=e,
+                    )
+
+            elif template_file.file_type == "static":
+                # For static files, just read their content and add to rendered_files
+                # This ensures static files are also part of the output dictionary
+                file_path = self.template_dir / template_file.relative_path
+                try:
+                    if debug:
+                        logger.info(
+                            f"Copying static file: {template_file.relative_path}"
+                        )
+
+                    with open(file_path, "r", encoding="utf-8") as f:
+                        content = f.read()
+                        rendered_files[str(template_file.output_path)] = content
+                except (IOError, OSError) as e:
+                    logger.error(f"Error reading static file {file_path}: {e}")
+                    raise TemplateRenderError(
+                        message=f"Error reading static file: {e}",
+                        file_path=str(template_file.relative_path),
+                        suggestions=[
+                            "Check that the file exists and has read permissions"
+                        ],
+                        original_error=e,
+                    )
+
+        return rendered_files, variable_values
+
+    def _sanitize_content(self, content: str, file_path: Path) -> str:
+        """Sanitize rendered content by removing excessive blank lines and trailing whitespace."""
+        if not content:
+            return content
+
+        lines = [line.rstrip() for line in content.split("\n")]
+        sanitized = []
+        prev_blank = False
+
+        for line in lines:
+            is_blank = not line
+            if is_blank and prev_blank:
+                continue  # Skip consecutive blank lines
+            sanitized.append(line)
+            prev_blank = is_blank
+
+        # Remove leading blanks and ensure single trailing newline
+        return "\n".join(sanitized).lstrip("\n").rstrip("\n") + "\n"
+
+    @property
+    def template_files(self) -> List[TemplateFile]:
+        if self.__template_files is None:
+            self._collect_template_files()  # Populate self.__template_files
+        return self.__template_files
+
    @property
    def template_specs(self) -> dict:
        """Get the spec section from template YAML data.

        Returns the ``spec`` mapping from the parsed template YAML, or ``{}``
        when the key is absent (note: a present-but-null value is returned
        as-is, not replaced).
        """
        return self._template_data.get("spec", {})
+
+    @property
+    def module_specs(self) -> dict:
+        """Get the spec from the module definition."""
+        if self.__module_specs is None:
+            kind = self._template_data.get("kind")
+            self.__module_specs = self._load_module_specs(kind)
+        return self.__module_specs
+
+    @property
+    def merged_specs(self) -> dict:
+        if self.__merged_specs is None:
+            self.__merged_specs = self._merge_specs(
+                self.module_specs, self.template_specs
+            )
+        return self.__merged_specs
+
+    @property
+    def jinja_env(self) -> Environment:
+        if self.__jinja_env is None:
+            self.__jinja_env = self._create_jinja_env(self.template_dir)
+        return self.__jinja_env
+
+    @property
+    def used_variables(self) -> Set[str]:
+        if self.__used_variables is None:
+            self.__used_variables = self._extract_all_used_variables()
+        return self.__used_variables
+
+    @property
+    def variables(self) -> VariableCollection:
+        if self.__variables is None:
+            # Validate that all used variables are defined
+            self._validate_variable_definitions(self.used_variables, self.merged_specs)
+            # Filter specs to only used variables
+            filtered_specs = self._filter_specs_to_used(
+                self.used_variables,
+                self.merged_specs,
+                self.module_specs,
+                self.template_specs,
+            )
+
+            # Best-effort: extract literal defaults from Jinja `default()` filter and
+            # merge them into the filtered_specs when no default exists there.
+            try:
+                jinja_defaults = self._extract_jinja_default_values()
+                for section_key, section_data in filtered_specs.items():
+                    # Guard against None from empty YAML sections
+                    vars_dict = section_data.get("vars") or {}
+                    for var_name, var_data in vars_dict.items():
+                        if "default" not in var_data or var_data.get("default") in (
+                            None,
+                            "",
+                        ):
+                            if var_name in jinja_defaults:
+                                var_data["default"] = jinja_defaults[var_name]
+            except (KeyError, TypeError, AttributeError):
+                # Keep behavior stable on any extraction errors
+                pass
+
+            self.__variables = VariableCollection(filtered_specs)
+            # Sort sections: required first, then enabled, then disabled
+            self.__variables.sort_sections()
+        return self.__variables

+ 62 - 56
cli/core/validators.py

@@ -9,7 +9,7 @@ from __future__ import annotations
 import logging
 import logging
 from abc import ABC, abstractmethod
 from abc import ABC, abstractmethod
 from pathlib import Path
 from pathlib import Path
-from typing import Any, Dict, List, Optional
+from typing import Any, List, Optional
 
 
 import yaml
 import yaml
 from rich.console import Console
 from rich.console import Console
@@ -20,81 +20,81 @@ console = Console()
 
 
 class ValidationResult:
 class ValidationResult:
     """Represents the result of a validation operation."""
     """Represents the result of a validation operation."""
-    
+
     def __init__(self):
     def __init__(self):
         self.errors: List[str] = []
         self.errors: List[str] = []
         self.warnings: List[str] = []
         self.warnings: List[str] = []
         self.info: List[str] = []
         self.info: List[str] = []
-    
+
     def add_error(self, message: str) -> None:
     def add_error(self, message: str) -> None:
         """Add an error message."""
         """Add an error message."""
         self.errors.append(message)
         self.errors.append(message)
         logger.error(f"Validation error: {message}")
         logger.error(f"Validation error: {message}")
-    
+
     def add_warning(self, message: str) -> None:
     def add_warning(self, message: str) -> None:
         """Add a warning message."""
         """Add a warning message."""
         self.warnings.append(message)
         self.warnings.append(message)
         logger.warning(f"Validation warning: {message}")
         logger.warning(f"Validation warning: {message}")
-    
+
     def add_info(self, message: str) -> None:
     def add_info(self, message: str) -> None:
         """Add an info message."""
         """Add an info message."""
         self.info.append(message)
         self.info.append(message)
         logger.info(f"Validation info: {message}")
         logger.info(f"Validation info: {message}")
-    
+
     @property
     @property
     def is_valid(self) -> bool:
     def is_valid(self) -> bool:
         """Check if validation passed (no errors)."""
         """Check if validation passed (no errors)."""
         return len(self.errors) == 0
         return len(self.errors) == 0
-    
+
     @property
     @property
     def has_warnings(self) -> bool:
     def has_warnings(self) -> bool:
         """Check if validation has warnings."""
         """Check if validation has warnings."""
         return len(self.warnings) > 0
         return len(self.warnings) > 0
-    
+
     def display(self, context: str = "Validation") -> None:
     def display(self, context: str = "Validation") -> None:
         """Display validation results to console."""
         """Display validation results to console."""
         if self.errors:
         if self.errors:
             console.print(f"\n[red]✗ {context} Failed:[/red]")
             console.print(f"\n[red]✗ {context} Failed:[/red]")
             for error in self.errors:
             for error in self.errors:
                 console.print(f"  [red]• {error}[/red]")
                 console.print(f"  [red]• {error}[/red]")
-        
+
         if self.warnings:
         if self.warnings:
             console.print(f"\n[yellow]⚠ {context} Warnings:[/yellow]")
             console.print(f"\n[yellow]⚠ {context} Warnings:[/yellow]")
             for warning in self.warnings:
             for warning in self.warnings:
                 console.print(f"  [yellow]• {warning}[/yellow]")
                 console.print(f"  [yellow]• {warning}[/yellow]")
-        
+
         if self.info:
         if self.info:
             console.print(f"\n[blue]ℹ {context} Info:[/blue]")
             console.print(f"\n[blue]ℹ {context} Info:[/blue]")
             for info_msg in self.info:
             for info_msg in self.info:
                 console.print(f"  [blue]• {info_msg}[/blue]")
                 console.print(f"  [blue]• {info_msg}[/blue]")
-        
+
         if self.is_valid and not self.has_warnings:
         if self.is_valid and not self.has_warnings:
             console.print(f"\n[green]✓ {context} Passed[/green]")
             console.print(f"\n[green]✓ {context} Passed[/green]")
 
 
 
 
 class ContentValidator(ABC):
 class ContentValidator(ABC):
     """Abstract base class for content validators."""
     """Abstract base class for content validators."""
-    
+
     @abstractmethod
     @abstractmethod
     def validate(self, content: str, file_path: str) -> ValidationResult:
     def validate(self, content: str, file_path: str) -> ValidationResult:
         """Validate content and return results.
         """Validate content and return results.
-        
+
         Args:
         Args:
             content: The file content to validate
             content: The file content to validate
             file_path: Path to the file (for error messages)
             file_path: Path to the file (for error messages)
-            
+
         Returns:
         Returns:
             ValidationResult with errors, warnings, and info
             ValidationResult with errors, warnings, and info
         """
         """
         pass
         pass
-    
+
     @abstractmethod
     @abstractmethod
     def can_validate(self, file_path: str) -> bool:
     def can_validate(self, file_path: str) -> bool:
         """Check if this validator can validate the given file.
         """Check if this validator can validate the given file.
-        
+
         Args:
         Args:
             file_path: Path to the file
             file_path: Path to the file
-            
+
         Returns:
         Returns:
             True if this validator can handle the file
             True if this validator can handle the file
         """
         """
@@ -103,84 +103,88 @@ class ContentValidator(ABC):
 
 
 class DockerComposeValidator(ContentValidator):
 class DockerComposeValidator(ContentValidator):
     """Validator for Docker Compose files."""
     """Validator for Docker Compose files."""
-    
+
     COMPOSE_FILENAMES = {
     COMPOSE_FILENAMES = {
         "docker-compose.yml",
         "docker-compose.yml",
         "docker-compose.yaml",
         "docker-compose.yaml",
         "compose.yml",
         "compose.yml",
         "compose.yaml",
         "compose.yaml",
     }
     }
-    
+
     def can_validate(self, file_path: str) -> bool:
     def can_validate(self, file_path: str) -> bool:
         """Check if file is a Docker Compose file."""
         """Check if file is a Docker Compose file."""
         filename = Path(file_path).name.lower()
         filename = Path(file_path).name.lower()
         return filename in self.COMPOSE_FILENAMES
         return filename in self.COMPOSE_FILENAMES
-    
+
     def validate(self, content: str, file_path: str) -> ValidationResult:
     def validate(self, content: str, file_path: str) -> ValidationResult:
         """Validate Docker Compose file structure."""
         """Validate Docker Compose file structure."""
         result = ValidationResult()
         result = ValidationResult()
-        
+
         try:
         try:
             # Parse YAML
             # Parse YAML
             data = yaml.safe_load(content)
             data = yaml.safe_load(content)
-            
+
             if not isinstance(data, dict):
             if not isinstance(data, dict):
                 result.add_error("Docker Compose file must be a YAML dictionary")
                 result.add_error("Docker Compose file must be a YAML dictionary")
                 return result
                 return result
-            
+
             # Check for version (optional in Compose v2, but good practice)
             # Check for version (optional in Compose v2, but good practice)
             if "version" not in data:
             if "version" not in data:
-                result.add_info("No 'version' field specified (using Compose v2 format)")
-            
+                result.add_info(
+                    "No 'version' field specified (using Compose v2 format)"
+                )
+
             # Check for services (required)
             # Check for services (required)
             if "services" not in data:
             if "services" not in data:
                 result.add_error("Missing required 'services' section")
                 result.add_error("Missing required 'services' section")
                 return result
                 return result
-            
+
             services = data.get("services", {})
             services = data.get("services", {})
             if not isinstance(services, dict):
             if not isinstance(services, dict):
                 result.add_error("'services' must be a dictionary")
                 result.add_error("'services' must be a dictionary")
                 return result
                 return result
-            
+
             if not services:
             if not services:
                 result.add_warning("No services defined")
                 result.add_warning("No services defined")
-            
+
             # Validate each service
             # Validate each service
             for service_name, service_config in services.items():
             for service_name, service_config in services.items():
                 self._validate_service(service_name, service_config, result)
                 self._validate_service(service_name, service_config, result)
-            
+
             # Check for networks (optional but recommended)
             # Check for networks (optional but recommended)
             if "networks" in data:
             if "networks" in data:
                 networks = data.get("networks", {})
                 networks = data.get("networks", {})
                 if networks and isinstance(networks, dict):
                 if networks and isinstance(networks, dict):
                     result.add_info(f"Defines {len(networks)} network(s)")
                     result.add_info(f"Defines {len(networks)} network(s)")
-            
+
             # Check for volumes (optional)
             # Check for volumes (optional)
             if "volumes" in data:
             if "volumes" in data:
                 volumes = data.get("volumes", {})
                 volumes = data.get("volumes", {})
                 if volumes and isinstance(volumes, dict):
                 if volumes and isinstance(volumes, dict):
                     result.add_info(f"Defines {len(volumes)} volume(s)")
                     result.add_info(f"Defines {len(volumes)} volume(s)")
-            
+
         except yaml.YAMLError as e:
         except yaml.YAMLError as e:
             result.add_error(f"YAML parsing error: {e}")
             result.add_error(f"YAML parsing error: {e}")
         except Exception as e:
         except Exception as e:
             result.add_error(f"Unexpected validation error: {e}")
             result.add_error(f"Unexpected validation error: {e}")
-        
+
         return result
         return result
-    
-    def _validate_service(self, name: str, config: Any, result: ValidationResult) -> None:
+
+    def _validate_service(
+        self, name: str, config: Any, result: ValidationResult
+    ) -> None:
         """Validate a single service configuration."""
         """Validate a single service configuration."""
         if not isinstance(config, dict):
         if not isinstance(config, dict):
             result.add_error(f"Service '{name}': configuration must be a dictionary")
             result.add_error(f"Service '{name}': configuration must be a dictionary")
             return
             return
-        
+
         # Check for image or build (at least one required)
         # Check for image or build (at least one required)
         has_image = "image" in config
         has_image = "image" in config
         has_build = "build" in config
         has_build = "build" in config
-        
+
         if not has_image and not has_build:
         if not has_image and not has_build:
             result.add_error(f"Service '{name}': must specify 'image' or 'build'")
             result.add_error(f"Service '{name}': must specify 'image' or 'build'")
-        
+
         # Warn about common misconfigurations
         # Warn about common misconfigurations
         if "restart" in config:
         if "restart" in config:
             restart_value = config["restart"]
             restart_value = config["restart"]
@@ -190,7 +194,7 @@ class DockerComposeValidator(ContentValidator):
                     f"Service '{name}': restart policy '{restart_value}' may be invalid. "
                     f"Service '{name}': restart policy '{restart_value}' may be invalid. "
                     f"Valid values: {', '.join(valid_restart_policies)}"
                     f"Valid values: {', '.join(valid_restart_policies)}"
                 )
                 )
-        
+
         # Check for environment variables
         # Check for environment variables
         if "environment" in config:
         if "environment" in config:
             env = config["environment"]
             env = config["environment"]
@@ -202,7 +206,7 @@ class DockerComposeValidator(ContentValidator):
                     result.add_warning(
                     result.add_warning(
                         f"Service '{name}': duplicate environment variables: {', '.join(duplicates)}"
                         f"Service '{name}': duplicate environment variables: {', '.join(duplicates)}"
                     )
                     )
-        
+
         # Check for ports
         # Check for ports
         if "ports" in config:
         if "ports" in config:
             ports = config["ports"]
             ports = config["ports"]
@@ -212,51 +216,51 @@ class DockerComposeValidator(ContentValidator):
 
 
 class YAMLValidator(ContentValidator):
 class YAMLValidator(ContentValidator):
     """Basic YAML syntax validator."""
     """Basic YAML syntax validator."""
-    
+
     def can_validate(self, file_path: str) -> bool:
     def can_validate(self, file_path: str) -> bool:
         """Check if file is a YAML file."""
         """Check if file is a YAML file."""
         return Path(file_path).suffix.lower() in [".yml", ".yaml"]
         return Path(file_path).suffix.lower() in [".yml", ".yaml"]
-    
+
     def validate(self, content: str, file_path: str) -> ValidationResult:
     def validate(self, content: str, file_path: str) -> ValidationResult:
         """Validate YAML syntax."""
         """Validate YAML syntax."""
         result = ValidationResult()
         result = ValidationResult()
-        
+
         try:
         try:
             yaml.safe_load(content)
             yaml.safe_load(content)
             result.add_info("YAML syntax is valid")
             result.add_info("YAML syntax is valid")
         except yaml.YAMLError as e:
         except yaml.YAMLError as e:
             result.add_error(f"YAML parsing error: {e}")
             result.add_error(f"YAML parsing error: {e}")
-        
+
         return result
         return result
 
 
 
 
 class ValidatorRegistry:
 class ValidatorRegistry:
     """Registry for content validators."""
     """Registry for content validators."""
-    
+
    def __init__(self):
        """Create a registry pre-populated with the built-in validators."""
        self.validators: List[ContentValidator] = []
        self._register_default_validators()
-    
+
     def _register_default_validators(self) -> None:
     def _register_default_validators(self) -> None:
         """Register built-in validators."""
         """Register built-in validators."""
         self.register(DockerComposeValidator())
         self.register(DockerComposeValidator())
         self.register(YAMLValidator())
         self.register(YAMLValidator())
-    
+
     def register(self, validator: ContentValidator) -> None:
     def register(self, validator: ContentValidator) -> None:
         """Register a validator.
         """Register a validator.
-        
+
         Args:
         Args:
             validator: The validator to register
             validator: The validator to register
         """
         """
         self.validators.append(validator)
         self.validators.append(validator)
         logger.debug(f"Registered validator: {validator.__class__.__name__}")
         logger.debug(f"Registered validator: {validator.__class__.__name__}")
-    
+
     def get_validator(self, file_path: str) -> Optional[ContentValidator]:
     def get_validator(self, file_path: str) -> Optional[ContentValidator]:
         """Get the most appropriate validator for a file.
         """Get the most appropriate validator for a file.
-        
+
         Args:
         Args:
             file_path: Path to the file
             file_path: Path to the file
-            
+
         Returns:
         Returns:
             ContentValidator if found, None otherwise
             ContentValidator if found, None otherwise
         """
         """
@@ -265,26 +269,28 @@ class ValidatorRegistry:
             if validator.can_validate(file_path):
             if validator.can_validate(file_path):
                 return validator
                 return validator
         return None
         return None
-    
+
     def validate_file(self, content: str, file_path: str) -> ValidationResult:
     def validate_file(self, content: str, file_path: str) -> ValidationResult:
         """Validate file content using appropriate validator.
         """Validate file content using appropriate validator.
-        
+
         Args:
         Args:
             content: The file content
             content: The file content
             file_path: Path to the file
             file_path: Path to the file
-            
+
         Returns:
         Returns:
             ValidationResult with validation results
             ValidationResult with validation results
         """
         """
         validator = self.get_validator(file_path)
         validator = self.get_validator(file_path)
-        
+
         if validator:
         if validator:
             logger.debug(f"Validating {file_path} with {validator.__class__.__name__}")
             logger.debug(f"Validating {file_path} with {validator.__class__.__name__}")
             return validator.validate(content, file_path)
             return validator.validate(content, file_path)
-        
+
         # No validator found - return empty result
         # No validator found - return empty result
         result = ValidationResult()
         result = ValidationResult()
-        result.add_info(f"No semantic validator available for {Path(file_path).suffix} files")
+        result.add_info(
+            f"No semantic validator available for {Path(file_path).suffix} files"
+        )
         return result
         return result
 
 
 
 

+ 429 - 409
cli/core/variable.py

@@ -13,413 +13,433 @@ EMAIL_REGEX = re.compile(r"^[^@\\s]+@[^@\\s]+\\.[^@\\s]+$")
 
 
 
 
 class Variable:
 class Variable:
-  """Represents a single templating variable with lightweight validation."""
-
-  def __init__(self, data: dict[str, Any]) -> None:
-    """Initialize Variable from a dictionary containing variable specification.
-    
-    Args:
-        data: Dictionary containing variable specification with required 'name' key
-              and optional keys: description, type, options, prompt, value, default, section, origin
-              
-    Raises:
-        ValueError: If data is not a dict, missing 'name' key, or has invalid default value
-    """
-    # Validate input
-    if not isinstance(data, dict):
-      raise ValueError("Variable data must be a dictionary")
-    
-    if "name" not in data:
-      raise ValueError("Variable data must contain 'name' key")
-    
-    # Track which fields were explicitly provided in source data
-    self._explicit_fields: Set[str] = set(data.keys())
-    
-    # Initialize fields
-    self.name: str = data["name"]
-    self.description: Optional[str] = data.get("description") or data.get("display", "")
-    self.type: str = data.get("type", "str")
-    self.options: Optional[List[Any]] = data.get("options", [])
-    self.prompt: Optional[str] = data.get("prompt")
-    if "value" in data:
-      self.value: Any = data.get("value")
-    elif "default" in data:
-      self.value: Any = data.get("default")
-    else:
-      self.value: Any = None
-    self.origin: Optional[str] = data.get("origin")
-    self.sensitive: bool = data.get("sensitive", False)
-    # Optional extra explanation used by interactive prompts
-    self.extra: Optional[str] = data.get("extra")
-    # Flag indicating this variable should be auto-generated when empty
-    self.autogenerated: bool = data.get("autogenerated", False)
-    # Flag indicating this variable is required even when section is disabled
-    self.required: bool = data.get("required", False)
-    # Flag indicating this variable can be empty/optional
-    self.optional: bool = data.get("optional", False)
-    # Original value before config override (used for display)
-    self.original_value: Optional[Any] = data.get("original_value")
-    # Variable dependencies - can be string or list of strings in format "var_name=value"
-    needs_value = data.get("needs")
-    if needs_value:
-      if isinstance(needs_value, str):
-        self.needs: List[str] = [needs_value]
-      elif isinstance(needs_value, list):
-        self.needs: List[str] = needs_value
-      else:
-        raise ValueError(f"Variable '{self.name}' has invalid 'needs' value: must be string or list")
-    else:
-      self.needs: List[str] = []
-
-    # Validate and convert the default/initial value if present
-    if self.value is not None:
-      try:
-        self.value = self.convert(self.value)
-      except ValueError as exc:
-        raise ValueError(f"Invalid default for variable '{self.name}': {exc}")
-
-
-  def convert(self, value: Any) -> Any:
-    """Validate and convert a raw value based on the variable type.
-    
-    This method performs type conversion but does NOT check if the value
-    is required. Use validate_and_convert() for full validation including
-    required field checks.
-    """
-    if value is None:
-      return None
-
-    # Treat empty strings as None to avoid storing "" for missing values.
-    if isinstance(value, str) and value.strip() == "":
-      return None
-
-    # Type conversion mapping for cleaner code
-    converters = {
-      "bool": self._convert_bool,
-      "int": self._convert_int, 
-      "float": self._convert_float,
-      "enum": self._convert_enum,
-      "url": self._convert_url,
-      "email": self._convert_email,
-    }
-    
-    converter = converters.get(self.type)
-    if converter:
-      return converter(value)
-    
-    # Default to string conversion
-    return str(value)
-  
-  def validate_and_convert(self, value: Any, check_required: bool = True) -> Any:
-    """Validate and convert a value with comprehensive checks.
-    
-    This method combines type conversion with validation logic including
-    required field checks. It's the recommended method for user input validation.
-    
-    Args:
-        value: The raw value to validate and convert
-        check_required: If True, raises ValueError for required fields with empty values
-        
-    Returns:
-        The converted and validated value
-        
-    Raises:
-        ValueError: If validation fails (invalid format, required field empty, etc.)
-        
-    Examples:
-        # Basic validation
-        var.validate_and_convert("example@email.com")  # Returns validated email
-        
-        # Required field validation
-        var.validate_and_convert("", check_required=True)  # Raises ValueError if required
-        
-        # Autogenerated variables - allow empty values
-        var.validate_and_convert("", check_required=False)  # Returns None for autogeneration
-    """
-    # First, convert the value using standard type conversion
-    converted = self.convert(value)
-    
-    # Special handling for autogenerated variables
-    # Allow empty values as they will be auto-generated later
-    if self.autogenerated and (converted is None or (isinstance(converted, str) and (converted == "" or converted == "*auto"))):
-      return None  # Signal that auto-generation should happen
-    
-    # Allow empty values for optional variables
-    if self.optional and (converted is None or (isinstance(converted, str) and converted == "")):
-      return None
-    
-    # Check if this is a required field and the value is empty
-    if check_required and self.is_required():
-      if converted is None or (isinstance(converted, str) and converted == ""):
-        raise ValueError("This field is required and cannot be empty")
-    
-    return converted
-
-  def _convert_bool(self, value: Any) -> bool:
-    """Convert value to boolean."""
-    if isinstance(value, bool):
-      return value
-    if isinstance(value, str):
-      lowered = value.strip().lower()
-      if lowered in TRUE_VALUES:
+    """Represents a single templating variable with lightweight validation."""
+
+    def __init__(self, data: dict[str, Any]) -> None:
+        """Initialize Variable from a dictionary containing variable specification.
+
+        Args:
+            data: Dictionary containing variable specification with required 'name' key
+                  and optional keys: description, type, options, prompt, value, default, section, origin
+
+        Raises:
+            ValueError: If data is not a dict, missing 'name' key, or has invalid default value
+        """
+        # Validate input
+        if not isinstance(data, dict):
+            raise ValueError("Variable data must be a dictionary")
+
+        if "name" not in data:
+            raise ValueError("Variable data must contain 'name' key")
+
+        # Track which fields were explicitly provided in source data
+        self._explicit_fields: Set[str] = set(data.keys())
+
+        # Initialize fields
+        self.name: str = data["name"]
+        self.description: Optional[str] = data.get("description") or data.get(
+            "display", ""
+        )
+        self.type: str = data.get("type", "str")
+        self.options: Optional[List[Any]] = data.get("options", [])
+        self.prompt: Optional[str] = data.get("prompt")
+        if "value" in data:
+            self.value: Any = data.get("value")
+        elif "default" in data:
+            self.value: Any = data.get("default")
+        else:
+            self.value: Any = None
+        self.origin: Optional[str] = data.get("origin")
+        self.sensitive: bool = data.get("sensitive", False)
+        # Optional extra explanation used by interactive prompts
+        self.extra: Optional[str] = data.get("extra")
+        # Flag indicating this variable should be auto-generated when empty
+        self.autogenerated: bool = data.get("autogenerated", False)
+        # Flag indicating this variable is required even when section is disabled
+        self.required: bool = data.get("required", False)
+        # Flag indicating this variable can be empty/optional
+        self.optional: bool = data.get("optional", False)
+        # Original value before config override (used for display)
+        self.original_value: Optional[Any] = data.get("original_value")
+        # Variable dependencies - can be string or list of strings in format "var_name=value"
+        needs_value = data.get("needs")
+        if needs_value:
+            if isinstance(needs_value, str):
+                self.needs: List[str] = [needs_value]
+            elif isinstance(needs_value, list):
+                self.needs: List[str] = needs_value
+            else:
+                raise ValueError(
+                    f"Variable '{self.name}' has invalid 'needs' value: must be string or list"
+                )
+        else:
+            self.needs: List[str] = []
+
+        # Validate and convert the default/initial value if present
+        if self.value is not None:
+            try:
+                self.value = self.convert(self.value)
+            except ValueError as exc:
+                raise ValueError(f"Invalid default for variable '{self.name}': {exc}")
+
+    def convert(self, value: Any) -> Any:
+        """Validate and convert a raw value based on the variable type.
+
+        This method performs type conversion but does NOT check if the value
+        is required. Use validate_and_convert() for full validation including
+        required field checks.
+        """
+        if value is None:
+            return None
+
+        # Treat empty strings as None to avoid storing "" for missing values.
+        if isinstance(value, str) and value.strip() == "":
+            return None
+
+        # Type conversion mapping for cleaner code
+        converters = {
+            "bool": self._convert_bool,
+            "int": self._convert_int,
+            "float": self._convert_float,
+            "enum": self._convert_enum,
+            "url": self._convert_url,
+            "email": self._convert_email,
+        }
+
+        converter = converters.get(self.type)
+        if converter:
+            return converter(value)
+
+        # Default to string conversion
+        return str(value)
+
+    def validate_and_convert(self, value: Any, check_required: bool = True) -> Any:
+        """Validate and convert a value with comprehensive checks.
+
+        This method combines type conversion with validation logic including
+        required field checks. It's the recommended method for user input validation.
+
+        Args:
+            value: The raw value to validate and convert
+            check_required: If True, raises ValueError for required fields with empty values
+
+        Returns:
+            The converted and validated value
+
+        Raises:
+            ValueError: If validation fails (invalid format, required field empty, etc.)
+
+        Examples:
+            # Basic validation
+            var.validate_and_convert("example@email.com")  # Returns validated email
+
+            # Required field validation
+            var.validate_and_convert("", check_required=True)  # Raises ValueError if required
+
+            # Autogenerated variables - allow empty values
+            var.validate_and_convert("", check_required=False)  # Returns None for autogeneration
+        """
+        # First, convert the value using standard type conversion
+        converted = self.convert(value)
+
+        # Special handling for autogenerated variables
+        # Allow empty values as they will be auto-generated later
+        if self.autogenerated and (
+            converted is None
+            or (
+                isinstance(converted, str) and (converted == "" or converted == "*auto")
+            )
+        ):
+            return None  # Signal that auto-generation should happen
+
+        # Allow empty values for optional variables
+        if self.optional and (
+            converted is None or (isinstance(converted, str) and converted == "")
+        ):
+            return None
+
+        # Check if this is a required field and the value is empty
+        if check_required and self.is_required():
+            if converted is None or (isinstance(converted, str) and converted == ""):
+                raise ValueError("This field is required and cannot be empty")
+
+        return converted
+
+    def _convert_bool(self, value: Any) -> bool:
+        """Convert value to boolean."""
+        if isinstance(value, bool):
+            return value
+        if isinstance(value, str):
+            lowered = value.strip().lower()
+            if lowered in TRUE_VALUES:
+                return True
+            if lowered in FALSE_VALUES:
+                return False
+        raise ValueError("value must be a boolean (true/false)")
+
+    def _convert_int(self, value: Any) -> Optional[int]:
+        """Convert value to integer."""
+        if isinstance(value, int):
+            return value
+        if isinstance(value, str) and value.strip() == "":
+            return None
+        try:
+            return int(value)
+        except (TypeError, ValueError) as exc:
+            raise ValueError("value must be an integer") from exc
+
+    def _convert_float(self, value: Any) -> Optional[float]:
+        """Convert value to float."""
+        if isinstance(value, float):
+            return value
+        if isinstance(value, str) and value.strip() == "":
+            return None
+        try:
+            return float(value)
+        except (TypeError, ValueError) as exc:
+            raise ValueError("value must be a float") from exc
+
+    def _convert_enum(self, value: Any) -> Optional[str]:
+        if value == "":
+            return None
+        val = str(value)
+        if self.options and val not in self.options:
+            raise ValueError(f"value must be one of: {', '.join(self.options)}")
+        return val
+
+    def _convert_url(self, value: Any) -> str:
+        val = str(value).strip()
+        if not val:
+            return None
+        parsed = urlparse(val)
+        if not (parsed.scheme and parsed.netloc):
+            raise ValueError("value must be a valid URL (include scheme and host)")
+        return val
+
+    def _convert_email(self, value: Any) -> str:
+        val = str(value).strip()
+        if not val:
+            return None
+        if not EMAIL_REGEX.fullmatch(val):
+            raise ValueError("value must be a valid email address")
+        return val
+
+    def to_dict(self) -> Dict[str, Any]:
+        """Serialize Variable to a dictionary for storage."""
+        result = {}
+
+        # Always include type
+        if self.type:
+            result["type"] = self.type
+
+        # Include value/default if not None
+        if self.value is not None:
+            result["default"] = self.value
+
+        # Include string fields if truthy
+        for field in ("description", "prompt", "extra", "origin"):
+            if value := getattr(self, field):
+                result[field] = value
+
+        # Include boolean/list fields if truthy (but empty list is OK for options)
+        if self.sensitive:
+            result["sensitive"] = True
+        if self.autogenerated:
+            result["autogenerated"] = True
+        if self.required:
+            result["required"] = True
+        if self.optional:
+            result["optional"] = True
+        if self.options is not None:  # Allow empty list
+            result["options"] = self.options
+
+        # Store dependencies (single value if only one, list otherwise)
+        if self.needs:
+            result["needs"] = self.needs[0] if len(self.needs) == 1 else self.needs
+
+        return result
+
+    def get_display_value(
+        self, mask_sensitive: bool = True, max_length: int = 30, show_none: bool = True
+    ) -> str:
+        """Get formatted display value with optional masking and truncation.
+
+        Args:
+            mask_sensitive: If True, mask sensitive values with asterisks
+            max_length: Maximum length before truncation (0 = no limit)
+            show_none: If True, display "(none)" for None values instead of empty string
+
+        Returns:
+            Formatted string representation of the value
+        """
+        if self.value is None or self.value == "":
+            # Show (*auto) for autogenerated variables instead of (none)
+            if self.autogenerated:
+                return "[dim](*auto)[/dim]" if show_none else ""
+            return "[dim](none)[/dim]" if show_none else ""
+
+        # Mask sensitive values
+        if self.sensitive and mask_sensitive:
+            return "********"
+
+        # Convert to string
+        display = str(self.value)
+
+        # Truncate if needed
+        if max_length > 0 and len(display) > max_length:
+            return display[: max_length - 3] + "..."
+
+        return display
+
+    def get_normalized_default(self) -> Any:
+        """Get normalized default value suitable for prompts and display."""
+        try:
+            typed = self.convert(self.value)
+        except Exception:
+            typed = self.value
+
+        # Autogenerated: return display hint
+        if self.autogenerated and not typed:
+            return "*auto"
+
+        # Type-specific handlers
+        if self.type == "enum":
+            if not self.options:
+                return typed
+            return (
+                self.options[0]
+                if typed is None or str(typed) not in self.options
+                else str(typed)
+            )
+
+        if self.type == "bool":
+            return (
+                typed
+                if isinstance(typed, bool)
+                else (None if typed is None else bool(typed))
+            )
+
+        if self.type == "int":
+            try:
+                return int(typed) if typed not in (None, "") else None
+            except Exception:
+                return None
+
+        # Default: return string or None
+        return None if typed is None else str(typed)
+
+    def get_prompt_text(self) -> str:
+        """Get formatted prompt text for interactive input.
+
+        Returns:
+            Prompt text with optional type hints and descriptions
+        """
+        prompt_text = self.prompt or self.description or self.name
+
+        # Add type hint for semantic types if there's a default
+        if self.value is not None and self.type in ["email", "url"]:
+            prompt_text += f" ({self.type})"
+
+        return prompt_text
+
+    def get_validation_hint(self) -> Optional[str]:
+        """Get validation hint for prompts (e.g., enum options).
+
+        Returns:
+            Formatted hint string or None if no hint needed
+        """
+        hints = []
+
+        # Add enum options
+        if self.type == "enum" and self.options:
+            hints.append(f"Options: {', '.join(self.options)}")
+
+        # Add extra help text
+        if self.extra:
+            hints.append(self.extra)
+
+        return " — ".join(hints) if hints else None
+
+    def is_required(self) -> bool:
+        """Check if this variable requires a value (cannot be empty/None).
+
+        A variable is considered required if:
+        - It has an explicit 'required: true' flag (highest precedence)
+        - OR it doesn't have a default value (value is None)
+          AND it's not marked as autogenerated (which can be empty and generated later)
+          AND it's not marked as optional (which can be empty)
+          AND it's not a boolean type (booleans default to False if not set)
+
+        Returns:
+            True if the variable must have a non-empty value, False otherwise
+        """
+        # Optional variables can always be empty
+        if self.optional:
+            return False
+
+        # Explicit required flag takes highest precedence
+        if self.required:
+            # But autogenerated variables can still be empty (will be generated later)
+            if self.autogenerated:
+                return False
+            return True
+
+        # Autogenerated variables can be empty (will be generated later)
+        if self.autogenerated:
+            return False
+
+        # Boolean variables always have a value (True or False)
+        if self.type == "bool":
+            return False
+
+        # Variables with a default value are not required
+        if self.value is not None:
+            return False
+
+        # No default value and not autogenerated = required
         return True
         return True
-      if lowered in FALSE_VALUES:
-        return False
-    raise ValueError("value must be a boolean (true/false)")
-
-  def _convert_int(self, value: Any) -> Optional[int]:
-    """Convert value to integer."""
-    if isinstance(value, int):
-      return value
-    if isinstance(value, str) and value.strip() == "":
-      return None
-    try:
-      return int(value)
-    except (TypeError, ValueError) as exc:
-      raise ValueError("value must be an integer") from exc
-
-  def _convert_float(self, value: Any) -> Optional[float]:
-    """Convert value to float."""
-    if isinstance(value, float):
-      return value
-    if isinstance(value, str) and value.strip() == "":
-      return None
-    try:
-      return float(value)
-    except (TypeError, ValueError) as exc:
-      raise ValueError("value must be a float") from exc
-
-  def _convert_enum(self, value: Any) -> Optional[str]:
-    if value == "":
-      return None
-    val = str(value)
-    if self.options and val not in self.options:
-      raise ValueError(f"value must be one of: {', '.join(self.options)}")
-    return val
-
-  def _convert_url(self, value: Any) -> str:
-    val = str(value).strip()
-    if not val:
-      return None
-    parsed = urlparse(val)
-    if not (parsed.scheme and parsed.netloc):
-      raise ValueError("value must be a valid URL (include scheme and host)")
-    return val
-
-  def _convert_email(self, value: Any) -> str:
-    val = str(value).strip()
-    if not val:
-      return None
-    if not EMAIL_REGEX.fullmatch(val):
-      raise ValueError("value must be a valid email address")
-    return val
-
-  def to_dict(self) -> Dict[str, Any]:
-    """Serialize Variable to a dictionary for storage."""
-    result = {}
-    
-    # Always include type
-    if self.type:
-      result['type'] = self.type
-    
-    # Include value/default if not None
-    if self.value is not None:
-      result['default'] = self.value
-    
-    # Include string fields if truthy
-    for field in ('description', 'prompt', 'extra', 'origin'):
-      if value := getattr(self, field):
-        result[field] = value
-    
-    # Include boolean/list fields if truthy (but empty list is OK for options)
-    if self.sensitive:
-      result['sensitive'] = True
-    if self.autogenerated:
-      result['autogenerated'] = True
-    if self.required:
-      result['required'] = True
-    if self.optional:
-      result['optional'] = True
-    if self.options is not None:  # Allow empty list
-      result['options'] = self.options
-    
-    # Store dependencies (single value if only one, list otherwise)
-    if self.needs:
-      result['needs'] = self.needs[0] if len(self.needs) == 1 else self.needs
-    
-    return result
-  
-  def get_display_value(self, mask_sensitive: bool = True, max_length: int = 30, show_none: bool = True) -> str:
-    """Get formatted display value with optional masking and truncation.
-    
-    Args:
-        mask_sensitive: If True, mask sensitive values with asterisks
-        max_length: Maximum length before truncation (0 = no limit)
-        show_none: If True, display "(none)" for None values instead of empty string
-        
-    Returns:
-        Formatted string representation of the value
-    """
-    if self.value is None or self.value == "":
-      # Show (*auto) for autogenerated variables instead of (none)
-      if self.autogenerated:
-        return "[dim](*auto)[/dim]" if show_none else ""
-      return "[dim](none)[/dim]" if show_none else ""
-    
-    # Mask sensitive values
-    if self.sensitive and mask_sensitive:
-      return "********"
-    
-    # Convert to string
-    display = str(self.value)
-    
-    # Truncate if needed
-    if max_length > 0 and len(display) > max_length:
-      return display[:max_length - 3] + "..."
-    
-    return display
-  
-  def get_normalized_default(self) -> Any:
-    """Get normalized default value suitable for prompts and display."""
-    try:
-      typed = self.convert(self.value)
-    except Exception:
-      typed = self.value
-    
-    # Autogenerated: return display hint
-    if self.autogenerated and not typed:
-      return "*auto"
-    
-    # Type-specific handlers
-    if self.type == "enum":
-      if not self.options:
-        return typed
-      return self.options[0] if typed is None or str(typed) not in self.options else str(typed)
-    
-    if self.type == "bool":
-      return typed if isinstance(typed, bool) else (None if typed is None else bool(typed))
-    
-    if self.type == "int":
-      try:
-        return int(typed) if typed not in (None, "") else None
-      except Exception:
-        return None
-    
-    # Default: return string or None
-    return None if typed is None else str(typed)
-  
-  def get_prompt_text(self) -> str:
-    """Get formatted prompt text for interactive input.
-    
-    Returns:
-        Prompt text with optional type hints and descriptions
-    """
-    prompt_text = self.prompt or self.description or self.name
-    
-    # Add type hint for semantic types if there's a default
-    if self.value is not None and self.type in ["email", "url"]:
-      prompt_text += f" ({self.type})"
-    
-    return prompt_text
-  
-  def get_validation_hint(self) -> Optional[str]:
-    """Get validation hint for prompts (e.g., enum options).
-    
-    Returns:
-        Formatted hint string or None if no hint needed
-    """
-    hints = []
-    
-    # Add enum options
-    if self.type == "enum" and self.options:
-      hints.append(f"Options: {', '.join(self.options)}")
-    
-    # Add extra help text
-    if self.extra:
-      hints.append(self.extra)
-    
-    return " — ".join(hints) if hints else None
-  
-  def is_required(self) -> bool:
-    """Check if this variable requires a value (cannot be empty/None).
-    
-    A variable is considered required if:
-    - It has an explicit 'required: true' flag (highest precedence)
-    - OR it doesn't have a default value (value is None)
-      AND it's not marked as autogenerated (which can be empty and generated later)
-      AND it's not marked as optional (which can be empty)
-      AND it's not a boolean type (booleans default to False if not set)
-    
-    Returns:
-        True if the variable must have a non-empty value, False otherwise
-    """
-    # Optional variables can always be empty
-    if self.optional:
-      return False
-    
-    # Explicit required flag takes highest precedence
-    if self.required:
-      # But autogenerated variables can still be empty (will be generated later)
-      if self.autogenerated:
-        return False
-      return True
-    
-    # Autogenerated variables can be empty (will be generated later)
-    if self.autogenerated:
-      return False
-    
-    # Boolean variables always have a value (True or False)
-    if self.type == "bool":
-      return False
-    
-    # Variables with a default value are not required
-    if self.value is not None:
-      return False
-    
-    # No default value and not autogenerated = required
-    return True
-  
-  def clone(self, update: Optional[Dict[str, Any]] = None) -> 'Variable':
-    """Create a deep copy of the variable with optional field updates.
-    
-    This is more efficient than converting to dict and back when copying variables.
-    
-    Args:
-        update: Optional dictionary of field updates to apply to the clone
-        
-    Returns:
-        New Variable instance with copied data
-        
-    Example:
-        var2 = var1.clone(update={'origin': 'template'})
-    """
-    data = {
-      'name': self.name,
-      'type': self.type,
-      'value': self.value,
-      'description': self.description,
-      'prompt': self.prompt,
-      'options': self.options.copy() if self.options else None,
-      'origin': self.origin,
-      'sensitive': self.sensitive,
-      'extra': self.extra,
-      'autogenerated': self.autogenerated,
-      'required': self.required,
-      'optional': self.optional,
-      'original_value': self.original_value,
-      'needs': self.needs.copy() if self.needs else None,
-    }
-    
-    # Apply updates if provided
-    if update:
-      data.update(update)
-    
-    # Create new variable
-    cloned = Variable(data)
-    
-    # Preserve explicit fields from original, and add any update keys
-    cloned._explicit_fields = self._explicit_fields.copy()
-    if update:
-      cloned._explicit_fields.update(update.keys())
-    
-    return cloned
+
+    def clone(self, update: Optional[Dict[str, Any]] = None) -> "Variable":
+        """Create a deep copy of the variable with optional field updates.
+
+        This is more efficient than converting to dict and back when copying variables.
+
+        Args:
+            update: Optional dictionary of field updates to apply to the clone
+
+        Returns:
+            New Variable instance with copied data
+
+        Example:
+            var2 = var1.clone(update={'origin': 'template'})
+        """
+        data = {
+            "name": self.name,
+            "type": self.type,
+            "value": self.value,
+            "description": self.description,
+            "prompt": self.prompt,
+            "options": self.options.copy() if self.options else None,
+            "origin": self.origin,
+            "sensitive": self.sensitive,
+            "extra": self.extra,
+            "autogenerated": self.autogenerated,
+            "required": self.required,
+            "optional": self.optional,
+            "original_value": self.original_value,
+            "needs": self.needs.copy() if self.needs else None,
+        }
+
+        # Apply updates if provided
+        if update:
+            data.update(update)
+
+        # Create new variable
+        cloned = Variable(data)
+
+        # Preserve explicit fields from original, and add any update keys
+        cloned._explicit_fields = self._explicit_fields.copy()
+        if update:
+            cloned._explicit_fields.update(update.keys())
+
+        return cloned

+ 88 - 88
cli/core/version.py

@@ -14,97 +14,97 @@ logger = logging.getLogger(__name__)
 
 
 
 
 def parse_version(version_str: str) -> Tuple[int, int]:
 def parse_version(version_str: str) -> Tuple[int, int]:
-  """Parse a semantic version string into a tuple of integers.
-  
-  Args:
-      version_str: Version string in format "major.minor" (e.g., "1.0", "1.2")
-      
-  Returns:
-      Tuple of (major, minor) as integers
-      
-  Raises:
-      ValueError: If version string is not in valid semantic version format
-      
-  Examples:
-      >>> parse_version("1.0")
-      (1, 0)
-      >>> parse_version("1.2")
-      (1, 2)
-  """
-  if not version_str:
-    raise ValueError("Version string cannot be empty")
-
-  # Remove 'v' prefix if present
-  version_str = version_str.lstrip('v')
-
-  # Match semantic version pattern: major.minor
-  pattern = r'^(\d+)\.(\d+)$'
-  match = re.match(pattern, version_str)
-
-  if not match:
-    raise ValueError(
-      f"Invalid version format '{version_str}'. "
-      "Expected format: major.minor (e.g., '1.0', '1.2')"
-    )
-
-  major, minor = match.groups()
-  return (int(major), int(minor))
+    """Parse a semantic version string into a tuple of integers.
+
+    Args:
+        version_str: Version string in format "major.minor" (e.g., "1.0", "1.2")
+
+    Returns:
+        Tuple of (major, minor) as integers
+
+    Raises:
+        ValueError: If version string is not in valid semantic version format
+
+    Examples:
+        >>> parse_version("1.0")
+        (1, 0)
+        >>> parse_version("1.2")
+        (1, 2)
+    """
+    if not version_str:
+        raise ValueError("Version string cannot be empty")
+
+    # Remove 'v' prefix if present
+    version_str = version_str.lstrip("v")
+
+    # Match semantic version pattern: major.minor
+    pattern = r"^(\d+)\.(\d+)$"
+    match = re.match(pattern, version_str)
+
+    if not match:
+        raise ValueError(
+            f"Invalid version format '{version_str}'. "
+            "Expected format: major.minor (e.g., '1.0', '1.2')"
+        )
+
+    major, minor = match.groups()
+    return (int(major), int(minor))
 
 
 
 
 def compare_versions(version1: str, version2: str) -> int:
 def compare_versions(version1: str, version2: str) -> int:
-  """Compare two semantic version strings.
-  
-  Args:
-      version1: First version string
-      version2: Second version string
-      
-  Returns:
-      -1 if version1 < version2
-       0 if version1 == version2
-       1 if version1 > version2
-       
-  Raises:
-      ValueError: If either version string is invalid
-      
-  Examples:
-      >>> compare_versions("1.0", "0.9")
-      1
-      >>> compare_versions("1.0", "1.0")
-      0
-      >>> compare_versions("1.0", "1.1")
-      -1
-  """
-  v1 = parse_version(version1)
-  v2 = parse_version(version2)
-
-  if v1 < v2:
-    return -1
-  if v1 > v2:
-    return 1
-  return 0
+    """Compare two semantic version strings.
+
+    Args:
+        version1: First version string
+        version2: Second version string
+
+    Returns:
+        -1 if version1 < version2
+         0 if version1 == version2
+         1 if version1 > version2
+
+    Raises:
+        ValueError: If either version string is invalid
+
+    Examples:
+        >>> compare_versions("1.0", "0.9")
+        1
+        >>> compare_versions("1.0", "1.0")
+        0
+        >>> compare_versions("1.0", "1.1")
+        -1
+    """
+    v1 = parse_version(version1)
+    v2 = parse_version(version2)
+
+    if v1 < v2:
+        return -1
+    if v1 > v2:
+        return 1
+    return 0
 
 
 
 
 def is_compatible(current_version: str, required_version: str) -> bool:
 def is_compatible(current_version: str, required_version: str) -> bool:
-  """Check if current version meets the minimum required version.
-  
-  Args:
-      current_version: Current version
-      required_version: Minimum required version
-      
-  Returns:
-      True if current_version >= required_version, False otherwise
-      
-  Examples:
-      >>> is_compatible("1.0", "0.9")
-      True
-      >>> is_compatible("1.0", "1.0")
-      True
-      >>> is_compatible("1.0", "1.1")
-      False
-  """
-  try:
-    return compare_versions(current_version, required_version) >= 0
-  except ValueError as e:
-    logger.warning("Version compatibility check failed: %s", e)
-    # If we can't parse versions, assume incompatible for safety
-    return False
+    """Check if current version meets the minimum required version.
+
+    Args:
+        current_version: Current version
+        required_version: Minimum required version
+
+    Returns:
+        True if current_version >= required_version, False otherwise
+
+    Examples:
+        >>> is_compatible("1.0", "0.9")
+        True
+        >>> is_compatible("1.0", "1.0")
+        True
+        >>> is_compatible("1.0", "1.1")
+        False
+    """
+    try:
+        return compare_versions(current_version, required_version) >= 0
+    except ValueError as e:
+        logger.warning("Version compatibility check failed: %s", e)
+        # If we can't parse versions, assume incompatible for safety
+        return False

+ 7 - 7
cli/modules/compose/__init__.py

@@ -9,8 +9,8 @@ from .spec_v1_1 import spec as spec_1_1
 
 
 # Schema version mapping
 # Schema version mapping
 SCHEMAS = {
 SCHEMAS = {
-  "1.0": spec_1_0,
-  "1.1": spec_1_1,
+    "1.0": spec_1_0,
+    "1.1": spec_1_1,
 }
 }
 
 
 # Default spec points to latest version
 # Default spec points to latest version
@@ -18,12 +18,12 @@ spec = spec_1_1
 
 
 
 
 class ComposeModule(Module):
 class ComposeModule(Module):
-  """Docker Compose module."""
+    """Docker Compose module."""
 
 
-  name = "compose"
-  description = "Manage Docker Compose configurations"
-  schema_version = "1.1"  # Current schema version supported by this module
-  schemas = SCHEMAS  # Available schema versions
+    name = "compose"
+    description = "Manage Docker Compose configurations"
+    schema_version = "1.1"  # Current schema version supported by this module
+    schemas = SCHEMAS  # Available schema versions
 
 
 
 
 registry.register(ComposeModule)
 registry.register(ComposeModule)

+ 263 - 267
cli/modules/compose/spec_v1_0.py

@@ -1,282 +1,278 @@
 """Compose module schema version 1.0 - Original specification."""
 """Compose module schema version 1.0 - Original specification."""
+
 from collections import OrderedDict
 from collections import OrderedDict
 
 
 spec = OrderedDict(
 spec = OrderedDict(
     {
     {
-      "general": {
-        "title": "General",
-        "vars": {
-          "service_name": {
-            "description": "Service name",
-            "type": "str",
-          },
-          "container_name": {
-            "description": "Container name",
-            "type": "str",
-          },
-          "container_hostname": {
-            "description": "Container internal hostname",
-            "type": "str",
-          },
-          "container_timezone": {
-            "description": "Container timezone (e.g., Europe/Berlin)",
-            "type": "str",
-            "default": "UTC",
-          },
-          "user_uid": {
-            "description": "User UID for container process",
-            "type": "int",
-            "default": 1000,
-          },
-          "user_gid": {
-            "description": "User GID for container process",
-            "type": "int",
-            "default": 1000,
-          },
-          "container_loglevel": {
-            "description": "Container log level",
-            "type": "enum",
-            "options": ["debug", "info", "warn", "error"],
-            "default": "info",
-          },
-          "restart_policy": {
-            "description": "Container restart policy",
-            "type": "enum",
-            "options": ["unless-stopped", "always", "on-failure", "no"],
-            "default": "unless-stopped",
-          },
+        "general": {
+            "title": "General",
+            "vars": {
+                "service_name": {
+                    "description": "Service name",
+                    "type": "str",
+                },
+                "container_name": {
+                    "description": "Container name",
+                    "type": "str",
+                },
+                "container_hostname": {
+                    "description": "Container internal hostname",
+                    "type": "str",
+                },
+                "container_timezone": {
+                    "description": "Container timezone (e.g., Europe/Berlin)",
+                    "type": "str",
+                    "default": "UTC",
+                },
+                "user_uid": {
+                    "description": "User UID for container process",
+                    "type": "int",
+                    "default": 1000,
+                },
+                "user_gid": {
+                    "description": "User GID for container process",
+                    "type": "int",
+                    "default": 1000,
+                },
+                "container_loglevel": {
+                    "description": "Container log level",
+                    "type": "enum",
+                    "options": ["debug", "info", "warn", "error"],
+                    "default": "info",
+                },
+                "restart_policy": {
+                    "description": "Container restart policy",
+                    "type": "enum",
+                    "options": ["unless-stopped", "always", "on-failure", "no"],
+                    "default": "unless-stopped",
+                },
+            },
         },
         },
-      },
-      "network": {
-        "title": "Network",
-        "toggle": "network_enabled",
-        "vars": {
-          "network_enabled": {
-            "description": "Enable custom network block",
-            "type": "bool",
-            "default": False,
-          },
-          "network_name": {
-            "description": "Docker network name",
-            "type": "str",
-            "default": "bridge",
-          },
-          "network_external": {
-            "description": "Use existing Docker network",
-            "type": "bool",
-            "default": True,
-          },
+        "network": {
+            "title": "Network",
+            "toggle": "network_enabled",
+            "vars": {
+                "network_enabled": {
+                    "description": "Enable custom network block",
+                    "type": "bool",
+                    "default": False,
+                },
+                "network_name": {
+                    "description": "Docker network name",
+                    "type": "str",
+                    "default": "bridge",
+                },
+                "network_external": {
+                    "description": "Use existing Docker network",
+                    "type": "bool",
+                    "default": True,
+                },
+            },
         },
         },
-      },
-      "ports": {
-        "title": "Ports",
-        "toggle": "ports_enabled",
-        "vars": {
-          "ports_enabled": {
-            "description": "Expose ports via 'ports' mapping",
-            "type": "bool",
-            "default": True,
-          }
+        "ports": {
+            "title": "Ports",
+            "toggle": "ports_enabled",
+            "vars": {
+                "ports_enabled": {
+                    "description": "Expose ports via 'ports' mapping",
+                    "type": "bool",
+                    "default": True,
+                }
+            },
         },
         },
-      },
-      "traefik": {
-        "title": "Traefik",
-        "toggle": "traefik_enabled",
-        "description": "Traefik routes external traffic to your service.",
-        "vars": {
-          "traefik_enabled": {
-            "description": "Enable Traefik reverse proxy integration",
-            "type": "bool",
-            "default": False,
-          },
-          "traefik_network": {
-            "description": "Traefik network name",
-            "type": "str",
-            "default": "traefik",
-          },
-          "traefik_host": {
-            "description": "Domain name for your service (e.g., app.example.com)",
-            "type": "str",
-          },
-          "traefik_entrypoint": {
-            "description": "HTTP entrypoint (non-TLS)",
-            "type": "str",
-            "default": "web",
-          },
+        "traefik": {
+            "title": "Traefik",
+            "toggle": "traefik_enabled",
+            "description": "Traefik routes external traffic to your service.",
+            "vars": {
+                "traefik_enabled": {
+                    "description": "Enable Traefik reverse proxy integration",
+                    "type": "bool",
+                    "default": False,
+                },
+                "traefik_network": {
+                    "description": "Traefik network name",
+                    "type": "str",
+                    "default": "traefik",
+                },
+                "traefik_host": {
+                    "description": "Domain name for your service (e.g., app.example.com)",
+                    "type": "str",
+                },
+                "traefik_entrypoint": {
+                    "description": "HTTP entrypoint (non-TLS)",
+                    "type": "str",
+                    "default": "web",
+                },
+            },
         },
         },
-      },
-      "traefik_tls": {
-        "title": "Traefik TLS/SSL",
-        "toggle": "traefik_tls_enabled",
-        "needs": "traefik",
-        "description": "Enable HTTPS/TLS for Traefik with certificate management.",
-        "vars": {
-          "traefik_tls_enabled": {
-            "description": "Enable HTTPS/TLS",
-            "type": "bool",
-            "default": True,
-          },
-          "traefik_tls_entrypoint": {
-            "description": "TLS entrypoint",
-            "type": "str",
-            "default": "websecure",
-          },
-          "traefik_tls_certresolver": {
-            "description": "Traefik certificate resolver name",
-            "type": "str",
-            "default": "cloudflare",
-          },
+        "traefik_tls": {
+            "title": "Traefik TLS/SSL",
+            "toggle": "traefik_tls_enabled",
+            "needs": "traefik",
+            "description": "Enable HTTPS/TLS for Traefik with certificate management.",
+            "vars": {
+                "traefik_tls_enabled": {
+                    "description": "Enable HTTPS/TLS",
+                    "type": "bool",
+                    "default": True,
+                },
+                "traefik_tls_entrypoint": {
+                    "description": "TLS entrypoint",
+                    "type": "str",
+                    "default": "websecure",
+                },
+                "traefik_tls_certresolver": {
+                    "description": "Traefik certificate resolver name",
+                    "type": "str",
+                    "default": "cloudflare",
+                },
+            },
         },
         },
-      },
-      "swarm": {
-        "title": "Docker Swarm",
-        "toggle": "swarm_enabled",
-        "description": "Deploy service in Docker Swarm mode with replicas.",
-        "vars": {
-          "swarm_enabled": {
-            "description": "Enable Docker Swarm mode",
-            "type": "bool",
-            "default": False,
-          },
-          "swarm_replicas": {
-            "description": "Number of replicas in Swarm",
-            "type": "int",
-            "default": 1,
-          },
-          "swarm_placement_mode": {
-            "description": "Swarm placement mode",
-            "type": "enum",
-            "options": ["global", "replicated"],
-            "default": "replicated"
-          },
-          "swarm_placement_host": {
-            "description": "Limit placement to specific node",
-            "type": "str",
-          }
+        "swarm": {
+            "title": "Docker Swarm",
+            "toggle": "swarm_enabled",
+            "description": "Deploy service in Docker Swarm mode with replicas.",
+            "vars": {
+                "swarm_enabled": {
+                    "description": "Enable Docker Swarm mode",
+                    "type": "bool",
+                    "default": False,
+                },
+                "swarm_replicas": {
+                    "description": "Number of replicas in Swarm",
+                    "type": "int",
+                    "default": 1,
+                },
+                "swarm_placement_mode": {
+                    "description": "Swarm placement mode",
+                    "type": "enum",
+                    "options": ["global", "replicated"],
+                    "default": "replicated",
+                },
+                "swarm_placement_host": {
+                    "description": "Limit placement to specific node",
+                    "type": "str",
+                },
+            },
         },
         },
-      },
-      "database": {
-        "title": "Database",
-        "toggle": "database_enabled",
-        "description": "Connect to external database (PostgreSQL or MySQL)",
-        "vars": {
-          "database_enabled": {
-            "description": "Enable external database integration",
-            "type": "bool",
-            "default": False,
-          },
-          "database_type": {
-            "description": "Database type",
-            "type": "enum",
-            "options": ["postgres", "mysql"],
-            "default": "postgres",
-          },
-          "database_external": {
-            "description": "Use an external database server?",
-            "extra": "If 'no', a database container will be created in the compose project.",
-            "type": "bool",
-            "default": False,
-          },
-          "database_host": {
-            "description": "Database host",
-            "type": "str",
-            "default": "database",
-          },
-          "database_port": {
-            "description": "Database port",
-            "type": "int"
-          },
-          "database_name": {
-            "description": "Database name",
-            "type": "str",
-          },
-          "database_user": {
-            "description": "Database user",
-            "type": "str",
-          },
-          "database_password": {
-            "description": "Database password",
-            "type": "str",
-            "default": "",
-            "sensitive": True,
-            "autogenerated": True,
-          },
+        "database": {
+            "title": "Database",
+            "toggle": "database_enabled",
+            "description": "Connect to external database (PostgreSQL or MySQL)",
+            "vars": {
+                "database_enabled": {
+                    "description": "Enable external database integration",
+                    "type": "bool",
+                    "default": False,
+                },
+                "database_type": {
+                    "description": "Database type",
+                    "type": "enum",
+                    "options": ["postgres", "mysql"],
+                    "default": "postgres",
+                },
+                "database_external": {
+                    "description": "Use an external database server?",
+                    "extra": "If 'no', a database container will be created in the compose project.",
+                    "type": "bool",
+                    "default": False,
+                },
+                "database_host": {
+                    "description": "Database host",
+                    "type": "str",
+                    "default": "database",
+                },
+                "database_port": {"description": "Database port", "type": "int"},
+                "database_name": {
+                    "description": "Database name",
+                    "type": "str",
+                },
+                "database_user": {
+                    "description": "Database user",
+                    "type": "str",
+                },
+                "database_password": {
+                    "description": "Database password",
+                    "type": "str",
+                    "default": "",
+                    "sensitive": True,
+                    "autogenerated": True,
+                },
+            },
         },
         },
-      },
-      "email": {
-        "title": "Email Server",
-        "toggle": "email_enabled",
-        "description": "Configure email server for notifications and user management.",
-        "vars": {
-          "email_enabled": {
-            "description": "Enable email server configuration",
-            "type": "bool",
-            "default": False,
-          },
-          "email_host": {
-            "description": "SMTP server hostname",
-            "type": "str",
-          },
-          "email_port": {
-            "description": "SMTP server port",
-            "type": "int",
-            "default": 587,
-          },
-          "email_username": {
-            "description": "SMTP username",
-            "type": "str",
-          },
-          "email_password": {
-            "description": "SMTP password",
-            "type": "str",
-            "sensitive": True,
-          },
-          "email_from": {
-            "description": "From email address",
-            "type": "str",
-          },
-          "email_use_tls": {
-            "description": "Use TLS encryption",
-            "type": "bool",
-            "default": True,
-          },
-          "email_use_ssl": {
-            "description": "Use SSL encryption",
-            "type": "bool",
-            "default": False,
-          }
+        "email": {
+            "title": "Email Server",
+            "toggle": "email_enabled",
+            "description": "Configure email server for notifications and user management.",
+            "vars": {
+                "email_enabled": {
+                    "description": "Enable email server configuration",
+                    "type": "bool",
+                    "default": False,
+                },
+                "email_host": {
+                    "description": "SMTP server hostname",
+                    "type": "str",
+                },
+                "email_port": {
+                    "description": "SMTP server port",
+                    "type": "int",
+                    "default": 587,
+                },
+                "email_username": {
+                    "description": "SMTP username",
+                    "type": "str",
+                },
+                "email_password": {
+                    "description": "SMTP password",
+                    "type": "str",
+                    "sensitive": True,
+                },
+                "email_from": {
+                    "description": "From email address",
+                    "type": "str",
+                },
+                "email_use_tls": {
+                    "description": "Use TLS encryption",
+                    "type": "bool",
+                    "default": True,
+                },
+                "email_use_ssl": {
+                    "description": "Use SSL encryption",
+                    "type": "bool",
+                    "default": False,
+                },
+            },
         },
         },
-      },
-      "authentik": {
-        "title": "Authentik SSO",
-        "toggle": "authentik_enabled",
-        "description": "Integrate with Authentik for Single Sign-On authentication.",
-        "vars": {
-          "authentik_enabled": {
-            "description": "Enable Authentik SSO integration",
-            "type": "bool",
-            "default": False,
-          },
-          "authentik_url": {
-            "description": "Authentik base URL (e.g., https://auth.example.com)",
-            "type": "str",
-          },
-          "authentik_slug": {
-            "description": "Authentik application slug",
-            "type": "str",
-          },
-          "authentik_client_id": {
-            "description": "OAuth client ID from Authentik provider",
-            "type": "str",
-          },
-          "authentik_client_secret": {
-            "description": "OAuth client secret from Authentik provider",
-            "type": "str",
-            "sensitive": True,
-          },
+        "authentik": {
+            "title": "Authentik SSO",
+            "toggle": "authentik_enabled",
+            "description": "Integrate with Authentik for Single Sign-On authentication.",
+            "vars": {
+                "authentik_enabled": {
+                    "description": "Enable Authentik SSO integration",
+                    "type": "bool",
+                    "default": False,
+                },
+                "authentik_url": {
+                    "description": "Authentik base URL (e.g., https://auth.example.com)",
+                    "type": "str",
+                },
+                "authentik_slug": {
+                    "description": "Authentik application slug",
+                    "type": "str",
+                },
+                "authentik_client_id": {
+                    "description": "OAuth client ID from Authentik provider",
+                    "type": "str",
+                },
+                "authentik_client_secret": {
+                    "description": "OAuth client secret from Authentik provider",
+                    "type": "str",
+                    "sensitive": True,
+                },
+            },
         },
         },
-      },
     }
     }
-  )
-
-
+)

+ 337 - 341
cli/modules/compose/spec_v1_1.py

@@ -5,358 +5,354 @@ Changes from 1.0:
 - swarm: Added volume modes (local/mount/nfs) and conditional placement constraints
 - swarm: Added volume modes (local/mount/nfs) and conditional placement constraints
 - traefik_tls: Updated needs format from 'traefik' to 'traefik_enabled=true'
 - traefik_tls: Updated needs format from 'traefik' to 'traefik_enabled=true'
 """
 """
+
 from collections import OrderedDict
 from collections import OrderedDict
 
 
 spec = OrderedDict(
 spec = OrderedDict(
     {
     {
-      "general": {
-        "title": "General",
-        "vars": {
-          "service_name": {
-            "description": "Service name",
-            "type": "str",
-          },
-          "container_name": {
-            "description": "Container name",
-            "type": "str",
-          },
-          "container_hostname": {
-            "description": "Container internal hostname",
-            "type": "str",
-          },
-          "container_timezone": {
-            "description": "Container timezone (e.g., Europe/Berlin)",
-            "type": "str",
-            "default": "UTC",
-          },
-          "user_uid": {
-            "description": "User UID for container process",
-            "type": "int",
-            "default": 1000,
-          },
-          "user_gid": {
-            "description": "User GID for container process",
-            "type": "int",
-            "default": 1000,
-          },
-          "container_loglevel": {
-            "description": "Container log level",
-            "type": "enum",
-            "options": ["debug", "info", "warn", "error"],
-            "default": "info",
-          },
-          "restart_policy": {
-            "description": "Container restart policy",
-            "type": "enum",
-            "options": ["unless-stopped", "always", "on-failure", "no"],
-            "default": "unless-stopped",
-          },
+        "general": {
+            "title": "General",
+            "vars": {
+                "service_name": {
+                    "description": "Service name",
+                    "type": "str",
+                },
+                "container_name": {
+                    "description": "Container name",
+                    "type": "str",
+                },
+                "container_hostname": {
+                    "description": "Container internal hostname",
+                    "type": "str",
+                },
+                "container_timezone": {
+                    "description": "Container timezone (e.g., Europe/Berlin)",
+                    "type": "str",
+                    "default": "UTC",
+                },
+                "user_uid": {
+                    "description": "User UID for container process",
+                    "type": "int",
+                    "default": 1000,
+                },
+                "user_gid": {
+                    "description": "User GID for container process",
+                    "type": "int",
+                    "default": 1000,
+                },
+                "container_loglevel": {
+                    "description": "Container log level",
+                    "type": "enum",
+                    "options": ["debug", "info", "warn", "error"],
+                    "default": "info",
+                },
+                "restart_policy": {
+                    "description": "Container restart policy",
+                    "type": "enum",
+                    "options": ["unless-stopped", "always", "on-failure", "no"],
+                    "default": "unless-stopped",
+                },
+            },
         },
         },
-      },
-      "network": {
-        "title": "Network",
-        "toggle": "network_enabled",
-        "vars": {
-          "network_enabled": {
-            "description": "Enable custom network block",
-            "type": "bool",
-            "default": False,
-          },
-          "network_mode": {
-            "description": "Docker network mode",
-            "type": "enum",
-            "options": ["bridge", "host", "macvlan"],
-            "default": "bridge",
-            "extra": "bridge=default Docker networking, host=use host network stack, macvlan=dedicated MAC address on physical network",
-          },
-          "network_name": {
-            "description": "Docker network name",
-            "type": "str",
-            "default": "bridge",
-            "needs": "network_mode=bridge,macvlan",
-          },
-          "network_external": {
-            "description": "Use existing Docker network",
-            "type": "bool",
-            "default": True,
-            "needs": "network_mode=bridge,macvlan",
-          },
-          "network_macvlan_ipv4_address": {
-            "description": "Static IP address for container",
-            "type": "str",
-            "default": "192.168.1.253",
-            "needs": "network_mode=macvlan",
-          },
-          "network_macvlan_parent_interface": {
-            "description": "Host network interface name",
-            "type": "str",
-            "default": "eth0",
-            "needs": "network_mode=macvlan",
-          },
-          "network_macvlan_subnet": {
-            "description": "Network subnet in CIDR notation",
-            "type": "str",
-            "default": "192.168.1.0/24",
-            "needs": "network_mode=macvlan",
-          },
-          "network_macvlan_gateway": {
-            "description": "Network gateway IP address",
-            "type": "str",
-            "default": "192.168.1.1",
-            "needs": "network_mode=macvlan",
-          },
+        "network": {
+            "title": "Network",
+            "toggle": "network_enabled",
+            "vars": {
+                "network_enabled": {
+                    "description": "Enable custom network block",
+                    "type": "bool",
+                    "default": False,
+                },
+                "network_mode": {
+                    "description": "Docker network mode",
+                    "type": "enum",
+                    "options": ["bridge", "host", "macvlan"],
+                    "default": "bridge",
+                    "extra": "bridge=default Docker networking, host=use host network stack, macvlan=dedicated MAC address on physical network",
+                },
+                "network_name": {
+                    "description": "Docker network name",
+                    "type": "str",
+                    "default": "bridge",
+                    "needs": "network_mode=bridge,macvlan",
+                },
+                "network_external": {
+                    "description": "Use existing Docker network",
+                    "type": "bool",
+                    "default": True,
+                    "needs": "network_mode=bridge,macvlan",
+                },
+                "network_macvlan_ipv4_address": {
+                    "description": "Static IP address for container",
+                    "type": "str",
+                    "default": "192.168.1.253",
+                    "needs": "network_mode=macvlan",
+                },
+                "network_macvlan_parent_interface": {
+                    "description": "Host network interface name",
+                    "type": "str",
+                    "default": "eth0",
+                    "needs": "network_mode=macvlan",
+                },
+                "network_macvlan_subnet": {
+                    "description": "Network subnet in CIDR notation",
+                    "type": "str",
+                    "default": "192.168.1.0/24",
+                    "needs": "network_mode=macvlan",
+                },
+                "network_macvlan_gateway": {
+                    "description": "Network gateway IP address",
+                    "type": "str",
+                    "default": "192.168.1.1",
+                    "needs": "network_mode=macvlan",
+                },
+            },
         },
-      },
-      "ports": {
-        "title": "Ports",
-        "toggle": "ports_enabled",
-        "vars": {
-          "ports_enabled": {
-            "description": "Expose ports via 'ports' mapping",
-            "type": "bool",
-            "default": True,
-          }
+        "ports": {
+            "title": "Ports",
+            "toggle": "ports_enabled",
+            "vars": {
+                "ports_enabled": {
+                    "description": "Expose ports via 'ports' mapping",
+                    "type": "bool",
+                    "default": True,
+                }
+            },
         },
-      },
-      "traefik": {
-        "title": "Traefik",
-        "toggle": "traefik_enabled",
-        "description": "Traefik routes external traffic to your service.",
-        "vars": {
-          "traefik_enabled": {
-            "description": "Enable Traefik reverse proxy integration",
-            "type": "bool",
-            "default": False,
-          },
-          "traefik_network": {
-            "description": "Traefik network name",
-            "type": "str",
-            "default": "traefik",
-          },
-          "traefik_host": {
-            "description": "Domain name for your service (e.g., app.example.com)",
-            "type": "str",
-          },
-          "traefik_entrypoint": {
-            "description": "HTTP entrypoint (non-TLS)",
-            "type": "str",
-            "default": "web",
-          },
+        "traefik": {
+            "title": "Traefik",
+            "toggle": "traefik_enabled",
+            "description": "Traefik routes external traffic to your service.",
+            "vars": {
+                "traefik_enabled": {
+                    "description": "Enable Traefik reverse proxy integration",
+                    "type": "bool",
+                    "default": False,
+                },
+                "traefik_network": {
+                    "description": "Traefik network name",
+                    "type": "str",
+                    "default": "traefik",
+                },
+                "traefik_host": {
+                    "description": "Domain name for your service (e.g., app.example.com)",
+                    "type": "str",
+                },
+                "traefik_entrypoint": {
+                    "description": "HTTP entrypoint (non-TLS)",
+                    "type": "str",
+                    "default": "web",
+                },
+            },
         },
-      },
-      "traefik_tls": {
-        "title": "Traefik TLS/SSL",
-        "toggle": "traefik_tls_enabled",
-        "needs": "traefik_enabled=true",
-        "description": "Enable HTTPS/TLS for Traefik with certificate management.",
-        "vars": {
-          "traefik_tls_enabled": {
-            "description": "Enable HTTPS/TLS",
-            "type": "bool",
-            "default": True,
-          },
-          "traefik_tls_entrypoint": {
-            "description": "TLS entrypoint",
-            "type": "str",
-            "default": "websecure",
-          },
-          "traefik_tls_certresolver": {
-            "description": "Traefik certificate resolver name",
-            "type": "str",
-            "default": "cloudflare",
-          },
+        "traefik_tls": {
+            "title": "Traefik TLS/SSL",
+            "toggle": "traefik_tls_enabled",
+            "needs": "traefik_enabled=true",
+            "description": "Enable HTTPS/TLS for Traefik with certificate management.",
+            "vars": {
+                "traefik_tls_enabled": {
+                    "description": "Enable HTTPS/TLS",
+                    "type": "bool",
+                    "default": True,
+                },
+                "traefik_tls_entrypoint": {
+                    "description": "TLS entrypoint",
+                    "type": "str",
+                    "default": "websecure",
+                },
+                "traefik_tls_certresolver": {
+                    "description": "Traefik certificate resolver name",
+                    "type": "str",
+                    "default": "cloudflare",
+                },
+            },
         },
-      },
-      "swarm": {
-        "title": "Docker Swarm",
-        "toggle": "swarm_enabled",
-        "description": "Deploy service in Docker Swarm mode.",
-        "vars": {
-          "swarm_enabled": {
-            "description": "Enable Docker Swarm mode",
-            "type": "bool",
-            "default": False,
-          },
-          "swarm_placement_mode": {
-            "description": "Swarm placement mode",
-            "type": "enum",
-            "options": ["replicated", "global"],
-            "default": "replicated",
-            "extra": "replicated=run specific number of tasks, global=run one task per node",
-          },
-          "swarm_replicas": {
-            "description": "Number of replicas",
-            "type": "int",
-            "default": 1,
-            "needs": "swarm_placement_mode=replicated",
-          },
-          "swarm_placement_host": {
-            "description": "Target hostname for placement constraint",
-            "type": "str",
-            "default": "",
-            "optional": True,
-            "needs": "swarm_placement_mode=replicated",
-            "extra": "Constrains service to run on specific node by hostname (optional)",
-          },
-          "swarm_volume_mode": {
-            "description": "Swarm volume storage backend",
-            "type": "enum",
-            "options": ["local", "mount", "nfs"],
-            "default": "local",
-            "extra": "WARNING: 'local' only works on single-node deployments!",
-          },
-          "swarm_volume_mount_path": {
-            "description": "Host path for bind mount",
-            "type": "str",
-            "default": "/mnt/storage",
-            "needs": "swarm_volume_mode=mount",
-            "extra": "Useful for shared/replicated storage",
-          },
-          "swarm_volume_nfs_server": {
-            "description": "NFS server address",
-            "type": "str",
-            "default": "192.168.1.1",
-            "needs": "swarm_volume_mode=nfs",
-            "extra": "IP address or hostname of NFS server",
-          },
-          "swarm_volume_nfs_path": {
-            "description": "NFS export path",
-            "type": "str",
-            "default": "/export",
-            "needs": "swarm_volume_mode=nfs",
-            "extra": "Path to NFS export on the server",
-          },
-          "swarm_volume_nfs_options": {
-            "description": "NFS mount options",
-            "type": "str",
-            "default": "rw,nolock,soft",
-            "needs": "swarm_volume_mode=nfs",
-            "extra": "Comma-separated NFS mount options",
-          },
+        "swarm": {
+            "title": "Docker Swarm",
+            "toggle": "swarm_enabled",
+            "description": "Deploy service in Docker Swarm mode.",
+            "vars": {
+                "swarm_enabled": {
+                    "description": "Enable Docker Swarm mode",
+                    "type": "bool",
+                    "default": False,
+                },
+                "swarm_placement_mode": {
+                    "description": "Swarm placement mode",
+                    "type": "enum",
+                    "options": ["replicated", "global"],
+                    "default": "replicated",
+                    "extra": "replicated=run specific number of tasks, global=run one task per node",
+                },
+                "swarm_replicas": {
+                    "description": "Number of replicas",
+                    "type": "int",
+                    "default": 1,
+                    "needs": "swarm_placement_mode=replicated",
+                },
+                "swarm_placement_host": {
+                    "description": "Target hostname for placement constraint",
+                    "type": "str",
+                    "default": "",
+                    "optional": True,
+                    "needs": "swarm_placement_mode=replicated",
+                    "extra": "Constrains service to run on specific node by hostname (optional)",
+                },
+                "swarm_volume_mode": {
+                    "description": "Swarm volume storage backend",
+                    "type": "enum",
+                    "options": ["local", "mount", "nfs"],
+                    "default": "local",
+                    "extra": "WARNING: 'local' only works on single-node deployments!",
+                },
+                "swarm_volume_mount_path": {
+                    "description": "Host path for bind mount",
+                    "type": "str",
+                    "default": "/mnt/storage",
+                    "needs": "swarm_volume_mode=mount",
+                    "extra": "Useful for shared/replicated storage",
+                },
+                "swarm_volume_nfs_server": {
+                    "description": "NFS server address",
+                    "type": "str",
+                    "default": "192.168.1.1",
+                    "needs": "swarm_volume_mode=nfs",
+                    "extra": "IP address or hostname of NFS server",
+                },
+                "swarm_volume_nfs_path": {
+                    "description": "NFS export path",
+                    "type": "str",
+                    "default": "/export",
+                    "needs": "swarm_volume_mode=nfs",
+                    "extra": "Path to NFS export on the server",
+                },
+                "swarm_volume_nfs_options": {
+                    "description": "NFS mount options",
+                    "type": "str",
+                    "default": "rw,nolock,soft",
+                    "needs": "swarm_volume_mode=nfs",
+                    "extra": "Comma-separated NFS mount options",
+                },
+            },
         },
-      },
-      "database": {
-        "title": "Database",
-        "toggle": "database_enabled",
-        "description": "Connect to external database (PostgreSQL or MySQL)",
-        "vars": {
-          "database_enabled": {
-            "description": "Enable external database integration",
-            "type": "bool",
-            "default": False,
-          },
-          "database_type": {
-            "description": "Database type",
-            "type": "enum",
-            "options": ["postgres", "mysql"],
-            "default": "postgres",
-          },
-          "database_external": {
-            "description": "Use an external database server?",
-            "extra": "If 'no', a database container will be created in the compose project.",
-            "type": "bool",
-            "default": False,
-          },
-          "database_host": {
-            "description": "Database host",
-            "type": "str",
-            "default": "database",
-          },
-          "database_port": {
-            "description": "Database port",
-            "type": "int"
-          },
-          "database_name": {
-            "description": "Database name",
-            "type": "str",
-          },
-          "database_user": {
-            "description": "Database user",
-            "type": "str",
-          },
-          "database_password": {
-            "description": "Database password",
-            "type": "str",
-            "default": "",
-            "sensitive": True,
-            "autogenerated": True,
-          },
+        "database": {
+            "title": "Database",
+            "toggle": "database_enabled",
+            "description": "Connect to external database (PostgreSQL or MySQL)",
+            "vars": {
+                "database_enabled": {
+                    "description": "Enable external database integration",
+                    "type": "bool",
+                    "default": False,
+                },
+                "database_type": {
+                    "description": "Database type",
+                    "type": "enum",
+                    "options": ["postgres", "mysql"],
+                    "default": "postgres",
+                },
+                "database_external": {
+                    "description": "Use an external database server?",
+                    "extra": "If 'no', a database container will be created in the compose project.",
+                    "type": "bool",
+                    "default": False,
+                },
+                "database_host": {
+                    "description": "Database host",
+                    "type": "str",
+                    "default": "database",
+                },
+                "database_port": {"description": "Database port", "type": "int"},
+                "database_name": {
+                    "description": "Database name",
+                    "type": "str",
+                },
+                "database_user": {
+                    "description": "Database user",
+                    "type": "str",
+                },
+                "database_password": {
+                    "description": "Database password",
+                    "type": "str",
+                    "default": "",
+                    "sensitive": True,
+                    "autogenerated": True,
+                },
+            },
         },
-      },
-      "email": {
-        "title": "Email Server",
-        "toggle": "email_enabled",
-        "description": "Configure email server for notifications and user management.",
-        "vars": {
-          "email_enabled": {
-            "description": "Enable email server configuration",
-            "type": "bool",
-            "default": False,
-          },
-          "email_host": {
-            "description": "SMTP server hostname",
-            "type": "str",
-          },
-          "email_port": {
-            "description": "SMTP server port",
-            "type": "int",
-            "default": 587,
-          },
-          "email_username": {
-            "description": "SMTP username",
-            "type": "str",
-          },
-          "email_password": {
-            "description": "SMTP password",
-            "type": "str",
-            "sensitive": True,
-          },
-          "email_from": {
-            "description": "From email address",
-            "type": "str",
-          },
-          "email_use_tls": {
-            "description": "Use TLS encryption",
-            "type": "bool",
-            "default": True,
-          },
-          "email_use_ssl": {
-            "description": "Use SSL encryption",
-            "type": "bool",
-            "default": False,
-          }
+        "email": {
+            "title": "Email Server",
+            "toggle": "email_enabled",
+            "description": "Configure email server for notifications and user management.",
+            "vars": {
+                "email_enabled": {
+                    "description": "Enable email server configuration",
+                    "type": "bool",
+                    "default": False,
+                },
+                "email_host": {
+                    "description": "SMTP server hostname",
+                    "type": "str",
+                },
+                "email_port": {
+                    "description": "SMTP server port",
+                    "type": "int",
+                    "default": 587,
+                },
+                "email_username": {
+                    "description": "SMTP username",
+                    "type": "str",
+                },
+                "email_password": {
+                    "description": "SMTP password",
+                    "type": "str",
+                    "sensitive": True,
+                },
+                "email_from": {
+                    "description": "From email address",
+                    "type": "str",
+                },
+                "email_use_tls": {
+                    "description": "Use TLS encryption",
+                    "type": "bool",
+                    "default": True,
+                },
+                "email_use_ssl": {
+                    "description": "Use SSL encryption",
+                    "type": "bool",
+                    "default": False,
+                },
+            },
         },
-      },
-      "authentik": {
-        "title": "Authentik SSO",
-        "toggle": "authentik_enabled",
-        "description": "Integrate with Authentik for Single Sign-On authentication.",
-        "vars": {
-          "authentik_enabled": {
-            "description": "Enable Authentik SSO integration",
-            "type": "bool",
-            "default": False,
-          },
-          "authentik_url": {
-            "description": "Authentik base URL (e.g., https://auth.example.com)",
-            "type": "str",
-          },
-          "authentik_slug": {
-            "description": "Authentik application slug",
-            "type": "str",
-          },
-          "authentik_client_id": {
-            "description": "OAuth client ID from Authentik provider",
-            "type": "str",
-          },
-          "authentik_client_secret": {
-            "description": "OAuth client secret from Authentik provider",
-            "type": "str",
-            "sensitive": True,
-          },
+        "authentik": {
+            "title": "Authentik SSO",
+            "toggle": "authentik_enabled",
+            "description": "Integrate with Authentik for Single Sign-On authentication.",
+            "vars": {
+                "authentik_enabled": {
+                    "description": "Enable Authentik SSO integration",
+                    "type": "bool",
+                    "default": False,
+                },
+                "authentik_url": {
+                    "description": "Authentik base URL (e.g., https://auth.example.com)",
+                    "type": "str",
+                },
+                "authentik_slug": {
+                    "description": "Authentik application slug",
+                    "type": "str",
+                },
+                "authentik_client_id": {
+                    "description": "OAuth client ID from Authentik provider",
+                    "type": "str",
+                },
+                "authentik_client_secret": {
+                    "description": "OAuth client secret from Authentik provider",
+                    "type": "str",
+                    "sensitive": True,
+                },
+            },
         },
-      },
     }
-  )
-
-
+)

+ 0 - 9
pyproject.toml

@@ -30,12 +30,3 @@ boilerplates = "cli.__main__:run"
 [tool.setuptools.packages.find]
 include = ["cli*"]
 exclude = ["tests*", "scripts*"]
-
-[tool.pylint.format]
-indent-string = '  '
-max-line-length = 120
-
-[tool.pylint.messages_control]
-disable = [
-    "bad-indentation",
-]

部分文件因文件數量過多而無法顯示