diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index 269d36d2..da78b41e 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -49,5 +49,3 @@ jobs: with: branch: gh-pages folder: site - -# Made with Bob diff --git a/.secrets.baseline b/.secrets.baseline index 9324636a..e0564e09 100644 --- a/.secrets.baseline +++ b/.secrets.baseline @@ -3,7 +3,7 @@ "files": "^.secrets.baseline$", "lines": null }, - "generated_at": "2025-12-25T19:13:06Z", + "generated_at": "2026-01-14T11:35:47Z", "plugins_used": [ { "name": "AWSKeyDetector" @@ -143,6 +143,16 @@ "verified_result": null } ], + "test/src/test_backup.py": [ + { + "hashed_secret": "4dfd3a58b4820476afe7efa2e2c52b267eec876a", + "is_secret": false, + "is_verified": false, + "line_number": 753, + "type": "Secret Keyword", + "verified_result": null + } + ], "test/src/test_db2.py": [ { "hashed_secret": "a4b48a81cdab1e1a5dd37907d6c85ca1c61ddc7c", diff --git a/mkdocs.yml b/mkdocs.yml index adec8bab..cb5a0d82 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -112,5 +112,3 @@ extra: social: - icon: fontawesome/brands/github link: https://github.com/ibm-mas/python-devops - -# Made with Bob diff --git a/src/mas/devops/backup.py b/src/mas/devops/backup.py new file mode 100644 index 00000000..172f26e4 --- /dev/null +++ b/src/mas/devops/backup.py @@ -0,0 +1,413 @@ +# ***************************************************************************** +# Copyright (c) 2026 IBM Corporation and other Contributors. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Eclipse Public License v1.0 +# which accompanies this distribution, and is available at +# http://www.eclipse.org/legal/epl-v10.html +# +# ***************************************************************************** +import logging +import os +import yaml +from openshift.dynamic import DynamicClient +from openshift.dynamic.exceptions import NotFoundError +import boto3 +from botocore.exceptions import ClientError, NoCredentialsError + +logger = logging.getLogger(name=__name__) + + +def createBackupDirectories(paths: list) -> bool: + """ + Create backup directories if they do not exist + """ + try: + for path in paths: + os.makedirs(path, exist_ok=True) + logger.info(msg=f"Created backup directory: {path}") + return True + except Exception as e: + logger.error(msg=f"Error creating backup directories: {e}") + return False + + +def copyContentsToYamlFile(file_path: str, content: dict) -> bool: + """ + Write dictionary content to a YAML file + """ + try: + # Create a custom dumper that uses literal style for multi-line strings + class LiteralDumper(yaml.SafeDumper): + pass + + def str_representer(dumper, data): + if '\n' in data: + return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='|') + return dumper.represent_scalar('tag:yaml.org,2002:str', data) + + LiteralDumper.add_representer(str, str_representer) + + with open(file_path, 'w') as yaml_file: + yaml.dump(content, yaml_file, default_flow_style=False, Dumper=LiteralDumper) + return True + except Exception as e: + logger.error(f"Error writing to YAML file {file_path}: {e}") + return False + + +def filterResourceData(data: dict) -> dict: + """ + Filter metadata from Resource data and create minimal dict + """ + metadata_fields_to_remove = [ + 'annotations', + 'creationTimestamp', + 'generation', + 'resourceVersion', + 'selfLink', + 'ownerReferences', + 'uid', + 'managedFields' + ] + filteredCopy = data.copy() + if 
'metadata' in filteredCopy: + for field in metadata_fields_to_remove: + if field in filteredCopy['metadata']: + del filteredCopy['metadata'][field] + + if 'status' in filteredCopy: + del filteredCopy['status'] + + # Remove labels with uid + # this will cause problem when restoring the backup + if 'metadata' in filteredCopy and 'labels' in filteredCopy['metadata']: + for key in list(filteredCopy['metadata']['labels'].keys()): + if "uid" in key.lower(): + filteredCopy['metadata']['labels'].pop(key) + + return filteredCopy + + +def extract_secrets_from_dict(data, secret_names=None): + """ + Recursively extract secret names from a dictionary structure. + Looks for keys like 'secretName' and 'secretRef.name' and collects their values. + + Args: + data: Dictionary to search + secret_names: Set to collect secret names (created if None) + + Returns: + Set of secret names found + """ + if secret_names is None: + secret_names = set() + + if isinstance(data, dict): + for key, value in data.items(): + # Check if this key is 'secretName' and has a string value + if (key == 'secretName' or 'secretname' in key.lower()) and isinstance(value, str) and value: + secret_names.add(value) + # Check if this key contains 'secretRef' and contains a 'name' field + elif 'SecretRef' in key and isinstance(value, dict): + if 'name' in value and isinstance(value['name'], str) and value['name']: + secret_names.add(value['name']) + # Recursively search nested structures + elif isinstance(value, (dict, list)): + extract_secrets_from_dict(value, secret_names) + + elif isinstance(data, list): + for item in data: + if isinstance(item, (dict, list)): + extract_secrets_from_dict(item, secret_names) + + return secret_names + + +def backupResources(dynClient: DynamicClient, kind: str, api_version: str, backup_path: str, namespace=None, name=None, labels=None) -> tuple: + """ + Backup resources of a given kind. + If name is provided, backs up that specific resource. 
+ If name is None, backs up all resources of that kind. + If namespace is None, backs up cluster-level resources. + If labels is provided, filters resources by label selectors. + + Args: + dynClient: Kubernetes dynamic client + kind: Resource kind (e.g., 'MongoCfg', 'Secret', 'ClusterRole') + api_version: API version (e.g., 'config.mas.ibm.com/v1') + backup_path: Path to save backup files + namespace: Optional namespace to backup from (None for cluster-level resources) + name: Optional specific resource name + labels: Optional list of label selectors (e.g., ['app=myapp', 'env=prod']) + + Returns: + tuple: (backed_up_count: int, not_found_count: int, failed_count: int, discovered_secrets: set) + """ + discovered_secrets = set() + backed_up_count = 0 + not_found_count = 0 + failed_count = 0 + + # Build label selector string if labels provided + label_selector = None + if labels: + label_selector = ','.join(labels) + + # Determine scope description for logging + scope_desc = f"namespace '{namespace}'" if namespace else "cluster-level" + label_desc = f" with labels [{label_selector}]" if label_selector else "" + + try: + resourceAPI = dynClient.resources.get(api_version=api_version, kind=kind) + + if name: + # Backup specific named resource + logger.info(f"Backing up {kind} '{name}' from {scope_desc} (API version: {api_version}){label_desc}") + try: + if namespace: + resource = resourceAPI.get(name=name, namespace=namespace) + else: + resource = resourceAPI.get(name=name) + + if resource: + resources_to_process = [resource] + else: + logger.info(f"{kind} '{name}' not found in {scope_desc}, skipping backup") + not_found_count = 1 + return (backed_up_count, not_found_count, failed_count, discovered_secrets) + except NotFoundError: + logger.error(f"{kind} '{name}' not found in {scope_desc}, skipping backup") + not_found_count = 1 + return (backed_up_count, not_found_count, failed_count, discovered_secrets) + else: + # Backup all resources of this kind + 
logger.info(f"Backing up all {kind} resources from {scope_desc} (API version: {api_version}){label_desc}") + + # Build get parameters + get_params = {} + if namespace: + get_params['namespace'] = namespace + if label_selector: + get_params['label_selector'] = label_selector + + resources = resourceAPI.get(**get_params) + resources_to_process = resources.items + + # Process each resource + for resource in resources_to_process: + resource_name = resource["metadata"]["name"] + resource_dict = resource.to_dict() + + # Extract secrets from this resource if it's not a Secret itself + if kind != 'Secret': + secrets = extract_secrets_from_dict(resource_dict.get('spec', {})) + if secrets: + logger.info(f"Found {len(secrets)} secret reference(s) in {kind} '{resource_name}': {', '.join(sorted(secrets))}") + discovered_secrets.update(secrets) + + # Backup the resource + resource_backup_path = f"{backup_path}/resources/{kind.lower()}s" + createBackupDirectories([resource_backup_path]) + resource_file_path = f"{resource_backup_path}/{resource_name}.yaml" + filtered_resource = filterResourceData(resource_dict) + if copyContentsToYamlFile(resource_file_path, filtered_resource): + logger.info(f"Successfully backed up {kind} '{resource_name}' to '{resource_file_path}'") + backed_up_count += 1 + else: + logger.error(f"Failed to back up {kind} '{resource_name}' to '{resource_file_path}'") + failed_count += 1 + + if backed_up_count > 0: + logger.info(f"Successfully backed up {backed_up_count} {kind} resource(s)") + elif not name: + logger.info(f"No {kind} resources found in {scope_desc}{label_desc}") + + return (backed_up_count, not_found_count, failed_count, discovered_secrets) + + except NotFoundError: + if name: + logger.info(f"{kind} '{name}' not found in {scope_desc}") + not_found_count = 1 + else: + logger.info(f"No {kind} resources found in {scope_desc}{label_desc}") + return (backed_up_count, not_found_count, failed_count, discovered_secrets) + except Exception as e: + 
logger.error(f"Error backing up {kind} resources: {e}") + failed_count = 1 + return (backed_up_count, not_found_count, failed_count, discovered_secrets) + + +def uploadToS3( + file_path: str, + bucket_name: str, + object_name=None, + endpoint_url=None, + aws_access_key_id=None, + aws_secret_access_key=None, + region_name=None +) -> bool: + """ + Upload a tar.gz file to S3-compatible storage. + + Args: + file_path: Path to the tar.gz file to upload + bucket_name: Name of the S3 bucket + object_name: S3 object name. If not specified, file_path basename is used + endpoint_url: S3-compatible endpoint URL (e.g., for MinIO, Ceph) + aws_access_key_id: AWS access key ID (if not using environment variables) + aws_secret_access_key: AWS secret access key (if not using environment variables) + region_name: AWS region name (default: us-east-1) + + Returns: + bool: True if file was uploaded successfully, False otherwise + """ + # If S3 object_name was not specified, use file_path basename + if object_name is None: + object_name = os.path.basename(file_path) + + # Validate file exists and is a tar.gz file + if not os.path.exists(file_path): + logger.error(f"File not found: {file_path}") + return False + + if not file_path.endswith('.tar.gz'): + logger.warning(f"File does not have .tar.gz extension: {file_path}") + + # Configure S3 client + try: + s3_config = {} + + if endpoint_url: + s3_config['endpoint_url'] = endpoint_url + if aws_access_key_id and aws_secret_access_key: + s3_config['aws_access_key_id'] = aws_access_key_id + s3_config['aws_secret_access_key'] = aws_secret_access_key + if region_name: + s3_config['region_name'] = region_name + else: + s3_config['region_name'] = 'us-east-1' + + s3_client = boto3.client('s3', **s3_config) + + # Upload the file + logger.info(f"Uploading {file_path} to s3://{bucket_name}/{object_name}") + + file_size = os.path.getsize(file_path) + logger.info(f"File size: {file_size / (1024 * 1024):.2f} MB") + + s3_client.upload_file(file_path, 
bucket_name, object_name) + + logger.info(f"Successfully uploaded {file_path} to s3://{bucket_name}/{object_name}") + return True + + except FileNotFoundError: + logger.error(f"File not found: {file_path}") + return False + except NoCredentialsError: + logger.error("AWS credentials not found. Please provide credentials or configure environment variables.") + return False + except ClientError as e: + error_code = e.response.get('Error', {}).get('Code', 'Unknown') + error_message = e.response.get('Error', {}).get('Message', str(e)) + logger.error(f"S3 client error ({error_code}): {error_message}") + return False + except Exception as e: + logger.error(f"Unexpected error uploading to S3: {e}") + return False + + +def downloadFromS3( + bucket_name: str, + object_name: str, + local_dir: str, + endpoint_url=None, + aws_access_key_id=None, + aws_secret_access_key=None, + region_name=None +) -> bool: + """ + Download a tar.gz file from S3-compatible storage to a backup directory. + + Args: + bucket_name: Name of the S3 bucket + object_name: S3 object name to download + local_dir: Directory path where the file will be downloaded + endpoint_url: S3-compatible endpoint URL (e.g., for MinIO, Ceph) + aws_access_key_id: AWS access key ID (if not using environment variables) + aws_secret_access_key: AWS secret access key (if not using environment variables) + region_name: AWS region name (default: us-east-1) + + Returns: + bool: True if file was downloaded successfully, False otherwise + """ + # Validate backup directory + if not os.path.exists(local_dir): + logger.info(f"Backup directory does not exist, creating: {local_dir}") + try: + os.makedirs(local_dir, exist_ok=True) + except Exception as e: + logger.error(f"Failed to create backup directory {local_dir}: {e}") + return False + + # Construct the full file path + file_path = os.path.join(local_dir, object_name) + + # Warn if file doesn't have .tar.gz extension + if not object_name.endswith('.tar.gz'): + 
logger.warning(f"Object does not have .tar.gz extension: {object_name}") + + # Configure S3 client + try: + s3_config = {} + + if endpoint_url: + s3_config['endpoint_url'] = endpoint_url + if aws_access_key_id and aws_secret_access_key: + s3_config['aws_access_key_id'] = aws_access_key_id + s3_config['aws_secret_access_key'] = aws_secret_access_key + if region_name: + s3_config['region_name'] = region_name + else: + s3_config['region_name'] = 'us-east-1' + + s3_client = boto3.client('s3', **s3_config) + + # Check if object exists and get its size + logger.info(f"Downloading s3://{bucket_name}/{object_name} to {file_path}") + + try: + response = s3_client.head_object(Bucket=bucket_name, Key=object_name) + file_size = response.get('ContentLength', 0) + logger.info(f"Object size: {file_size / (1024 * 1024):.2f} MB") + except ClientError as e: + if e.response.get('Error', {}).get('Code') == '404': + logger.error(f"Object not found in S3: s3://{bucket_name}/{object_name}") + return False + raise + + # Download the file + s3_client.download_file(bucket_name, object_name, file_path) + + # Verify the downloaded file exists + if os.path.exists(file_path): + downloaded_size = os.path.getsize(file_path) + logger.info(f"Successfully downloaded {object_name} to {file_path}") + logger.info(f"Downloaded file size: {downloaded_size / (1024 * 1024):.2f} MB") + return True + else: + logger.error(f"Download completed but file not found at {file_path}") + return False + + except NoCredentialsError: + logger.error("AWS credentials not found. 
Please provide credentials or configure environment variables.") + return False + except ClientError as e: + error_code = e.response.get('Error', {}).get('Code', 'Unknown') + error_message = e.response.get('Error', {}).get('Message', str(e)) + logger.error(f"S3 client error ({error_code}): {error_message}") + return False + except Exception as e: + logger.error(f"Unexpected error downloading from S3: {e}") + return False diff --git a/src/mas/devops/data/ocp.yaml b/src/mas/devops/data/ocp.yaml index 173ba916..275eacad 100644 --- a/src/mas/devops/data/ocp.yaml +++ b/src/mas/devops/data/ocp.yaml @@ -59,5 +59,3 @@ ocp_versions: # - Extended Support (EUS): Additional 6 months available for purchase # - EUS is included with Premium subscriptions # - Not all versions have EUS available - -# Made with Bob diff --git a/src/mas/devops/mas/__init__.py b/src/mas/devops/mas/__init__.py index dfbecf04..333f109f 100644 --- a/src/mas/devops/mas/__init__.py +++ b/src/mas/devops/mas/__init__.py @@ -13,4 +13,5 @@ verifyMasInstance, getMasChannel, updateIBMEntitlementKey, + getMasPublicClusterIssuer, ) diff --git a/src/mas/devops/mas/suite.py b/src/mas/devops/mas/suite.py index 744b4e0d..01020be8 100644 --- a/src/mas/devops/mas/suite.py +++ b/src/mas/devops/mas/suite.py @@ -313,3 +313,47 @@ def updateIBMEntitlementKey(dynClient: DynamicClient, namespace: str, icrUsernam secret = secretsAPI.apply(body=secret, namespace=namespace) return secret + + +def getMasPublicClusterIssuer(dynClient: DynamicClient, instanceId: str) -> str | None: + """ + Retrieve the Public Cluster Issuer for a MAS instance. + + This function queries the Suite custom resource and attempts to retrieve the + certificate issuer name from spec.certificateIssuer.name. If the keys don't exist, + it returns the default issuer name. + + Args: + dynClient (DynamicClient): OpenShift dynamic client for cluster API interactions. + instanceId (str): The MAS instance identifier to use. 
+ + Returns: + str: The name of the cluster issuer used for the passed in MAS Instance. + Returns the default "mas-{instanceId}-core-public-issuer" if the suite + doesn't specify a custom issuer, or None if the suite is not found. + """ + try: + suitesAPI = dynClient.resources.get(api_version="core.mas.ibm.com/v1", kind="Suite") + suite = suitesAPI.get(name=instanceId, namespace=f"mas-{instanceId}-core") + + # Check if spec.certificateIssuer.name exists + if hasattr(suite, 'spec') and hasattr(suite.spec, 'certificateIssuer') and hasattr(suite.spec.certificateIssuer, 'name'): + issuerName = suite.spec.certificateIssuer.name + logger.debug(f"Found custom certificate issuer: {issuerName}") + return issuerName + + # Keys don't exist, return default + defaultIssuer = f"mas-{instanceId}-core-public-issuer" + logger.debug(f"No custom certificate issuer found, using default: {defaultIssuer}") + return defaultIssuer + + except NotFoundError: + logger.warning(f"Suite instance '{instanceId}' not found") + return None + except ResourceNotFoundError: + # The MAS Suite CRD has not even been installed in the cluster + logger.warning("MAS Suite CRD not found in the cluster") + return None + except UnauthorizedError as e: + logger.error(f"Error: Unable to retrieve MAS instance due to failed authorization: {e}") + return None diff --git a/src/mas/devops/ocp.py b/src/mas/devops/ocp.py index 9e14261b..efc66fa6 100644 --- a/src/mas/devops/ocp.py +++ b/src/mas/devops/ocp.py @@ -437,6 +437,64 @@ def crdExists(dynClient: DynamicClient, crdName: str) -> bool: return False +def getCR(dynClient: DynamicClient, cr_api_version: str, cr_kind: str, cr_name: str, namespace: str = None) -> dict: + """ + Get a Custom Resource + """ + + try: + crAPI = dynClient.resources.get(api_version=cr_api_version, kind=cr_kind) + if namespace: + cr = crAPI.get(name=cr_name, namespace=namespace) + else: + cr = crAPI.get(name=cr_name) + return cr + except NotFoundError: + logger.debug(f"CR {cr_name} of kind 
{cr_kind} does not exist in namespace {namespace}") + except Exception as e: + logger.debug(f"Error retrieving CR {cr_name} of kind {cr_kind} in namespace {namespace}: {e}") + + return {} + + +def getSecret(dynClient: DynamicClient, namespace: str, secret_name: str) -> dict: + """ + Get a Secret + """ + try: + secretAPI = dynClient.resources.get(api_version="v1", kind="Secret") + secret = secretAPI.get(name=secret_name, namespace=namespace) + logger.debug(f"Secret {secret_name} exists in namespace {namespace}") + return secret.to_dict() + except NotFoundError: + logger.debug(f"Secret {secret_name} does not exist in namespace {namespace}") + return {} + + +def apply_resource(dynClient: DynamicClient, resource_yaml: str, namespace: str): + """ + Apply a Kubernetes resource from its YAML definition. + If the resource already exists, it will be updated. + If it does not exist, it will be created. + """ + resource_dict = yaml.safe_load(resource_yaml) + kind = resource_dict['kind'] + api_version = resource_dict['apiVersion'] + metadata = resource_dict['metadata'] + name = metadata['name'] + + try: + resource = dynClient.resources.get(api_version=api_version, kind=kind) + # Try to get the existing resource + resource.get(name=name, namespace=namespace) + # If found, skip creation + logger.debug(f"{kind} '{name}' already exists in namespace '{namespace}', skipping creation.") + except NotFoundError: + # If not found, create it + logger.debug(f"Creating new {kind} '{name}' in namespace '{namespace}'") + resource.create(body=resource_dict, namespace=namespace) + + def listInstances(dynClient: DynamicClient, apiVersion: str, kind: str) -> list: """ Get a list of instances of a particular custom resource on the cluster. 
diff --git a/src/mas/devops/restore.py b/src/mas/devops/restore.py new file mode 100644 index 00000000..d427e628 --- /dev/null +++ b/src/mas/devops/restore.py @@ -0,0 +1,122 @@ +# ***************************************************************************** +# Copyright (c) 2026 IBM Corporation and other Contributors. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Eclipse Public License v1.0 +# which accompanies this distribution, and is available at +# http://www.eclipse.org/legal/epl-v10.html +# +# ***************************************************************************** +import logging +import yaml +from openshift.dynamic import DynamicClient +from openshift.dynamic.exceptions import NotFoundError + +logger = logging.getLogger(name=__name__) + + +def loadYamlFile(file_path: str): + """ + Load YAML content from a file + + Args: + file_path: Path to the YAML file + + Returns: + dict: Parsed YAML content or None if error + """ + try: + with open(file_path, 'r') as yaml_file: + content = yaml.safe_load(yaml_file) + return content + except Exception as e: + logger.error(f"Error reading YAML file {file_path}: {e}") + return None + + +def restoreResource(dynClient: DynamicClient, resource_data: dict, namespace=None, replace_resource=True) -> tuple: + """ + Restore a single Kubernetes resource from its YAML representation. + If the resource exists and replace_resource is True, it will be updated (replaced). + If the resource exists and replace_resource is False, it will be skipped. + If the resource doesn't exist, it will be created. 
+ + Args: + dynClient: Kubernetes dynamic client + resource_data: Dictionary containing the resource definition + namespace: Optional namespace override (uses resource's namespace if not provided) + replace_resource: If True, replace existing resources; if False, skip them (default: True) + + Returns: + tuple: (success: bool, resource_name: str, status_message: str or None) + - success: True if created, updated, or skipped; False if failed + - resource_name: Name of the resource + - status_message: None if created, "updated" if replaced, "skipped" if exists and not replaced, error message if failed + """ + try: + # Extract resource metadata + kind = resource_data.get('kind') + api_version = resource_data.get('apiVersion') + metadata = resource_data.get('metadata', {}) + resource_name = metadata.get('name') + resource_namespace = namespace or metadata.get('namespace') + + if not kind or not api_version or not resource_name: + error_msg = "Resource missing required fields (kind, apiVersion, or name)" + logger.error(error_msg) + return (False, resource_name or 'unknown', error_msg) + + # Get the resource API + resourceAPI = dynClient.resources.get(api_version=api_version, kind=kind) + + # Determine scope description for logging + scope_desc = f"namespace '{resource_namespace}'" if resource_namespace else "cluster-level" + + # Check if resource already exists + resource_exists = False + existing_resource = None + try: + if resource_namespace: + existing_resource = resourceAPI.get(name=resource_name, namespace=resource_namespace) + else: + existing_resource = resourceAPI.get(name=resource_name) + resource_exists = existing_resource is not None + except NotFoundError: + resource_exists = False + + # Apply the resource (create, update, or skip) + try: + if resource_exists: + if replace_resource: + # Resource exists - update it using strategic merge patch + logger.info(f"Patching existing {kind} '{resource_name}' in {scope_desc}") + + if resource_namespace: + 
resourceAPI.patch(body=resource_data, name=resource_name, namespace=resource_namespace, content_type='application/merge-patch+json') + else: + resourceAPI.patch(body=resource_data, name=resource_name, content_type='application/merge-patch+json') + logger.info(f"Successfully patched {kind} '{resource_name}' in {scope_desc}") + return (True, resource_name, "updated") + else: + # Resource exists but replace_resource is False - skip it + logger.info(f"{kind} '{resource_name}' already exists in {scope_desc}, skipping (replace_resource=False)") + return (True, resource_name, "skipped") + else: + # Resource doesn't exist - create it + logger.info(f"Creating {kind} '{resource_name}' in {scope_desc}") + if resource_namespace: + resourceAPI.create(body=resource_data, namespace=resource_namespace) + else: + resourceAPI.create(body=resource_data) + logger.info(f"Successfully created {kind} '{resource_name}' in {scope_desc}") + return (True, resource_name, None) + except Exception as e: + action = "update" if resource_exists else "create" + error_msg = f"Failed to {action} {kind} '{resource_name}': {e}" + logger.error(error_msg) + return (False, resource_name, error_msg) + + except Exception as e: + error_msg = f"Error restoring resource: {e}" + logger.error(error_msg) + return (False, resource_data.get('metadata', {}).get('name', 'unknown'), error_msg) diff --git a/src/mas/devops/sls.py b/src/mas/devops/sls.py index 4927ed9a..e204bc8d 100644 --- a/src/mas/devops/sls.py +++ b/src/mas/devops/sls.py @@ -77,3 +77,33 @@ def findSLSByNamespace(namespace: str, instances: list = None, dynClient: Dynami if namespace in instance['metadata']['namespace']: return True return False + + +def getSLSRegistrationDetails(namespace: str, name: str, dynClient: DynamicClient): + """ + Retrieve registration details like licenseId and registrationKey from the LicenseService instance's CR status + + This function gets the LicenseService instance of a specified name in a specified namespace. 
+ It retrieves licenseId and registrationKey keys in CR status and returns. + + Args: + namespace (str): The OpenShift namespace to search for SLS instances. + name (str): Name of SLS(LicenseService) instance. + dynClient (DynamicClient): OpenShift dynamic client for querying instances. + Required if instances is None. Defaults to None. + + Returns: + dict: dict with 'licenseId' and 'registrationKey' when details are found. + Empty if not found. + """ + try: + slsAPI = dynClient.resources.get(api_version="sls.ibm.com/v1", kind="LicenseService") + slsInstance = slsAPI.get(name=name, namespace=namespace) + if hasattr(slsInstance, 'status') and hasattr(slsInstance.status, 'licenseId') and hasattr(slsInstance.status, 'registrationKey'): + return dict( + registrationKey=slsInstance.status.registrationKey, + licenseId=slsInstance.status.licenseId + ) + except NotFoundError: + logger.info(f"No SLS '{name}' found in namespace {namespace}.'") + return dict() diff --git a/src/mas/devops/tekton.py b/src/mas/devops/tekton.py index 7ba7c164..96054c99 100644 --- a/src/mas/devops/tekton.py +++ b/src/mas/devops/tekton.py @@ -262,7 +262,7 @@ def updateTektonDefinitions(namespace: str, yamlFile: str) -> None: logger.debug(line) -def preparePipelinesNamespace(dynClient: DynamicClient, instanceId: str = None, storageClass: str = None, accessMode: str = None, waitForBind: bool = True, configureRBAC: bool = True): +def preparePipelinesNamespace(dynClient: DynamicClient, instanceId: str = None, storageClass: str = None, accessMode: str = None, waitForBind: bool = True, configureRBAC: bool = True, createConfigPVC: bool = True, createBackupPVC: bool = False, backupStorageSize: str = "20Gi"): """ Prepare a namespace for MAS pipelines by creating RBAC and PVC resources. @@ -276,6 +276,9 @@ def preparePipelinesNamespace(dynClient: DynamicClient, instanceId: str = None, accessMode (str, optional): Access mode for the PVC. Defaults to None. 
waitForBind (bool, optional): Whether to wait for PVC to bind. Defaults to True. configureRBAC (bool, optional): Whether to configure RBAC. Defaults to True. + createConfigPVC (bool, optional): Whether to create config PVC. Defaults to True. + createBackupPVC (bool, optional): Whether to create backup PVC. Defaults to False. + backupStorageSize (str, optional): Size of the backup PVC storage. Defaults to "20Gi". Returns: None @@ -304,32 +307,66 @@ def preparePipelinesNamespace(dynClient: DynamicClient, instanceId: str = None, # Create PVC (instanceId namespace only) if instanceId is not None: - template = env.get_template("pipelines-pvc.yml.j2") - renderedTemplate = template.render( - mas_instance_id=instanceId, - pipeline_storage_class=storageClass, - pipeline_storage_accessmode=accessMode - ) - logger.debug(renderedTemplate) - pvc = yaml.safe_load(renderedTemplate) pvcAPI = dynClient.resources.get(api_version="v1", kind="PersistentVolumeClaim") - pvcAPI.apply(body=pvc, namespace=namespace) + # Automatically determine if we should wait for PVC binding based on storage class volumeBindingMode = getStorageClassVolumeBindingMode(dynClient, storageClass) waitForBind = (volumeBindingMode == "Immediate") - if waitForBind: - logger.info(f"Storage class {storageClass} uses volumeBindingMode={volumeBindingMode}, waiting for PVC to bind") - pvcIsBound = False - while not pvcIsBound: - configPVC = pvcAPI.get(name="config-pvc", namespace=namespace) - if configPVC.status.phase == "Bound": - pvcIsBound = True - else: - logger.debug("Waiting 15s before checking status of PVC again") - logger.debug(configPVC) - sleep(15) - else: - logger.info(f"Storage class {storageClass} uses volumeBindingMode={volumeBindingMode}, skipping PVC bind wait") + + # Create config PVC if requested + if createConfigPVC: + logger.info("Creating config PVC") + template = env.get_template("pipelines-pvc.yml.j2") + renderedTemplate = template.render( + mas_instance_id=instanceId, + 
pipeline_storage_class=storageClass, + pipeline_storage_accessmode=accessMode + ) + logger.debug(renderedTemplate) + pvc = yaml.safe_load(renderedTemplate) + pvcAPI.apply(body=pvc, namespace=namespace) + + if waitForBind: + logger.info(f"Storage class {storageClass} uses volumeBindingMode={volumeBindingMode}, waiting for config PVC to bind") + pvcIsBound = False + while not pvcIsBound: + configPVC = pvcAPI.get(name="config-pvc", namespace=namespace) + if configPVC.status.phase == "Bound": + pvcIsBound = True + else: + logger.debug("Waiting 15s before checking status of config PVC again") + logger.debug(configPVC) + sleep(15) + else: + logger.info(f"Storage class {storageClass} uses volumeBindingMode={volumeBindingMode}, skipping config PVC bind wait") + + # Create backup PVC if requested + if createBackupPVC: + logger.info("Creating backup PVC") + backupTemplate = env.get_template("pipelines-backup-pvc.yml.j2") + renderedBackupTemplate = backupTemplate.render( + mas_instance_id=instanceId, + pipeline_storage_class=storageClass, + pipeline_storage_accessmode=accessMode, + backup_storage_size=backupStorageSize + ) + logger.debug(renderedBackupTemplate) + backupPvc = yaml.safe_load(renderedBackupTemplate) + pvcAPI.apply(body=backupPvc, namespace=namespace) + + if waitForBind: + logger.info(f"Storage class {storageClass} uses volumeBindingMode={volumeBindingMode}, waiting for backup PVC to bind") + backupPvcIsBound = False + while not backupPvcIsBound: + backupPVC = pvcAPI.get(name="backup-pvc", namespace=namespace) + if backupPVC.status.phase == "Bound": + backupPvcIsBound = True + else: + logger.debug("Waiting 15s before checking status of backup PVC again") + logger.debug(backupPVC) + sleep(15) + else: + logger.info(f"Storage class {storageClass} uses volumeBindingMode={volumeBindingMode}, skipping backup PVC bind wait") def prepareAiServicePipelinesNamespace(dynClient: DynamicClient, instanceId: str = None, storageClass: str = None, accessMode: str = None, 
waitForBind: bool = True, configureRBAC: bool = True): @@ -398,6 +435,46 @@ def prepareAiServicePipelinesNamespace(dynClient: DynamicClient, instanceId: str logger.info(f"Storage class {storageClass} uses volumeBindingMode={volumeBindingMode}, skipping PVC bind wait") +def prepareRestoreSecrets(dynClient: DynamicClient, namespace: str, restoreConfigs: dict = None): + """ + Create or update secret required for MAS Restore pipeline. + + Creates secret in the specified namespace: + - pipeline-restore-configs + + Parameters: + dynClient (DynamicClient): OpenShift Dynamic Client + namespace (str): The namespace to create secrets in + restoreConfigs (dict, optional): configuration data for restore. Defaults to None (empty secret). + + Returns: + None + + Raises: + NotFoundError: If secrets cannot be created + """ + secretsAPI = dynClient.resources.get(api_version="v1", kind="Secret") + + # 1. Secret/pipeline-restore-configs + # ------------------------------------------------------------------------- + # Must exist, but can be empty + try: + secretsAPI.delete(name="pipeline-restore-configs", namespace=namespace) + except NotFoundError: + pass + + if restoreConfigs is None: + restoreConfigs = { + "apiVersion": "v1", + "kind": "Secret", + "type": "Opaque", + "metadata": { + "name": "pipeline-restore-configs" + } + } + secretsAPI.create(body=restoreConfigs, namespace=namespace) + + def prepareInstallSecrets(dynClient: DynamicClient, namespace: str, slsLicenseFile: dict | None = None, additionalConfigs: dict | None = None, certs: dict | None = None, podTemplates: dict | None = None) -> None: """ Create or update secrets required for MAS installation pipelines. @@ -697,6 +774,52 @@ def launchUpdatePipeline(dynClient: DynamicClient, params: dict) -> str: return pipelineURL +def launchBackupPipeline(dynClient: DynamicClient, params: dict) -> str: + """ + Create a PipelineRun to backup a MAS instance. 
+ + Parameters: + dynClient (DynamicClient): OpenShift Dynamic Client + params (dict): Backup parameters including instance ID and configuration + + Returns: + str: URL to the PipelineRun in the OpenShift console + + Raises: + NotFoundError: If resources cannot be created + """ + instanceId = params["mas_instance_id"] + backupVersion = params["backup_version"] + namespace = f"mas-{instanceId}-pipelines" + timestamp = launchPipelineRun(dynClient, namespace, "pipelinerun-backup", params) + + pipelineURL = f"{getConsoleURL(dynClient)}/k8s/ns/mas-{instanceId}-pipelines/tekton.dev~v1beta1~PipelineRun/{instanceId}-backup-{backupVersion}-{timestamp}" + return pipelineURL + + +def launchRestorePipeline(dynClient: DynamicClient, params: dict) -> str: + """ + Create a PipelineRun to restore a MAS instance. + + Parameters: + dynClient (DynamicClient): OpenShift Dynamic Client + params (dict): Restore parameters including instance ID and configuration + + Returns: + str: URL to the PipelineRun in the OpenShift console + + Raises: + NotFoundError: If resources cannot be created + """ + instanceId = params["mas_instance_id"] + restoreVersion = params["restore_version"] + namespace = f"mas-{instanceId}-pipelines" + timestamp = launchPipelineRun(dynClient, namespace, "pipelinerun-restore", params) + + pipelineURL = f"{getConsoleURL(dynClient)}/k8s/ns/mas-{instanceId}-pipelines/tekton.dev~v1beta1~PipelineRun/{instanceId}-restore-{restoreVersion}-{timestamp}" + return pipelineURL + + +def launchAiServiceUpgradePipeline(dynClient: DynamicClient, + aiserviceInstanceId: str, + skipPreCheck: bool = False, diff --git a/src/mas/devops/templates/pipelinerun-backup.yml.j2 b/src/mas/devops/templates/pipelinerun-backup.yml.j2 new file mode 100644 index 00000000..7c9bb94a --- /dev/null +++ b/src/mas/devops/templates/pipelinerun-backup.yml.j2 @@ -0,0 +1,157 @@ +--- +apiVersion: tekton.dev/v1beta1 +kind: PipelineRun +metadata: + name: "{{ mas_instance_id }}-backup-{{ backup_version }}-{{ 
timestamp }}" + labels: + tekton.dev/pipeline: mas-backup +spec: + pipelineRef: + name: mas-backup + serviceAccountName: "{{ service_account_name | default('pipeline', True) }}" + timeouts: + pipeline: "0" + workspaces: + - name: shared-backups + persistentVolumeClaim: + claimName: backup-pvc + params: + # Common Parameters + - name: image_pull_policy + value: IfNotPresent + - name: mas_instance_id + value: "{{ mas_instance_id }}" + + {% if skip_pre_check is defined and skip_pre_check != "" %} + # Pipeline config + - name: skip_pre_check + value: "{{ skip_pre_check }}" + {% endif %} + + # Backup Configuration + - name: backup_version + value: "{{ backup_version }}" + + # Component Flags + {% if include_sls is defined and include_sls != "" %} + - name: include_sls + value: "{{ include_sls }}" + {% endif %} + + # MongoDB Configuration + {% if mongodb_namespace is defined and mongodb_namespace != "" %} + - name: mongodb_namespace + value: "{{ mongodb_namespace }}" + {% endif %} + {% if mongodb_instance_name is defined and mongodb_instance_name != "" %} + - name: mongodb_instance_name + value: "{{ mongodb_instance_name }}" + {% endif %} + {% if mongodb_provider is defined and mongodb_provider != "" %} + - name: mongodb_provider + value: "{{ mongodb_provider }}" + {% endif %} + + # SLS Configuration + {% if sls_namespace is defined and sls_namespace != "" %} + - name: sls_namespace + value: "{{ sls_namespace }}" + {% endif %} + + # Certificate Manager Configuration + {% if cert_manager_provider is defined and cert_manager_provider != "" %} + - name: cert_manager_provider + value: "{{ cert_manager_provider }}" + {% endif %} + + # Development Build Support + {% if artifactory_username is defined and artifactory_username != "" %} + - name: artifactory_username + value: "{{ artifactory_username }}" + {% endif %} + {% if artifactory_token is defined and artifactory_token != "" %} + - name: artifactory_token + value: "{{ artifactory_token }}" + {% endif %} + + # Upload 
Configuration + {% if upload_backup is defined and upload_backup != "" %} + - name: upload_backup + value: "{{ upload_backup }}" + {% endif %} + {% if aws_access_key_id is defined and aws_access_key_id != "" %} + - name: aws_access_key_id + value: "{{ aws_access_key_id }}" + {% endif %} + {% if aws_secret_access_key is defined and aws_secret_access_key != "" %} + - name: aws_secret_access_key + value: "{{ aws_secret_access_key }}" + {% endif %} + {% if s3_bucket_name is defined and s3_bucket_name != "" %} + - name: s3_bucket_name + value: "{{ s3_bucket_name }}" + {% endif %} + {% if s3_region is defined and s3_region != "" %} + - name: s3_region + value: "{{ s3_region }}" + {% endif %} + {% if s3_endpoint_url is defined and s3_endpoint_url != "" %} + - name: s3_endpoint_url + value: "{{ s3_endpoint_url }}" + {% endif %} + {% if artifactory_url is defined and artifactory_url != "" %} + - name: artifactory_url + value: "{{ artifactory_url }}" + {% endif %} + {% if artifactory_repository is defined and artifactory_repository != "" %} + - name: artifactory_repository + value: "{{ artifactory_repository }}" + {% endif %} + + # Manage Application Backup Configuration + {% if backup_manage_app is defined and backup_manage_app != "" %} + - name: backup_manage_app + value: "{{ backup_manage_app }}" + {% endif %} + {% if backup_manage_db is defined and backup_manage_db != "" %} + - name: backup_manage_db + value: "{{ backup_manage_db }}" + {% endif %} + {% if manage_workspace_id is defined and manage_workspace_id != "" %} + - name: manage_workspace_id + value: "{{ manage_workspace_id }}" + {% endif %} + + # Manage Db2 Backup Configuration + {% if manage_db2_namespace is defined and manage_db2_namespace != "" %} + - name: manage_db2_namespace + value: "{{ manage_db2_namespace }}" + {% endif %} + {% if manage_db2_instance_name is defined and manage_db2_instance_name != "" %} + - name: manage_db2_instance_name + value: "{{ manage_db2_instance_name }}" + {% endif %} + {% if 
manage_db2_backup_type is defined and manage_db2_backup_type != "" %} + - name: manage_db2_backup_type + value: "{{ manage_db2_backup_type }}" + {% endif %} + {% if manage_db2_backup_vendor is defined and manage_db2_backup_vendor != "" %} + - name: manage_db2_backup_vendor + value: "{{ manage_db2_backup_vendor }}" + {% endif %} + {% if backup_s3_endpoint is defined and backup_s3_endpoint != "" %} + - name: backup_s3_endpoint + value: "{{ backup_s3_endpoint }}" + {% endif %} + {% if backup_s3_bucket is defined and backup_s3_bucket != "" %} + - name: backup_s3_bucket + value: "{{ backup_s3_bucket }}" + {% endif %} + {% if backup_s3_access_key is defined and backup_s3_access_key != "" %} + - name: backup_s3_access_key + value: "{{ backup_s3_access_key }}" + {% endif %} + {% if backup_s3_secret_key is defined and backup_s3_secret_key != "" %} + - name: backup_s3_secret_key + value: "{{ backup_s3_secret_key }}" + {% endif %} \ No newline at end of file diff --git a/src/mas/devops/templates/pipelinerun-restore.yml.j2 b/src/mas/devops/templates/pipelinerun-restore.yml.j2 new file mode 100644 index 00000000..5a17786f --- /dev/null +++ b/src/mas/devops/templates/pipelinerun-restore.yml.j2 @@ -0,0 +1,230 @@ +--- +apiVersion: tekton.dev/v1beta1 +kind: PipelineRun +metadata: + name: "{{ mas_instance_id }}-restore-{{ restore_version }}-{{ timestamp }}" + labels: + tekton.dev/pipeline: mas-restore +spec: + pipelineRef: + name: mas-restore + serviceAccountName: "{{ service_account_name | default('pipeline', True) }}" + timeouts: + pipeline: "0" + workspaces: + - name: shared-backups + persistentVolumeClaim: + claimName: backup-pvc + - name: restore-configurations + secret: + secretName: pipeline-restore-configs + params: + # Common Parameters + - name: image_pull_policy + value: IfNotPresent + - name: mas_instance_id + value: "{{ mas_instance_id }}" + + {% if skip_pre_check is defined and skip_pre_check != "" %} + # Pipeline config + - name: skip_pre_check + value: "{{ 
skip_pre_check }}" + {% endif %} + + # Restore Configuration + - name: restore_version + value: "{{ restore_version }}" + + # Component Flags + {% if include_sls is defined and include_sls != "" %} + - name: include_sls + value: "{{ include_sls }}" + {% endif %} + {% if include_dro is defined and include_dro != "" %} + - name: include_dro + value: "{{ include_dro }}" + {% endif %} + {% if include_grafana is defined and include_grafana != "" %} + - name: include_grafana + value: "{{ include_grafana }}" + {% endif %} + + # MongoDB Configuration + {% if mongodb_namespace is defined and mongodb_namespace != "" %} + - name: mongodb_namespace + value: "{{ mongodb_namespace }}" + {% endif %} + {% if mongodb_instance_name is defined and mongodb_instance_name != "" %} + - name: mongodb_instance_name + value: "{{ mongodb_instance_name }}" + {% endif %} + {% if mongodb_provider is defined and mongodb_provider != "" %} + - name: mongodb_provider + value: "{{ mongodb_provider }}" + {% endif %} + {% if override_mongodb_storageclass is defined and override_mongodb_storageclass != "" %} + - name: override_mongodb_storageclass + value: "{{ override_mongodb_storageclass }}" + {% endif %} + + # SLS Configuration + {% if sls_namespace is defined and sls_namespace != "" %} + - name: sls_namespace + value: "{{ sls_namespace }}" + {% endif %} + {% if sls_domain is defined and sls_domain != "" %} + - name: sls_domain + value: "{{ sls_domain }}" + {% endif %} + + # DRO Configuration + {% if dro_contact_email is defined and dro_contact_email != "" %} + - name: dro_contact_email + value: "{{ dro_contact_email }}" + {% endif %} + {% if dro_contact_firstname is defined and dro_contact_firstname != "" %} + - name: dro_contact_firstname + value: "{{ dro_contact_firstname }}" + {% endif %} + {% if dro_contact_lastname is defined and dro_contact_lastname != "" %} + - name: dro_contact_lastname + value: "{{ dro_contact_lastname }}" + {% endif %} + {% if ibm_entitlement_key is defined and 
ibm_entitlement_key != "" %} + - name: ibm_entitlement_key + value: "{{ ibm_entitlement_key }}" + {% endif %} + {% if dro_namespace is defined and dro_namespace != "" %} + - name: dro_namespace + value: "{{ dro_namespace }}" + {% endif %} + {% if dro_storage_class is defined and dro_storage_class != "" %} + - name: dro_storage_class + value: "{{ dro_storage_class }}" + {% endif %} + + # Suite Restore Configuration + {% if include_slscfg_from_backup is defined and include_slscfg_from_backup != "" %} + - name: include_slscfg_from_backup + value: "{{ include_slscfg_from_backup }}" + {% endif %} + {% if sls_url_on_restore is defined and sls_url_on_restore != "" %} + - name: sls_url_on_restore + value: "{{ sls_url_on_restore }}" + {% endif %} + {% if sls_cfg_file is defined and sls_cfg_file != "" %} + - name: sls_cfg_file + value: "{{ sls_cfg_file }}" + {% endif %} + {% if include_drocfg_from_backup is defined and include_drocfg_from_backup != "" %} + - name: include_drocfg_from_backup + value: "{{ include_drocfg_from_backup }}" + {% endif %} + {% if dro_url_on_restore is defined and dro_url_on_restore != "" %} + - name: dro_url_on_restore + value: "{{ dro_url_on_restore }}" + {% endif %} + {% if dro_cfg_file is defined and dro_cfg_file != "" %} + - name: dro_cfg_file + value: "{{ dro_cfg_file }}" + {% endif %} + {% if mas_domain_on_restore is defined and mas_domain_on_restore != "" %} + - name: mas_domain_on_restore + value: "{{ mas_domain_on_restore }}" + {% endif %} + + # Certificate Manager Configuration + {% if cert_manager_provider is defined and cert_manager_provider != "" %} + - name: cert_manager_provider + value: "{{ cert_manager_provider }}" + {% endif %} + + # Development Build Support + {% if artifactory_username is defined and artifactory_username != "" %} + - name: artifactory_username + value: "{{ artifactory_username }}" + {% endif %} + {% if artifactory_token is defined and artifactory_token != "" %} + - name: artifactory_token + value: "{{ 
artifactory_token }}" + {% endif %} + + # Download Configuration + {% if backup_archive_name is defined and backup_archive_name != "" %} + - name: backup_archive_name + value: "{{ backup_archive_name }}" + {% endif %} + {% if download_backup is defined and download_backup != "" %} + - name: download_backup + value: "{{ download_backup }}" + {% endif %} + {% if aws_access_key_id is defined and aws_access_key_id != "" %} + - name: aws_access_key_id + value: "{{ aws_access_key_id }}" + {% endif %} + {% if aws_secret_access_key is defined and aws_secret_access_key != "" %} + - name: aws_secret_access_key + value: "{{ aws_secret_access_key }}" + {% endif %} + {% if s3_bucket_name is defined and s3_bucket_name != "" %} + - name: s3_bucket_name + value: "{{ s3_bucket_name }}" + {% endif %} + {% if s3_region is defined and s3_region != "" %} + - name: s3_region + value: "{{ s3_region }}" + {% endif %} + {% if s3_endpoint_url is defined and s3_endpoint_url != "" %} + - name: s3_endpoint_url + value: "{{ s3_endpoint_url }}" + {% endif %} + {% if artifactory_url is defined and artifactory_url != "" %} + - name: artifactory_url + value: "{{ artifactory_url }}" + {% endif %} + {% if artifactory_repository is defined and artifactory_repository != "" %} + - name: artifactory_repository + value: "{{ artifactory_repository }}" + {% endif %} + + # Manage Application Restore Configuration + {% if restore_manage_app is defined and restore_manage_app != "" %} + - name: restore_manage_app + value: "{{ restore_manage_app }}" + {% endif %} + {% if restore_manage_db is defined and restore_manage_db != "" %} + - name: restore_manage_db + value: "{{ restore_manage_db }}" + {% endif %} + # Manage App Storage class + {% if manage_app_override_storageclass is defined and manage_app_override_storageclass != "" %} + - name: manage_app_override_storageclass + value: "{{ manage_app_override_storageclass }}" + {% endif %} + {% if manage_app_storage_class_rwx is defined and 
manage_app_storage_class_rwx != "" %} + - name: manage_app_storage_class_rwx + value: "{{ manage_app_storage_class_rwx }}" + {% endif %} + {% if manage_app_storage_class_rwo is defined and manage_app_storage_class_rwo != "" %} + - name: manage_app_storage_class_rwo + value: "{{ manage_app_storage_class_rwo }}" + {% endif %} + + # Manage Db2 Restore Configuration + {% if manage_db2_restore_vendor is defined and manage_db2_restore_vendor != "" %} + - name: manage_db2_restore_vendor + value: "{{ manage_db2_restore_vendor }}" + {% endif %} + + {% if manage_db_override_storageclass is defined and manage_db_override_storageclass != "" %} + - name: manage_db_override_storageclass + value: "{{ manage_db_override_storageclass }}" + {% endif %} + {% if manage_db_storage_class_rwo is defined and manage_db_storage_class_rwo != "" %} + - name: manage_db_storage_class_rwo + value: "{{ manage_db_storage_class_rwo }}" + {% endif %} + {% if manage_db_storage_class_rwx is defined and manage_db_storage_class_rwx != "" %} + - name: manage_db_storage_class_rwx + value: "{{ manage_db_storage_class_rwx }}" + {% endif %} diff --git a/src/mas/devops/templates/pipelines-backup-pvc.yml.j2 b/src/mas/devops/templates/pipelines-backup-pvc.yml.j2 new file mode 100644 index 00000000..655b9b4d --- /dev/null +++ b/src/mas/devops/templates/pipelines-backup-pvc.yml.j2 @@ -0,0 +1,15 @@ +--- +# PVC for backup storage +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: backup-pvc + namespace: mas-{{ mas_instance_id }}-pipelines +spec: + accessModes: + - {{ pipeline_storage_accessmode }} + volumeMode: Filesystem + storageClassName: {{ pipeline_storage_class }} + resources: + requests: + storage: {{ backup_storage_size }} \ No newline at end of file diff --git a/test/src/test_backup.py b/test/src/test_backup.py new file mode 100644 index 00000000..5ae37452 --- /dev/null +++ b/test/src/test_backup.py @@ -0,0 +1,900 @@ +# 
***************************************************************************** +# Copyright (c) 2026 IBM Corporation and other Contributors. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Eclipse Public License v1.0 +# which accompanies this distribution, and is available at +# http://www.eclipse.org/legal/epl-v10.html +# +# ***************************************************************************** + +import yaml +from unittest.mock import MagicMock, Mock +from openshift.dynamic.exceptions import NotFoundError + +from mas.devops.backup import createBackupDirectories, copyContentsToYamlFile, filterResourceData, backupResources, extract_secrets_from_dict + + +class TestCreateBackupDirectories: + """Tests for createBackupDirectories function""" + + def test_create_single_directory(self, tmp_path): + """Test creating a single backup directory""" + test_dir = tmp_path / "backup1" + result = createBackupDirectories([str(test_dir)]) + + assert result is True + assert test_dir.exists() + assert test_dir.is_dir() + + def test_create_multiple_directories(self, tmp_path): + """Test creating multiple backup directories""" + test_dirs = [ + tmp_path / "backup1", + tmp_path / "backup2", + tmp_path / "backup3" + ] + paths = [str(d) for d in test_dirs] + result = createBackupDirectories(paths) + + assert result is True + for test_dir in test_dirs: + assert test_dir.exists() + assert test_dir.is_dir() + + def test_create_nested_directories(self, tmp_path): + """Test creating nested backup directories""" + nested_dir = tmp_path / "level1" / "level2" / "level3" + result = createBackupDirectories([str(nested_dir)]) + + assert result is True + assert nested_dir.exists() + assert nested_dir.is_dir() + + def test_create_existing_directory(self, tmp_path): + """Test creating a directory that already exists""" + test_dir = tmp_path / "existing" + test_dir.mkdir() + + result = createBackupDirectories([str(test_dir)]) + + 
assert result is True + assert test_dir.exists() + + def test_create_empty_list(self): + """Test with empty list of paths""" + result = createBackupDirectories([]) + assert result is True + + def test_create_directory_permission_error(self, mocker): + """Test handling of permission errors""" + mock_makedirs = mocker.patch('os.makedirs', side_effect=PermissionError("Permission denied")) + + result = createBackupDirectories(["/invalid/path"]) + + assert result is False + mock_makedirs.assert_called_once() + + def test_create_directory_os_error(self, mocker): + """Test handling of OS errors""" + mocker.patch('os.makedirs', side_effect=OSError("OS error")) + + result = createBackupDirectories(["/some/path"]) + + assert result is False + + +class TestCopyContentsToYamlFile: + """Tests for copyContentsToYamlFile function""" + + def test_write_simple_dict(self, tmp_path): + """Test writing a simple dictionary to YAML file""" + test_file = tmp_path / "test.yaml" + content = {"key1": "value1", "key2": "value2"} + + result = copyContentsToYamlFile(str(test_file), content) + + assert result is True + assert test_file.exists() + + with open(test_file, 'r') as f: + loaded_content = yaml.safe_load(f) + assert loaded_content == content + + def test_write_nested_dict(self, tmp_path): + """Test writing a nested dictionary to YAML file""" + test_file = tmp_path / "nested.yaml" + content = { + "level1": { + "level2": { + "level3": "value" + } + }, + "list": [1, 2, 3] + } + + result = copyContentsToYamlFile(str(test_file), content) + + assert result is True + with open(test_file, 'r') as f: + loaded_content = yaml.safe_load(f) + assert loaded_content == content + + def test_write_empty_dict(self, tmp_path): + """Test writing an empty dictionary""" + test_file = tmp_path / "empty.yaml" + content = {} + + result = copyContentsToYamlFile(str(test_file), content) + + assert result is True + with open(test_file, 'r') as f: + loaded_content = yaml.safe_load(f) + assert loaded_content == 
content + + def test_overwrite_existing_file(self, tmp_path): + """Test overwriting an existing YAML file""" + test_file = tmp_path / "overwrite.yaml" + old_content = {"old": "data"} + new_content = {"new": "data"} + + # Write initial content + with open(test_file, 'w') as f: + yaml.dump(old_content, f) + + # Overwrite with new content + result = copyContentsToYamlFile(str(test_file), new_content) + + assert result is True + with open(test_file, 'r') as f: + loaded_content = yaml.safe_load(f) + assert loaded_content == new_content + assert loaded_content != old_content + + def test_write_to_nonexistent_directory(self, tmp_path): + """Test writing to a file in a non-existent directory""" + test_file = tmp_path / "nonexistent" / "test.yaml" + content = {"key": "value"} + + result = copyContentsToYamlFile(str(test_file), content) + + assert result is False + + def test_write_permission_error(self, mocker): + """Test handling of permission errors during write""" + mocker.patch('builtins.open', side_effect=PermissionError("Permission denied")) + + result = copyContentsToYamlFile("/invalid/path.yaml", {"key": "value"}) + + assert result is False + + def test_write_with_special_characters(self, tmp_path): + """Test writing content with special characters""" + test_file = tmp_path / "special.yaml" + content = { + "special": "value with\nnewlines", + "unicode": "café ☕", + "quotes": "value with 'quotes' and \"double quotes\"" + } + + result = copyContentsToYamlFile(str(test_file), content) + + assert result is True + with open(test_file, 'r') as f: + loaded_content = yaml.safe_load(f) + assert loaded_content == content + + +class TestFilterResourceData: + """Tests for filterResourceData function""" + + def test_filter_all_metadata_fields(self): + """Test filtering all metadata fields that should be removed""" + data = { + "apiVersion": "v1", + "kind": "Resource", + "metadata": { + "name": "test-resource", + "namespace": "test-namespace", + "annotations": {"key": "value"}, + 
"creationTimestamp": "2026-01-01T00:00:00Z", + "generation": 1, + "resourceVersion": "12345", + "selfLink": "/api/v1/namespaces/test/resources/test-resource", + "uid": "abc-123-def", + "managedFields": [{"manager": "test"}] + }, + "spec": {"replicas": 3} + } + + result = filterResourceData(data) + + assert "name" in result["metadata"] + assert "namespace" in result["metadata"] + assert "annotations" not in result["metadata"] + assert "creationTimestamp" not in result["metadata"] + assert "generation" not in result["metadata"] + assert "resourceVersion" not in result["metadata"] + assert "selfLink" not in result["metadata"] + assert "uid" not in result["metadata"] + assert "managedFields" not in result["metadata"] + assert "spec" in result + + def test_filter_status_field(self): + """Test that status field is removed""" + data = { + "metadata": {"name": "test"}, + "spec": {"replicas": 3}, + "status": { + "phase": "Running", + "conditions": [] + } + } + + result = filterResourceData(data) + + assert "status" not in result + assert "spec" in result + assert "metadata" in result + + def test_filter_partial_metadata(self): + """Test filtering when only some metadata fields are present""" + data = { + "metadata": { + "name": "test-resource", + "uid": "abc-123", + "labels": {"app": "test"} + } + } + + result = filterResourceData(data) + + assert "name" in result["metadata"] + assert "labels" in result["metadata"] + assert "uid" not in result["metadata"] + + def test_filter_no_metadata(self): + """Test filtering when metadata field is not present""" + data = { + "apiVersion": "v1", + "kind": "Resource", + "spec": {"replicas": 3} + } + + result = filterResourceData(data) + + assert "metadata" not in result + assert "spec" in result + assert "apiVersion" in result + + def test_filter_empty_metadata(self): + """Test filtering with empty metadata""" + data = { + "metadata": {}, + "spec": {"replicas": 3} + } + + result = filterResourceData(data) + + assert "metadata" in result 
+ assert result["metadata"] == {} + + def test_filter_preserves_other_fields(self): + """Test that other fields are preserved""" + data = { + "apiVersion": "v1", + "kind": "ConfigMap", + "metadata": { + "name": "test-config", + "uid": "should-be-removed" + }, + "data": { + "key1": "value1", + "key2": "value2" + } + } + + result = filterResourceData(data) + + assert result["apiVersion"] == "v1" + assert result["kind"] == "ConfigMap" + assert result["data"] == {"key1": "value1", "key2": "value2"} + assert "uid" not in result["metadata"] + + def test_filter_shallow_copy_behavior(self): + """Test that filterResourceData uses shallow copy (modifies nested dicts)""" + data = { + "metadata": { + "name": "test", + "uid": "abc-123" + }, + "status": {"phase": "Running"} + } + + result = filterResourceData(data) + + # Due to shallow copy, nested metadata dict is modified in original + # but top-level status is not (it's deleted from copy only) + assert "uid" not in data["metadata"] # Modified due to shallow copy + assert "status" in data # Not modified (top-level key) + + # Result should not have uid and status + assert "uid" not in result["metadata"] + assert "status" not in result + + def test_filter_complex_resource(self): + """Test filtering a complex Kubernetes resource""" + data = { + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": { + "name": "my-deployment", + "namespace": "default", + "labels": {"app": "myapp"}, + "annotations": {"deployment.kubernetes.io/revision": "1"}, + "creationTimestamp": "2026-01-01T00:00:00Z", + "generation": 5, + "resourceVersion": "98765", + "uid": "xyz-789", + "managedFields": [{"manager": "kubectl"}] + }, + "spec": { + "replicas": 3, + "selector": {"matchLabels": {"app": "myapp"}} + }, + "status": { + "availableReplicas": 3, + "readyReplicas": 3 + } + } + + result = filterResourceData(data) + + # Check preserved fields + assert result["apiVersion"] == "apps/v1" + assert result["kind"] == "Deployment" + assert 
result["metadata"]["name"] == "my-deployment" + assert result["metadata"]["namespace"] == "default" + assert result["metadata"]["labels"] == {"app": "myapp"} + assert result["spec"]["replicas"] == 3 + + # Check removed fields + assert "annotations" not in result["metadata"] + assert "creationTimestamp" not in result["metadata"] + assert "generation" not in result["metadata"] + assert "resourceVersion" not in result["metadata"] + assert "uid" not in result["metadata"] + assert "managedFields" not in result["metadata"] + assert "status" not in result + + def test_filter_empty_dict(self): + """Test filtering an empty dictionary""" + data = {} + result = filterResourceData(data) + assert result == {} + + +class TestExtractSecretsFromDict: + """Tests for extract_secrets_from_dict function""" + + def test_extract_single_secret(self): + """Test extracting a single secret name""" + data = { + "spec": { + "secretName": "my-secret" + } + } + result = extract_secrets_from_dict(data) + assert result == {"my-secret"} + + def test_extract_multiple_secrets(self): + """Test extracting multiple secret names""" + data = { + "spec": { + "database": { + "secretName": "db-secret" + }, + "auth": { + "secretName": "auth-secret" + } + } + } + result = extract_secrets_from_dict(data) + assert result == {"db-secret", "auth-secret"} + + def test_extract_secrets_from_list(self): + """Test extracting secrets from list structures""" + data = { + "spec": { + "volumes": [ + {"secretName": "secret1"}, + {"secretName": "secret2"}, + {"configMap": "not-a-secret"} + ] + } + } + result = extract_secrets_from_dict(data) + assert result == {"secret1", "secret2"} + + def test_extract_nested_secrets(self): + """Test extracting deeply nested secrets""" + data = { + "level1": { + "level2": { + "level3": { + "secretName": "deep-secret" + } + } + } + } + result = extract_secrets_from_dict(data) + assert result == {"deep-secret"} + + def test_no_secrets_found(self): + """Test when no secrets are present""" + 
data = { + "spec": { + "replicas": 3, + "image": "myapp:latest" + } + } + result = extract_secrets_from_dict(data) + assert result == set() + + def test_empty_dict(self): + """Test with empty dictionary""" + result = extract_secrets_from_dict({}) + assert result == set() + + def test_ignore_empty_secret_name(self): + """Test that empty string secret names are ignored""" + data = { + "spec": { + "secretName": "", + "other": { + "secretName": "valid-secret" + } + } + } + result = extract_secrets_from_dict(data) + assert result == {"valid-secret"} + + def test_ignore_non_string_secret_name(self): + """Test that non-string secret names are ignored""" + data = { + "spec": { + "secretName": 123, + "other": { + "secretName": "valid-secret" + } + } + } + result = extract_secrets_from_dict(data) + assert result == {"valid-secret"} + + def test_duplicate_secrets(self): + """Test that duplicate secret names are deduplicated""" + data = { + "spec": { + "volume1": {"secretName": "shared-secret"}, + "volume2": {"secretName": "shared-secret"}, + "volume3": {"secretName": "unique-secret"} + } + } + result = extract_secrets_from_dict(data) + assert result == {"shared-secret", "unique-secret"} + + +class TestBackupResources: + """Tests for backupResources function""" + + def test_backup_single_namespaced_resource(self, tmp_path, mocker): + """Test backing up a single namespaced resource by name""" + backup_path = str(tmp_path / "backup") + + # Mock resource data + mock_resource = { + "metadata": { + "name": "test-resource", + "namespace": "test-ns", + "uid": "abc-123" + }, + "spec": {"replicas": 3} + } + + # Create mock resource object with to_dict method + mock_resource_obj = MagicMock() + mock_resource_obj.__getitem__ = lambda self, key: mock_resource[key] + mock_resource_obj.to_dict.return_value = mock_resource + + # Mock the dynamic client + mock_client = MagicMock() + mock_api = MagicMock() + mock_api.get.return_value = mock_resource_obj + mock_client.resources.get.return_value 
= mock_api + + # Mock the helper functions + mocker.patch('mas.devops.backup.copyContentsToYamlFile', return_value=True) + + result = backupResources( + mock_client, + kind="ConfigMap", + api_version="v1", + backup_path=backup_path, + namespace="test-ns", + name="test-resource" + ) + + backed_up, not_found, failed, secrets = result + assert backed_up == 1 + assert not_found == 0 + assert failed == 0 + assert secrets == set() + + def test_backup_multiple_namespaced_resources(self, tmp_path, mocker): + """Test backing up all resources of a kind in a namespace""" + backup_path = str(tmp_path / "backup") + + # Mock multiple resources + mock_resources = [ + { + "metadata": {"name": "resource1", "namespace": "test-ns"}, + "spec": {"data": "value1"} + }, + { + "metadata": {"name": "resource2", "namespace": "test-ns"}, + "spec": {"data": "value2"} + } + ] + + # Create mock resource objects + mock_resource_objs = [] + for res in mock_resources: + mock_obj = MagicMock() + mock_obj.__getitem__ = lambda self, key, r=res: r[key] + mock_obj.to_dict.return_value = res + mock_resource_objs.append(mock_obj) + + # Mock the response with items + mock_response = MagicMock() + mock_response.items = mock_resource_objs + + # Mock the dynamic client + mock_client = MagicMock() + mock_api = MagicMock() + mock_api.get.return_value = mock_response + mock_client.resources.get.return_value = mock_api + + mocker.patch('mas.devops.backup.copyContentsToYamlFile', return_value=True) + + result = backupResources( + mock_client, + kind="ConfigMap", + api_version="v1", + backup_path=backup_path, + namespace="test-ns" + ) + + backed_up, not_found, failed, secrets = result + assert backed_up == 2 + assert not_found == 0 + assert failed == 0 + + def test_backup_cluster_level_resource(self, tmp_path, mocker): + """Test backing up cluster-level resources (no namespace)""" + backup_path = str(tmp_path / "backup") + + mock_resource = { + "metadata": {"name": "cluster-role"}, + "rules": [{"apiGroups": ["*"], 
"resources": ["*"], "verbs": ["*"]}] + } + + mock_resource_obj = MagicMock() + mock_resource_obj.__getitem__ = lambda self, key: mock_resource[key] + mock_resource_obj.to_dict.return_value = mock_resource + + mock_client = MagicMock() + mock_api = MagicMock() + mock_api.get.return_value = mock_resource_obj + mock_client.resources.get.return_value = mock_api + + mocker.patch('mas.devops.backup.copyContentsToYamlFile', return_value=True) + + result = backupResources( + mock_client, + kind="ClusterRole", + api_version="rbac.authorization.k8s.io/v1", + backup_path=backup_path, + name="cluster-role" + ) + + backed_up, not_found, failed, secrets = result + assert backed_up == 1 + assert not_found == 0 + assert failed == 0 + + def test_backup_with_label_selector(self, tmp_path, mocker): + """Test backing up resources with label selectors""" + backup_path = str(tmp_path / "backup") + + mock_resource = { + "metadata": { + "name": "labeled-resource", + "namespace": "test-ns", + "labels": {"app": "myapp", "env": "prod"} + }, + "spec": {} + } + + mock_resource_obj = MagicMock() + mock_resource_obj.__getitem__ = lambda self, key: mock_resource[key] + mock_resource_obj.to_dict.return_value = mock_resource + + mock_response = MagicMock() + mock_response.items = [mock_resource_obj] + + mock_client = MagicMock() + mock_api = MagicMock() + mock_api.get.return_value = mock_response + mock_client.resources.get.return_value = mock_api + + mocker.patch('mas.devops.backup.copyContentsToYamlFile', return_value=True) + + result = backupResources( + mock_client, + kind="ConfigMap", + api_version="v1", + backup_path=backup_path, + namespace="test-ns", + labels=["app=myapp", "env=prod"] + ) + + backed_up, not_found, failed, secrets = result + assert backed_up == 1 + assert not_found == 0 + assert failed == 0 + + # Verify label selector was passed correctly + mock_api.get.assert_called_once_with(namespace="test-ns", label_selector="app=myapp,env=prod") + + def 
test_backup_resource_not_found_by_name(self, mocker): + """Test handling when a specific named resource is not found""" + mock_client = MagicMock() + mock_api = MagicMock() + mock_api.get.side_effect = NotFoundError(Mock()) + mock_client.resources.get.return_value = mock_api + + result = backupResources( + mock_client, + kind="ConfigMap", + api_version="v1", + backup_path="/tmp/backup", + namespace="test-ns", + name="nonexistent" + ) + + backed_up, not_found, failed, secrets = result + assert backed_up == 0 + assert not_found == 1 + assert failed == 0 + assert secrets == set() + + def test_backup_no_resources_found(self, mocker): + """Test when no resources of the kind exist""" + mock_response = MagicMock() + mock_response.items = [] + + mock_client = MagicMock() + mock_api = MagicMock() + mock_api.get.return_value = mock_response + mock_client.resources.get.return_value = mock_api + + result = backupResources( + mock_client, + kind="ConfigMap", + api_version="v1", + backup_path="/tmp/backup", + namespace="test-ns" + ) + + backed_up, not_found, failed, secrets = result + assert backed_up == 0 + assert not_found == 0 + assert failed == 0 + + def test_backup_discovers_secrets(self, tmp_path, mocker): + """Test that secrets are discovered from resource specs""" + backup_path = str(tmp_path / "backup") + + mock_resource = { + "metadata": {"name": "app-deployment", "namespace": "test-ns"}, + "spec": { + "template": { + "spec": { + "volumes": [ + {"secretName": "db-credentials"}, + {"secretName": "api-key"} + ] + } + } + } + } + + mock_resource_obj = MagicMock() + mock_resource_obj.__getitem__ = lambda self, key: mock_resource[key] + mock_resource_obj.to_dict.return_value = mock_resource + + mock_client = MagicMock() + mock_api = MagicMock() + mock_api.get.return_value = mock_resource_obj + mock_client.resources.get.return_value = mock_api + + mocker.patch('mas.devops.backup.copyContentsToYamlFile', return_value=True) + + result = backupResources( + mock_client, + 
kind="Deployment", + api_version="apps/v1", + backup_path=backup_path, + namespace="test-ns", + name="app-deployment" + ) + + backed_up, not_found, failed, secrets = result + assert backed_up == 1 + assert secrets == {"db-credentials", "api-key"} + + def test_backup_secret_does_not_discover_itself(self, tmp_path, mocker): + """Test that backing up Secrets doesn't try to discover secrets""" + backup_path = str(tmp_path / "backup") + + mock_resource = { + "metadata": {"name": "my-secret", "namespace": "test-ns"}, + "data": {"password": "encoded-value"} + } + + mock_resource_obj = MagicMock() + mock_resource_obj.__getitem__ = lambda self, key: mock_resource[key] + mock_resource_obj.to_dict.return_value = mock_resource + + mock_client = MagicMock() + mock_api = MagicMock() + mock_api.get.return_value = mock_resource_obj + mock_client.resources.get.return_value = mock_api + + mocker.patch('mas.devops.backup.copyContentsToYamlFile', return_value=True) + + result = backupResources( + mock_client, + kind="Secret", + api_version="v1", + backup_path=backup_path, + namespace="test-ns", + name="my-secret" + ) + + backed_up, not_found, failed, secrets = result + assert backed_up == 1 + assert secrets == set() # Should not discover secrets from Secret resources + + def test_backup_write_failure(self, tmp_path, mocker): + """Test handling when writing backup file fails""" + backup_path = str(tmp_path / "backup") + + mock_resource = { + "metadata": {"name": "test-resource", "namespace": "test-ns"}, + "spec": {} + } + + mock_resource_obj = MagicMock() + mock_resource_obj.__getitem__ = lambda self, key: mock_resource[key] + mock_resource_obj.to_dict.return_value = mock_resource + + mock_client = MagicMock() + mock_api = MagicMock() + mock_api.get.return_value = mock_resource_obj + mock_client.resources.get.return_value = mock_api + + # Mock copyContentsToYamlFile to fail + mocker.patch('mas.devops.backup.copyContentsToYamlFile', return_value=False) + + result = backupResources( + 
mock_client, + kind="ConfigMap", + api_version="v1", + backup_path=backup_path, + namespace="test-ns", + name="test-resource" + ) + + backed_up, not_found, failed, secrets = result + assert backed_up == 0 + assert not_found == 0 + assert failed == 1 + + def test_backup_api_exception(self, mocker): + """Test handling of general API exceptions""" + mock_client = MagicMock() + mock_client.resources.get.side_effect = Exception("API error") + + result = backupResources( + mock_client, + kind="ConfigMap", + api_version="v1", + backup_path="/tmp/backup", + namespace="test-ns" + ) + + backed_up, not_found, failed, secrets = result + assert backed_up == 0 + assert not_found == 0 + assert failed == 1 + + def test_backup_mixed_success_and_failure(self, tmp_path, mocker): + """Test backing up multiple resources with mixed success/failure""" + backup_path = str(tmp_path / "backup") + + mock_resources = [ + { + "metadata": {"name": "resource1", "namespace": "test-ns"}, + "spec": {} + }, + { + "metadata": {"name": "resource2", "namespace": "test-ns"}, + "spec": {} + }, + { + "metadata": {"name": "resource3", "namespace": "test-ns"}, + "spec": {} + } + ] + + mock_resource_objs = [] + for res in mock_resources: + mock_obj = MagicMock() + mock_obj.__getitem__ = lambda self, key, r=res: r[key] + mock_obj.to_dict.return_value = res + mock_resource_objs.append(mock_obj) + + mock_response = MagicMock() + mock_response.items = mock_resource_objs + + mock_client = MagicMock() + mock_api = MagicMock() + mock_api.get.return_value = mock_response + mock_client.resources.get.return_value = mock_api + + # Mock copyContentsToYamlFile to succeed for first two, fail for third + mock_copy = mocker.patch('mas.devops.backup.copyContentsToYamlFile') + mock_copy.side_effect = [True, True, False] + + result = backupResources( + mock_client, + kind="ConfigMap", + api_version="v1", + backup_path=backup_path, + namespace="test-ns" + ) + + backed_up, not_found, failed, secrets = result + assert backed_up 
== 2 + assert not_found == 0 + assert failed == 1 + + def test_backup_resource_kind_not_found(self, mocker): + """Test when the resource kind itself is not found in the API""" + mock_client = MagicMock() + mock_client.resources.get.side_effect = NotFoundError(Mock()) + + result = backupResources( + mock_client, + kind="NonExistentKind", + api_version="v1", + backup_path="/tmp/backup", + namespace="test-ns" + ) + + backed_up, not_found, failed, secrets = result + assert backed_up == 0 + assert not_found == 0 + assert failed == 0 diff --git a/test/src/test_mas.py b/test/src/test_mas.py index 10583926..3902a1e7 100644 --- a/test/src/test_mas.py +++ b/test/src/test_mas.py @@ -64,6 +64,12 @@ def test_is_airgap_install(): assert mas.isAirgapInstall(dynClient, checkICSP=False) is False +def test_get_mas_public_cluster_issuer(): + # Test with non-existent instance - should return None + issuer = mas.getMasPublicClusterIssuer(dynClient, "doesnotexist") + assert issuer is None + + # def test_is_app_ready(): # mas.waitForAppReady(dynClient, "fvtcpd", "iot") # mas.waitForAppReady(dynClient, "fvtcpd", "iot", "masdev") diff --git a/test/src/test_restore.py b/test/src/test_restore.py new file mode 100644 index 00000000..3666b6b4 --- /dev/null +++ b/test/src/test_restore.py @@ -0,0 +1,397 @@ +# ***************************************************************************** +# Copyright (c) 2026 IBM Corporation and other Contributors. +# +# All rights reserved. 
# This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# which accompanies this distribution, and is available at
# http://www.eclipse.org/legal/epl-v10.html
#
# *****************************************************************************

import yaml
from unittest.mock import MagicMock, Mock
from openshift.dynamic.exceptions import NotFoundError

from mas.devops.restore import loadYamlFile, restoreResource


class TestLoadYamlFile:
    """Unit tests for loadYamlFile"""

    def test_load_valid_yaml_file(self, tmp_path):
        """A well-formed YAML document round-trips through loadYamlFile."""
        content = {
            'apiVersion': 'v1',
            'kind': 'ConfigMap',
            'metadata': {
                'name': 'test-config',
                'namespace': 'test-ns'
            }
        }

        target = tmp_path / "test.yaml"
        target.write_text(yaml.dump(content))

        loaded = loadYamlFile(str(target))

        assert loaded is not None
        assert loaded['kind'] == 'ConfigMap'
        assert loaded['metadata']['name'] == 'test-config'

    def test_load_empty_yaml_file(self, tmp_path):
        """An empty file yields None."""
        target = tmp_path / "empty.yaml"
        target.write_text("")

        assert loadYamlFile(str(target)) is None

    def test_load_nonexistent_file(self):
        """A missing path yields None rather than raising."""
        assert loadYamlFile("/nonexistent/path/file.yaml") is None

    def test_load_invalid_yaml_file(self, tmp_path):
        """Malformed YAML yields None rather than raising."""
        target = tmp_path / "invalid.yaml"
        target.write_text("invalid: yaml: content: [")

        assert loadYamlFile(str(target)) is None

    def test_load_yaml_with_multiple_documents(self, tmp_path):
        """Multi-document streams are unsupported and yield None."""
        target = tmp_path / "multi.yaml"
        target.write_text("---\nkey1: value1\n---\nkey2: value2")

        # yaml.safe_load() raises on multi-document streams; loadYamlFile
        # swallows the error and returns None.
        assert loadYamlFile(str(target)) is None


class TestRestoreResource:
    """Unit tests for restoreResource"""

    def setup_method(self):
        # Fresh mock dynamic client per test; resources.get hands back the
        # mocked per-kind API object used by restoreResource.
        self.mock_client = MagicMock()
        self.mock_resource_api = MagicMock()
        self.mock_client.resources.get.return_value = self.mock_resource_api

    def test_create_new_namespaced_resource(self):
        """A missing namespaced resource is created in its namespace."""
        resource_data = {
            'apiVersion': 'v1',
            'kind': 'ConfigMap',
            'metadata': {
                'name': 'test-config',
                'namespace': 'test-ns'
            },
            'data': {
                'key': 'value'
            }
        }

        # Lookup reports the resource does not exist yet.
        self.mock_resource_api.get.side_effect = NotFoundError(Mock())

        success, name, status = restoreResource(self.mock_client, resource_data)

        assert (success, name, status) == (True, 'test-config', None)
        self.mock_resource_api.create.assert_called_once_with(
            body=resource_data,
            namespace='test-ns'
        )

    def test_create_new_cluster_resource(self):
        """A missing cluster-scoped resource is created without a namespace."""
        resource_data = {
            'apiVersion': 'v1',
            'kind': 'Namespace',
            'metadata': {
                'name': 'test-namespace'
            }
        }

        self.mock_resource_api.get.side_effect = NotFoundError(Mock())

        success, name, status = restoreResource(self.mock_client, resource_data)

        assert (success, name, status) == (True, 'test-namespace', None)
        self.mock_resource_api.create.assert_called_once_with(
            body=resource_data
        )
self.mock_resource_api.get.return_value = existing_resource + + success, name, status = restoreResource(self.mock_client, resource_data, replace_resource=True) + + assert success is True + assert name == 'test-config' + assert status == 'updated' + self.mock_resource_api.patch.assert_called_once_with( + body=resource_data, + name='test-config', + namespace='test-ns', + content_type='application/merge-patch+json' + ) + + def test_skip_existing_resource_with_replace_false(self): + """Test skipping an existing resource when replace_resource is False""" + resource_data = { + 'apiVersion': 'v1', + 'kind': 'ConfigMap', + 'metadata': { + 'name': 'test-config', + 'namespace': 'test-ns' + } + } + + # Resource exists + existing_resource = {'metadata': {'name': 'test-config'}} + self.mock_resource_api.get.return_value = existing_resource + + success, name, status = restoreResource(self.mock_client, resource_data, replace_resource=False) + + assert success is True + assert name == 'test-config' + assert status == 'skipped' + self.mock_resource_api.patch.assert_not_called() + self.mock_resource_api.create.assert_not_called() + + def test_namespace_override(self): + """Test that namespace parameter overrides resource namespace""" + resource_data = { + 'apiVersion': 'v1', + 'kind': 'ConfigMap', + 'metadata': { + 'name': 'test-config', + 'namespace': 'original-ns' + } + } + + # Resource doesn't exist + self.mock_resource_api.get.side_effect = NotFoundError(Mock()) + + success, name, status = restoreResource( + self.mock_client, + resource_data, + namespace='override-ns' + ) + + assert success is True + self.mock_resource_api.create.assert_called_once_with( + body=resource_data, + namespace='override-ns' + ) + + def test_missing_kind_field(self): + """Test handling resource missing kind field""" + resource_data = { + 'apiVersion': 'v1', + 'metadata': { + 'name': 'test-resource' + } + } + + success, name, status = restoreResource(self.mock_client, resource_data) + + assert success 
is False + assert name == 'test-resource' + assert 'missing required fields' in status.lower() + + def test_missing_api_version_field(self): + """Test handling resource missing apiVersion field""" + resource_data = { + 'kind': 'ConfigMap', + 'metadata': { + 'name': 'test-resource' + } + } + + success, name, status = restoreResource(self.mock_client, resource_data) + + assert success is False + assert name == 'test-resource' + assert 'missing required fields' in status.lower() + + def test_missing_name_field(self): + """Test handling resource missing name field""" + resource_data = { + 'apiVersion': 'v1', + 'kind': 'ConfigMap', + 'metadata': {} + } + + success, name, status = restoreResource(self.mock_client, resource_data) + + assert success is False + assert name == 'unknown' + assert 'missing required fields' in status.lower() + + def test_create_failure(self): + """Test handling create operation failure""" + resource_data = { + 'apiVersion': 'v1', + 'kind': 'ConfigMap', + 'metadata': { + 'name': 'test-config', + 'namespace': 'test-ns' + } + } + + # Resource doesn't exist + self.mock_resource_api.get.side_effect = NotFoundError(Mock()) + # Create fails + self.mock_resource_api.create.side_effect = Exception("Create failed") + + success, name, status = restoreResource(self.mock_client, resource_data) + + assert success is False + assert name == 'test-config' + assert 'Failed to create' in status + assert 'Create failed' in status + + def test_patch_failure(self): + """Test handling patch operation failure""" + resource_data = { + 'apiVersion': 'v1', + 'kind': 'ConfigMap', + 'metadata': { + 'name': 'test-config', + 'namespace': 'test-ns' + } + } + + # Resource exists + existing_resource = {'metadata': {'name': 'test-config'}} + self.mock_resource_api.get.return_value = existing_resource + # Patch fails + self.mock_resource_api.patch.side_effect = Exception("Patch failed") + + success, name, status = restoreResource(self.mock_client, resource_data, 
replace_resource=True) + + assert success is False + assert name == 'test-config' + assert 'Failed to update' in status + assert 'Patch failed' in status + + def test_resource_api_get_failure(self): + """Test handling failure to get resource API""" + resource_data = { + 'apiVersion': 'v1', + 'kind': 'ConfigMap', + 'metadata': { + 'name': 'test-config' + } + } + + # Getting resource API fails + self.mock_client.resources.get.side_effect = Exception("API not found") + + success, name, status = restoreResource(self.mock_client, resource_data) + + assert success is False + assert name == 'test-config' + assert 'Error restoring resource' in status + + def test_update_cluster_scoped_resource(self): + """Test updating a cluster-scoped resource""" + resource_data = { + 'apiVersion': 'v1', + 'kind': 'Namespace', + 'metadata': { + 'name': 'test-namespace' + } + } + + # Resource exists + existing_resource = {'metadata': {'name': 'test-namespace'}} + self.mock_resource_api.get.return_value = existing_resource + + success, name, status = restoreResource(self.mock_client, resource_data, replace_resource=True) + + assert success is True + assert name == 'test-namespace' + assert status == 'updated' + self.mock_resource_api.patch.assert_called_once_with( + body=resource_data, + name='test-namespace', + content_type='application/merge-patch+json' + ) + + def test_malformed_resource_data(self): + """Test handling malformed resource data""" + resource_data = { + 'apiVersion': 'v1', + 'kind': 'ConfigMap' + # Missing metadata entirely + } + + success, name, status = restoreResource(self.mock_client, resource_data) + + assert success is False + assert name == 'unknown' + assert 'missing required fields' in status.lower() + + def test_resource_with_complex_metadata(self): + """Test resource with complex metadata structure""" + resource_data = { + 'apiVersion': 'apps/v1', + 'kind': 'Deployment', + 'metadata': { + 'name': 'test-deployment', + 'namespace': 'test-ns', + 'labels': { + 'app': 
'test', + 'version': 'v1' + }, + 'annotations': { + 'description': 'Test deployment' + } + }, + 'spec': { + 'replicas': 3 + } + } + + # Resource doesn't exist + self.mock_resource_api.get.side_effect = NotFoundError(Mock()) + + success, name, status = restoreResource(self.mock_client, resource_data) + + assert success is True + assert name == 'test-deployment' + assert status is None + self.mock_resource_api.create.assert_called_once()