From 54f08e0a51ac106c2a73091b5cdafdd1ca290b8d Mon Sep 17 00:00:00 2001 From: Sanjay Prabhakar Date: Wed, 10 Dec 2025 13:34:54 +0000 Subject: [PATCH 01/41] [patch] add some ocp functions (#149) Co-authored-by: Sanjay Prabhakar --- src/mas/devops/ocp.py | 57 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 57 insertions(+) diff --git a/src/mas/devops/ocp.py b/src/mas/devops/ocp.py index 4219c5cc..50d24530 100644 --- a/src/mas/devops/ocp.py +++ b/src/mas/devops/ocp.py @@ -243,6 +243,63 @@ def crdExists(dynClient: DynamicClient, crdName: str) -> bool: return False +def getCR(dynClient: DynamicClient, cr_api_version: str, cr_kind: str, cr_name: str, namespace: str = None) -> dict: + """ + Get a Custom Resource + """ + + try: + crAPI = dynClient.resources.get(api_version=cr_api_version, kind=cr_kind) + if namespace: + cr = crAPI.get(name=cr_name, namespace=namespace) + else: + cr = crAPI.get(name=cr_name) + return cr + except NotFoundError: + logger.debug(f"CR {cr_name} of kind {cr_kind} does not exist in namespace {namespace}") + + return {} + + +def getSecret(dynClient: DynamicClient, namespace: str, secret_name: str) -> dict: + """ + Get a Secret + """ + try: + secretAPI = dynClient.resources.get(api_version="v1", kind="Secret") + secret = secretAPI.get(name=secret_name, namespace=namespace) + logger.debug(f"Secret {secret_name} exists in namespace {namespace}") + return secret.to_dict() + except NotFoundError: + logger.debug(f"Secret {secret_name} does not exist in namespace {namespace}") + return {} + + +def apply_resource(dynClient: DynamicClient, resource_yaml: str, namespace: str): + """ + Apply a Kubernetes resource from its YAML definition. + If the resource already exists, it will be updated. + If it does not exist, it will be created. 
+ """ + resource_dict = yaml.safe_load(resource_yaml) + kind = resource_dict['kind'] + api_version = resource_dict['apiVersion'] + metadata = resource_dict['metadata'] + name = metadata['name'] + + try: + resource = dynClient.resources.get(api_version=api_version, kind=kind) + # Try to get the existing resource + resource.get(name=name, namespace=namespace) + # If found, update it + logger.debug(f"Updating existing {kind} '{name}' in namespace '{namespace}'") + resource.patch(body=resource_dict, namespace=namespace, name=name) + except NotFoundError: + # If not found, create it + logger.debug(f"Creating new {kind} '{name}' in namespace '{namespace}'") + resource.create(body=resource_dict, namespace=namespace) + + def listInstances(dynClient: DynamicClient, apiVersion: str, kind: str) -> list: """ Get a list of instances of a particular CR on the cluster From f8dfc855cbdce49ff00ed57fac455c9a0bb2c669 Mon Sep 17 00:00:00 2001 From: Sanjay Prabhakar Date: Fri, 12 Dec 2025 11:47:04 +0000 Subject: [PATCH 02/41] [patch] add exception in getCR --- src/mas/devops/ocp.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/mas/devops/ocp.py b/src/mas/devops/ocp.py index 50d24530..50f2f96b 100644 --- a/src/mas/devops/ocp.py +++ b/src/mas/devops/ocp.py @@ -257,6 +257,8 @@ def getCR(dynClient: DynamicClient, cr_api_version: str, cr_kind: str, cr_name: return cr except NotFoundError: logger.debug(f"CR {cr_name} of kind {cr_kind} does not exist in namespace {namespace}") + except Exception as e: + logger.debug(f"Error retrieving CR {cr_name} of kind {cr_kind} in namespace {namespace}: {e}") return {} From 97a1d8ed706de1af195da8034c5aed1153572080 Mon Sep 17 00:00:00 2001 From: Sanjay Prabhakar Date: Fri, 12 Dec 2025 12:46:43 +0000 Subject: [PATCH 03/41] [patch] fix apply_resource --- src/mas/devops/ocp.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/mas/devops/ocp.py b/src/mas/devops/ocp.py index 50f2f96b..49898df6 100644 --- 
a/src/mas/devops/ocp.py +++ b/src/mas/devops/ocp.py @@ -293,9 +293,8 @@ def apply_resource(dynClient: DynamicClient, resource_yaml: str, namespace: str) resource = dynClient.resources.get(api_version=api_version, kind=kind) # Try to get the existing resource resource.get(name=name, namespace=namespace) - # If found, update it - logger.debug(f"Updating existing {kind} '{name}' in namespace '{namespace}'") - resource.patch(body=resource_dict, namespace=namespace, name=name) + # If found, skip creation + logger.debug(f"{kind} '{name}' already exists in namespace '{namespace}', skipping creation.") except NotFoundError: # If not found, create it logger.debug(f"Creating new {kind} '{name}' in namespace '{namespace}'") From 98303192aa6f4f9b2dbe4a5805f1035353637022 Mon Sep 17 00:00:00 2001 From: whitfiea Date: Mon, 12 Jan 2026 17:07:11 +0000 Subject: [PATCH 04/41] [minor] Add backup.py --- src/mas/devops/backup.py | 66 +++++++ test/src/test_backup.py | 374 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 440 insertions(+) create mode 100644 src/mas/devops/backup.py create mode 100644 test/src/test_backup.py diff --git a/src/mas/devops/backup.py b/src/mas/devops/backup.py new file mode 100644 index 00000000..fd019a3e --- /dev/null +++ b/src/mas/devops/backup.py @@ -0,0 +1,66 @@ +# ***************************************************************************** +# Copyright (c) 2026 IBM Corporation and other Contributors. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Eclipse Public License v1.0 +# which accompanies this distribution, and is available at +# http://www.eclipse.org/legal/epl-v10.html +# +# ***************************************************************************** +import logging +import os +import yaml + +logger = logging.getLogger(name=__name__) + + +def createBackupDirectories(paths: list) -> bool: + """ + Create backup directories if they do not exist + """ + try: + for path in paths: + os.makedirs(path, exist_ok=True) + logger.info(msg=f"Created backup directory: {path}") + return True + except Exception as e: + logger.error(msg=f"Error creating backup directories: {e}") + return False + + +def copyContentsToYamlFile(file_path: str, content: dict) -> bool: + """ + Write dictionary content to a YAML file + """ + try: + with open(file_path, 'w') as yaml_file: + yaml.dump(content, yaml_file, default_flow_style=False) + return True + except Exception as e: + logger.error(f"Error writing to YAML file {file_path}: {e}") + return False + + +def filterResourceData(data: dict) -> dict: + """ + Filter metadata from Resource data and create minimal dict + """ + metadata_fields_to_remove = [ + 'annotations', + 'creationTimestamp', + 'generation', + 'resourceVersion', + 'selfLink', + 'uid', + 'managedFields' + ] + filteredCopy = data.copy() + if 'metadata' in filteredCopy: + for field in metadata_fields_to_remove: + if field in filteredCopy['metadata']: + del filteredCopy['metadata'][field] + + if 'status' in filteredCopy: + del filteredCopy['status'] + + return filteredCopy diff --git a/test/src/test_backup.py b/test/src/test_backup.py new file mode 100644 index 00000000..2566f417 --- /dev/null +++ b/test/src/test_backup.py @@ -0,0 +1,374 @@ +# ***************************************************************************** +# Copyright (c) 2026 IBM Corporation and other Contributors. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Eclipse Public License v1.0 +# which accompanies this distribution, and is available at +# http://www.eclipse.org/legal/epl-v10.html +# +# ***************************************************************************** + +import yaml + +from mas.devops.backup import createBackupDirectories, copyContentsToYamlFile, filterResourceData + + +class TestCreateBackupDirectories: + """Tests for createBackupDirectories function""" + + def test_create_single_directory(self, tmp_path): + """Test creating a single backup directory""" + test_dir = tmp_path / "backup1" + result = createBackupDirectories([str(test_dir)]) + + assert result is True + assert test_dir.exists() + assert test_dir.is_dir() + + def test_create_multiple_directories(self, tmp_path): + """Test creating multiple backup directories""" + test_dirs = [ + tmp_path / "backup1", + tmp_path / "backup2", + tmp_path / "backup3" + ] + paths = [str(d) for d in test_dirs] + result = createBackupDirectories(paths) + + assert result is True + for test_dir in test_dirs: + assert test_dir.exists() + assert test_dir.is_dir() + + def test_create_nested_directories(self, tmp_path): + """Test creating nested backup directories""" + nested_dir = tmp_path / "level1" / "level2" / "level3" + result = createBackupDirectories([str(nested_dir)]) + + assert result is True + assert nested_dir.exists() + assert nested_dir.is_dir() + + def test_create_existing_directory(self, tmp_path): + """Test creating a directory that already exists""" + test_dir = tmp_path / "existing" + test_dir.mkdir() + + result = createBackupDirectories([str(test_dir)]) + + assert result is True + assert test_dir.exists() + + def test_create_empty_list(self): + """Test with empty list of paths""" + result = createBackupDirectories([]) + assert result is True + + def test_create_directory_permission_error(self, mocker): + """Test handling of permission errors""" + mock_makedirs 
= mocker.patch('os.makedirs', side_effect=PermissionError("Permission denied")) + + result = createBackupDirectories(["/invalid/path"]) + + assert result is False + mock_makedirs.assert_called_once() + + def test_create_directory_os_error(self, mocker): + """Test handling of OS errors""" + mocker.patch('os.makedirs', side_effect=OSError("OS error")) + + result = createBackupDirectories(["/some/path"]) + + assert result is False + + +class TestCopyContentsToYamlFile: + """Tests for copyContentsToYamlFile function""" + + def test_write_simple_dict(self, tmp_path): + """Test writing a simple dictionary to YAML file""" + test_file = tmp_path / "test.yaml" + content = {"key1": "value1", "key2": "value2"} + + result = copyContentsToYamlFile(str(test_file), content) + + assert result is True + assert test_file.exists() + + with open(test_file, 'r') as f: + loaded_content = yaml.safe_load(f) + assert loaded_content == content + + def test_write_nested_dict(self, tmp_path): + """Test writing a nested dictionary to YAML file""" + test_file = tmp_path / "nested.yaml" + content = { + "level1": { + "level2": { + "level3": "value" + } + }, + "list": [1, 2, 3] + } + + result = copyContentsToYamlFile(str(test_file), content) + + assert result is True + with open(test_file, 'r') as f: + loaded_content = yaml.safe_load(f) + assert loaded_content == content + + def test_write_empty_dict(self, tmp_path): + """Test writing an empty dictionary""" + test_file = tmp_path / "empty.yaml" + content = {} + + result = copyContentsToYamlFile(str(test_file), content) + + assert result is True + with open(test_file, 'r') as f: + loaded_content = yaml.safe_load(f) + assert loaded_content == content + + def test_overwrite_existing_file(self, tmp_path): + """Test overwriting an existing YAML file""" + test_file = tmp_path / "overwrite.yaml" + old_content = {"old": "data"} + new_content = {"new": "data"} + + # Write initial content + with open(test_file, 'w') as f: + yaml.dump(old_content, f) + + # 
Overwrite with new content + result = copyContentsToYamlFile(str(test_file), new_content) + + assert result is True + with open(test_file, 'r') as f: + loaded_content = yaml.safe_load(f) + assert loaded_content == new_content + assert loaded_content != old_content + + def test_write_to_nonexistent_directory(self, tmp_path): + """Test writing to a file in a non-existent directory""" + test_file = tmp_path / "nonexistent" / "test.yaml" + content = {"key": "value"} + + result = copyContentsToYamlFile(str(test_file), content) + + assert result is False + + def test_write_permission_error(self, mocker): + """Test handling of permission errors during write""" + mocker.patch('builtins.open', side_effect=PermissionError("Permission denied")) + + result = copyContentsToYamlFile("/invalid/path.yaml", {"key": "value"}) + + assert result is False + + def test_write_with_special_characters(self, tmp_path): + """Test writing content with special characters""" + test_file = tmp_path / "special.yaml" + content = { + "special": "value with\nnewlines", + "unicode": "café ☕", + "quotes": "value with 'quotes' and \"double quotes\"" + } + + result = copyContentsToYamlFile(str(test_file), content) + + assert result is True + with open(test_file, 'r') as f: + loaded_content = yaml.safe_load(f) + assert loaded_content == content + + +class TestFilterResourceData: + """Tests for filterResourceData function""" + + def test_filter_all_metadata_fields(self): + """Test filtering all metadata fields that should be removed""" + data = { + "apiVersion": "v1", + "kind": "Resource", + "metadata": { + "name": "test-resource", + "namespace": "test-namespace", + "annotations": {"key": "value"}, + "creationTimestamp": "2026-01-01T00:00:00Z", + "generation": 1, + "resourceVersion": "12345", + "selfLink": "/api/v1/namespaces/test/resources/test-resource", + "uid": "abc-123-def", + "managedFields": [{"manager": "test"}] + }, + "spec": {"replicas": 3} + } + + result = filterResourceData(data) + + assert 
"name" in result["metadata"] + assert "namespace" in result["metadata"] + assert "annotations" not in result["metadata"] + assert "creationTimestamp" not in result["metadata"] + assert "generation" not in result["metadata"] + assert "resourceVersion" not in result["metadata"] + assert "selfLink" not in result["metadata"] + assert "uid" not in result["metadata"] + assert "managedFields" not in result["metadata"] + assert "spec" in result + + def test_filter_status_field(self): + """Test that status field is removed""" + data = { + "metadata": {"name": "test"}, + "spec": {"replicas": 3}, + "status": { + "phase": "Running", + "conditions": [] + } + } + + result = filterResourceData(data) + + assert "status" not in result + assert "spec" in result + assert "metadata" in result + + def test_filter_partial_metadata(self): + """Test filtering when only some metadata fields are present""" + data = { + "metadata": { + "name": "test-resource", + "uid": "abc-123", + "labels": {"app": "test"} + } + } + + result = filterResourceData(data) + + assert "name" in result["metadata"] + assert "labels" in result["metadata"] + assert "uid" not in result["metadata"] + + def test_filter_no_metadata(self): + """Test filtering when metadata field is not present""" + data = { + "apiVersion": "v1", + "kind": "Resource", + "spec": {"replicas": 3} + } + + result = filterResourceData(data) + + assert "metadata" not in result + assert "spec" in result + assert "apiVersion" in result + + def test_filter_empty_metadata(self): + """Test filtering with empty metadata""" + data = { + "metadata": {}, + "spec": {"replicas": 3} + } + + result = filterResourceData(data) + + assert "metadata" in result + assert result["metadata"] == {} + + def test_filter_preserves_other_fields(self): + """Test that other fields are preserved""" + data = { + "apiVersion": "v1", + "kind": "ConfigMap", + "metadata": { + "name": "test-config", + "uid": "should-be-removed" + }, + "data": { + "key1": "value1", + "key2": 
"value2" + } + } + + result = filterResourceData(data) + + assert result["apiVersion"] == "v1" + assert result["kind"] == "ConfigMap" + assert result["data"] == {"key1": "value1", "key2": "value2"} + assert "uid" not in result["metadata"] + + def test_filter_shallow_copy_behavior(self): + """Test that filterResourceData uses shallow copy (modifies nested dicts)""" + data = { + "metadata": { + "name": "test", + "uid": "abc-123" + }, + "status": {"phase": "Running"} + } + + result = filterResourceData(data) + + # Due to shallow copy, nested metadata dict is modified in original + # but top-level status is not (it's deleted from copy only) + assert "uid" not in data["metadata"] # Modified due to shallow copy + assert "status" in data # Not modified (top-level key) + + # Result should not have uid and status + assert "uid" not in result["metadata"] + assert "status" not in result + + def test_filter_complex_resource(self): + """Test filtering a complex Kubernetes resource""" + data = { + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": { + "name": "my-deployment", + "namespace": "default", + "labels": {"app": "myapp"}, + "annotations": {"deployment.kubernetes.io/revision": "1"}, + "creationTimestamp": "2026-01-01T00:00:00Z", + "generation": 5, + "resourceVersion": "98765", + "uid": "xyz-789", + "managedFields": [{"manager": "kubectl"}] + }, + "spec": { + "replicas": 3, + "selector": {"matchLabels": {"app": "myapp"}} + }, + "status": { + "availableReplicas": 3, + "readyReplicas": 3 + } + } + + result = filterResourceData(data) + + # Check preserved fields + assert result["apiVersion"] == "apps/v1" + assert result["kind"] == "Deployment" + assert result["metadata"]["name"] == "my-deployment" + assert result["metadata"]["namespace"] == "default" + assert result["metadata"]["labels"] == {"app": "myapp"} + assert result["spec"]["replicas"] == 3 + + # Check removed fields + assert "annotations" not in result["metadata"] + assert "creationTimestamp" not in 
result["metadata"] + assert "generation" not in result["metadata"] + assert "resourceVersion" not in result["metadata"] + assert "uid" not in result["metadata"] + assert "managedFields" not in result["metadata"] + assert "status" not in result + + def test_filter_empty_dict(self): + """Test filtering an empty dictionary""" + data = {} + result = filterResourceData(data) + assert result == {} + +# Made with Bob From 6fa74be55d5e93aa952a2dc220e1867b593e4195 Mon Sep 17 00:00:00 2001 From: whitfiea Date: Tue, 13 Jan 2026 11:49:50 +0000 Subject: [PATCH 05/41] update backup.p[y to add backup_resources --- src/mas/devops/backup.py | 135 ++++++++++++++++++++++++++++++++++++++- 1 file changed, 134 insertions(+), 1 deletion(-) diff --git a/src/mas/devops/backup.py b/src/mas/devops/backup.py index fd019a3e..3bedff46 100644 --- a/src/mas/devops/backup.py +++ b/src/mas/devops/backup.py @@ -10,6 +10,8 @@ import logging import os import yaml +from openshift.dynamic import DynamicClient +from openshift.dynamic.exceptions import NotFoundError logger = logging.getLogger(name=__name__) @@ -33,8 +35,19 @@ def copyContentsToYamlFile(file_path: str, content: dict) -> bool: Write dictionary content to a YAML file """ try: + # Create a custom dumper that uses literal style for multi-line strings + class LiteralDumper(yaml.SafeDumper): + pass + + def str_representer(dumper, data): + if '\n' in data: + return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='|') + return dumper.represent_scalar('tag:yaml.org,2002:str', data) + + LiteralDumper.add_representer(str, str_representer) + with open(file_path, 'w') as yaml_file: - yaml.dump(content, yaml_file, default_flow_style=False) + yaml.dump(content, yaml_file, default_flow_style=False, Dumper=LiteralDumper) return True except Exception as e: logger.error(f"Error writing to YAML file {file_path}: {e}") @@ -64,3 +77,123 @@ def filterResourceData(data: dict) -> dict: del filteredCopy['status'] return filteredCopy + + +def 
extract_secrets_from_dict(data, secret_names=None): + """ + Recursively extract secret names from a dictionary structure. + Looks for keys named 'secretName' and collects their values. + + Args: + data: Dictionary to search + secret_names: Set to collect secret names (created if None) + + Returns: + Set of secret names found + """ + if secret_names is None: + secret_names = set() + + if isinstance(data, dict): + for key, value in data.items(): + # Check if this key is 'secretName' and has a string value + if key == 'secretName' and isinstance(value, str) and value: + secret_names.add(value) + # Recursively search nested structures + elif isinstance(value, (dict, list)): + extract_secrets_from_dict(value, secret_names) + + elif isinstance(data, list): + for item in data: + if isinstance(item, (dict, list)): + extract_secrets_from_dict(item, secret_names) + + return secret_names + + +def backupResources(dynClient: DynamicClient, namespace: str, kind: str, api_version: str, backup_path: str, name=None) -> tuple: + """ + Backup resources of a given kind in a namespace. + If name is provided, backs up that specific resource. + If name is None, backs up all resources of that kind. 
+ + Args: + dynClient: Kubernetes dynamic client + namespace: Namespace to backup from + kind: Resource kind (e.g., 'MongoCfg', 'Secret') + api_version: API version (e.g., 'config.mas.ibm.com/v1') + backup_path: Path to save backup files + name: Optional specific resource name + + Returns: + tuple: (backed_up_count: int, not_found_count: int, failed_count: int, discovered_secrets: set) + """ + discovered_secrets = set() + backed_up_count = 0 + not_found_count = 0 + failed_count = 0 + + try: + resourceAPI = dynClient.resources.get(api_version=api_version, kind=kind) + + if name: + # Backup specific named resource + logger.info(f"Backing up {kind} '{name}' from namespace '{namespace}' (API version: {api_version})") + try: + resource = resourceAPI.get(name=name, namespace=namespace) + if resource: + resources_to_process = [resource] + else: + logger.info(f"{kind} '{name}' not found in namespace '{namespace}', skipping backup") + not_found_count = 1 + return (backed_up_count, not_found_count, failed_count, discovered_secrets) + except NotFoundError: + logger.error(f"{kind} '{name}' not found in namespace '{namespace}', skipping backup") + not_found_count = 1 + return (backed_up_count, not_found_count, failed_count, discovered_secrets) + else: + # Backup all resources of this kind + logger.info(f"Backing up all {kind} resources from namespace '{namespace}' (API version: {api_version})") + resources = resourceAPI.get(namespace=namespace) + resources_to_process = resources.items + + # Process each resource + for resource in resources_to_process: + resource_name = resource["metadata"]["name"] + resource_dict = resource.to_dict() + + # Extract secrets from this resource if it's not a Secret itself + if kind != 'Secret': + secrets = extract_secrets_from_dict(resource_dict.get('spec', {})) + if secrets: + logger.info(f"Found {len(secrets)} secret reference(s) in {kind} '{resource_name}': {', '.join(sorted(secrets))}") + discovered_secrets.update(secrets) + + # Backup the 
resource + resource_file_path = f"{backup_path}/{resource_name}.yaml" + filtered_resource = filterResourceData(resource_dict) + if copyContentsToYamlFile(resource_file_path, filtered_resource): + logger.info(f"Successfully backed up {kind} '{resource_name}' to '{resource_file_path}'") + backed_up_count += 1 + else: + logger.error(f"Failed to back up {kind} '{resource_name}' to '{resource_file_path}'") + failed_count += 1 + + if backed_up_count > 0: + logger.info(f"Successfully backed up {backed_up_count} {kind} resource(s)") + elif not name: + logger.info(f"No {kind} resources found in namespace '{namespace}'") + + return (backed_up_count, not_found_count, failed_count, discovered_secrets) + + except NotFoundError: + if name: + logger.info(f"{kind} '{name}' not found in namespace '{namespace}'") + not_found_count = 1 + else: + logger.info(f"No {kind} resources found in namespace '{namespace}'") + return (backed_up_count, not_found_count, failed_count, discovered_secrets) + except Exception as e: + logger.error(f"Error backing up {kind} resources: {e}") + failed_count = 1 + return (backed_up_count, not_found_count, failed_count, discovered_secrets) From 79fb4a0554eec3b8b938eac26c1213d4373966dc Mon Sep 17 00:00:00 2001 From: whitfiea Date: Tue, 13 Jan 2026 11:51:01 +0000 Subject: [PATCH 06/41] black formatting --- src/mas/devops/backup.py | 38 +++++++++--------- test/src/test_backup.py | 86 ++++++++++++++++++++-------------------- 2 files changed, 62 insertions(+), 62 deletions(-) diff --git a/src/mas/devops/backup.py b/src/mas/devops/backup.py index 3bedff46..13f009d6 100644 --- a/src/mas/devops/backup.py +++ b/src/mas/devops/backup.py @@ -38,14 +38,14 @@ def copyContentsToYamlFile(file_path: str, content: dict) -> bool: # Create a custom dumper that uses literal style for multi-line strings class LiteralDumper(yaml.SafeDumper): pass - + def str_representer(dumper, data): if '\n' in data: return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='|') 
return dumper.represent_scalar('tag:yaml.org,2002:str', data) - + LiteralDumper.add_representer(str, str_representer) - + with open(file_path, 'w') as yaml_file: yaml.dump(content, yaml_file, default_flow_style=False, Dumper=LiteralDumper) return True @@ -75,7 +75,7 @@ def filterResourceData(data: dict) -> dict: if 'status' in filteredCopy: del filteredCopy['status'] - + return filteredCopy @@ -83,17 +83,17 @@ def extract_secrets_from_dict(data, secret_names=None): """ Recursively extract secret names from a dictionary structure. Looks for keys named 'secretName' and collects their values. - + Args: data: Dictionary to search secret_names: Set to collect secret names (created if None) - + Returns: Set of secret names found """ if secret_names is None: secret_names = set() - + if isinstance(data, dict): for key, value in data.items(): # Check if this key is 'secretName' and has a string value @@ -102,12 +102,12 @@ def extract_secrets_from_dict(data, secret_names=None): # Recursively search nested structures elif isinstance(value, (dict, list)): extract_secrets_from_dict(value, secret_names) - + elif isinstance(data, list): for item in data: if isinstance(item, (dict, list)): extract_secrets_from_dict(item, secret_names) - + return secret_names @@ -116,7 +116,7 @@ def backupResources(dynClient: DynamicClient, namespace: str, kind: str, api_ver Backup resources of a given kind in a namespace. If name is provided, backs up that specific resource. If name is None, backs up all resources of that kind. 
- + Args: dynClient: Kubernetes dynamic client namespace: Namespace to backup from @@ -124,7 +124,7 @@ def backupResources(dynClient: DynamicClient, namespace: str, kind: str, api_ver api_version: API version (e.g., 'config.mas.ibm.com/v1') backup_path: Path to save backup files name: Optional specific resource name - + Returns: tuple: (backed_up_count: int, not_found_count: int, failed_count: int, discovered_secrets: set) """ @@ -132,10 +132,10 @@ def backupResources(dynClient: DynamicClient, namespace: str, kind: str, api_ver backed_up_count = 0 not_found_count = 0 failed_count = 0 - + try: resourceAPI = dynClient.resources.get(api_version=api_version, kind=kind) - + if name: # Backup specific named resource logger.info(f"Backing up {kind} '{name}' from namespace '{namespace}' (API version: {api_version})") @@ -156,19 +156,19 @@ def backupResources(dynClient: DynamicClient, namespace: str, kind: str, api_ver logger.info(f"Backing up all {kind} resources from namespace '{namespace}' (API version: {api_version})") resources = resourceAPI.get(namespace=namespace) resources_to_process = resources.items - + # Process each resource for resource in resources_to_process: resource_name = resource["metadata"]["name"] resource_dict = resource.to_dict() - + # Extract secrets from this resource if it's not a Secret itself if kind != 'Secret': secrets = extract_secrets_from_dict(resource_dict.get('spec', {})) if secrets: logger.info(f"Found {len(secrets)} secret reference(s) in {kind} '{resource_name}': {', '.join(sorted(secrets))}") discovered_secrets.update(secrets) - + # Backup the resource resource_file_path = f"{backup_path}/{resource_name}.yaml" filtered_resource = filterResourceData(resource_dict) @@ -178,14 +178,14 @@ def backupResources(dynClient: DynamicClient, namespace: str, kind: str, api_ver else: logger.error(f"Failed to back up {kind} '{resource_name}' to '{resource_file_path}'") failed_count += 1 - + if backed_up_count > 0: logger.info(f"Successfully backed up 
{backed_up_count} {kind} resource(s)") elif not name: logger.info(f"No {kind} resources found in namespace '{namespace}'") - + return (backed_up_count, not_found_count, failed_count, discovered_secrets) - + except NotFoundError: if name: logger.info(f"{kind} '{name}' not found in namespace '{namespace}'") diff --git a/test/src/test_backup.py b/test/src/test_backup.py index 2566f417..01ed0578 100644 --- a/test/src/test_backup.py +++ b/test/src/test_backup.py @@ -20,7 +20,7 @@ def test_create_single_directory(self, tmp_path): """Test creating a single backup directory""" test_dir = tmp_path / "backup1" result = createBackupDirectories([str(test_dir)]) - + assert result is True assert test_dir.exists() assert test_dir.is_dir() @@ -34,7 +34,7 @@ def test_create_multiple_directories(self, tmp_path): ] paths = [str(d) for d in test_dirs] result = createBackupDirectories(paths) - + assert result is True for test_dir in test_dirs: assert test_dir.exists() @@ -44,7 +44,7 @@ def test_create_nested_directories(self, tmp_path): """Test creating nested backup directories""" nested_dir = tmp_path / "level1" / "level2" / "level3" result = createBackupDirectories([str(nested_dir)]) - + assert result is True assert nested_dir.exists() assert nested_dir.is_dir() @@ -53,9 +53,9 @@ def test_create_existing_directory(self, tmp_path): """Test creating a directory that already exists""" test_dir = tmp_path / "existing" test_dir.mkdir() - + result = createBackupDirectories([str(test_dir)]) - + assert result is True assert test_dir.exists() @@ -67,18 +67,18 @@ def test_create_empty_list(self): def test_create_directory_permission_error(self, mocker): """Test handling of permission errors""" mock_makedirs = mocker.patch('os.makedirs', side_effect=PermissionError("Permission denied")) - + result = createBackupDirectories(["/invalid/path"]) - + assert result is False mock_makedirs.assert_called_once() def test_create_directory_os_error(self, mocker): """Test handling of OS errors""" 
mocker.patch('os.makedirs', side_effect=OSError("OS error")) - + result = createBackupDirectories(["/some/path"]) - + assert result is False @@ -89,12 +89,12 @@ def test_write_simple_dict(self, tmp_path): """Test writing a simple dictionary to YAML file""" test_file = tmp_path / "test.yaml" content = {"key1": "value1", "key2": "value2"} - + result = copyContentsToYamlFile(str(test_file), content) - + assert result is True assert test_file.exists() - + with open(test_file, 'r') as f: loaded_content = yaml.safe_load(f) assert loaded_content == content @@ -110,9 +110,9 @@ def test_write_nested_dict(self, tmp_path): }, "list": [1, 2, 3] } - + result = copyContentsToYamlFile(str(test_file), content) - + assert result is True with open(test_file, 'r') as f: loaded_content = yaml.safe_load(f) @@ -122,9 +122,9 @@ def test_write_empty_dict(self, tmp_path): """Test writing an empty dictionary""" test_file = tmp_path / "empty.yaml" content = {} - + result = copyContentsToYamlFile(str(test_file), content) - + assert result is True with open(test_file, 'r') as f: loaded_content = yaml.safe_load(f) @@ -135,14 +135,14 @@ def test_overwrite_existing_file(self, tmp_path): test_file = tmp_path / "overwrite.yaml" old_content = {"old": "data"} new_content = {"new": "data"} - + # Write initial content with open(test_file, 'w') as f: yaml.dump(old_content, f) - + # Overwrite with new content result = copyContentsToYamlFile(str(test_file), new_content) - + assert result is True with open(test_file, 'r') as f: loaded_content = yaml.safe_load(f) @@ -153,17 +153,17 @@ def test_write_to_nonexistent_directory(self, tmp_path): """Test writing to a file in a non-existent directory""" test_file = tmp_path / "nonexistent" / "test.yaml" content = {"key": "value"} - + result = copyContentsToYamlFile(str(test_file), content) - + assert result is False def test_write_permission_error(self, mocker): """Test handling of permission errors during write""" mocker.patch('builtins.open', 
side_effect=PermissionError("Permission denied")) - + result = copyContentsToYamlFile("/invalid/path.yaml", {"key": "value"}) - + assert result is False def test_write_with_special_characters(self, tmp_path): @@ -174,9 +174,9 @@ def test_write_with_special_characters(self, tmp_path): "unicode": "café ☕", "quotes": "value with 'quotes' and \"double quotes\"" } - + result = copyContentsToYamlFile(str(test_file), content) - + assert result is True with open(test_file, 'r') as f: loaded_content = yaml.safe_load(f) @@ -204,9 +204,9 @@ def test_filter_all_metadata_fields(self): }, "spec": {"replicas": 3} } - + result = filterResourceData(data) - + assert "name" in result["metadata"] assert "namespace" in result["metadata"] assert "annotations" not in result["metadata"] @@ -228,9 +228,9 @@ def test_filter_status_field(self): "conditions": [] } } - + result = filterResourceData(data) - + assert "status" not in result assert "spec" in result assert "metadata" in result @@ -244,9 +244,9 @@ def test_filter_partial_metadata(self): "labels": {"app": "test"} } } - + result = filterResourceData(data) - + assert "name" in result["metadata"] assert "labels" in result["metadata"] assert "uid" not in result["metadata"] @@ -258,9 +258,9 @@ def test_filter_no_metadata(self): "kind": "Resource", "spec": {"replicas": 3} } - + result = filterResourceData(data) - + assert "metadata" not in result assert "spec" in result assert "apiVersion" in result @@ -271,9 +271,9 @@ def test_filter_empty_metadata(self): "metadata": {}, "spec": {"replicas": 3} } - + result = filterResourceData(data) - + assert "metadata" in result assert result["metadata"] == {} @@ -291,9 +291,9 @@ def test_filter_preserves_other_fields(self): "key2": "value2" } } - + result = filterResourceData(data) - + assert result["apiVersion"] == "v1" assert result["kind"] == "ConfigMap" assert result["data"] == {"key1": "value1", "key2": "value2"} @@ -308,14 +308,14 @@ def test_filter_shallow_copy_behavior(self): }, "status": 
{"phase": "Running"} } - + result = filterResourceData(data) - + # Due to shallow copy, nested metadata dict is modified in original # but top-level status is not (it's deleted from copy only) assert "uid" not in data["metadata"] # Modified due to shallow copy assert "status" in data # Not modified (top-level key) - + # Result should not have uid and status assert "uid" not in result["metadata"] assert "status" not in result @@ -345,9 +345,9 @@ def test_filter_complex_resource(self): "readyReplicas": 3 } } - + result = filterResourceData(data) - + # Check preserved fields assert result["apiVersion"] == "apps/v1" assert result["kind"] == "Deployment" @@ -355,7 +355,7 @@ def test_filter_complex_resource(self): assert result["metadata"]["namespace"] == "default" assert result["metadata"]["labels"] == {"app": "myapp"} assert result["spec"]["replicas"] == 3 - + # Check removed fields assert "annotations" not in result["metadata"] assert "creationTimestamp" not in result["metadata"] From 3a735c81ffc863db18af824f836b5546db2c29d4 Mon Sep 17 00:00:00 2001 From: whitfiea Date: Wed, 14 Jan 2026 11:32:39 +0000 Subject: [PATCH 07/41] update backupResources to allow labels and optional namespace --- src/mas/devops/backup.py | 54 +++- src/mas/devops/mas/__init__.py | 1 + src/mas/devops/mas/suite.py | 44 +++ test/src/test_backup.py | 530 ++++++++++++++++++++++++++++++++- test/src/test_mas.py | 6 + 5 files changed, 620 insertions(+), 15 deletions(-) diff --git a/src/mas/devops/backup.py b/src/mas/devops/backup.py index 13f009d6..ebc1022c 100644 --- a/src/mas/devops/backup.py +++ b/src/mas/devops/backup.py @@ -111,19 +111,22 @@ def extract_secrets_from_dict(data, secret_names=None): return secret_names -def backupResources(dynClient: DynamicClient, namespace: str, kind: str, api_version: str, backup_path: str, name=None) -> tuple: +def backupResources(dynClient: DynamicClient, kind: str, api_version: str, backup_path: str, namespace=None, name=None, labels=None) -> tuple: """ - 
Backup resources of a given kind in a namespace. + Backup resources of a given kind. If name is provided, backs up that specific resource. If name is None, backs up all resources of that kind. + If namespace is None, backs up cluster-level resources. + If labels is provided, filters resources by label selectors. Args: dynClient: Kubernetes dynamic client - namespace: Namespace to backup from - kind: Resource kind (e.g., 'MongoCfg', 'Secret') + kind: Resource kind (e.g., 'MongoCfg', 'Secret', 'ClusterRole') api_version: API version (e.g., 'config.mas.ibm.com/v1') backup_path: Path to save backup files + namespace: Optional namespace to backup from (None for cluster-level resources) name: Optional specific resource name + labels: Optional list of label selectors (e.g., ['app=myapp', 'env=prod']) Returns: tuple: (backed_up_count: int, not_found_count: int, failed_count: int, discovered_secrets: set) @@ -133,28 +136,49 @@ def backupResources(dynClient: DynamicClient, namespace: str, kind: str, api_ver not_found_count = 0 failed_count = 0 + # Build label selector string if labels provided + label_selector = None + if labels: + label_selector = ','.join(labels) + + # Determine scope description for logging + scope_desc = f"namespace '{namespace}'" if namespace else "cluster-level" + label_desc = f" with labels [{label_selector}]" if label_selector else "" + try: resourceAPI = dynClient.resources.get(api_version=api_version, kind=kind) if name: # Backup specific named resource - logger.info(f"Backing up {kind} '{name}' from namespace '{namespace}' (API version: {api_version})") + logger.info(f"Backing up {kind} '{name}' from {scope_desc} (API version: {api_version}){label_desc}") try: - resource = resourceAPI.get(name=name, namespace=namespace) + if namespace: + resource = resourceAPI.get(name=name, namespace=namespace) + else: + resource = resourceAPI.get(name=name) + if resource: resources_to_process = [resource] else: - logger.info(f"{kind} '{name}' not found in 
namespace '{namespace}', skipping backup") + logger.info(f"{kind} '{name}' not found in {scope_desc}, skipping backup") not_found_count = 1 return (backed_up_count, not_found_count, failed_count, discovered_secrets) except NotFoundError: - logger.error(f"{kind} '{name}' not found in namespace '{namespace}', skipping backup") + logger.error(f"{kind} '{name}' not found in {scope_desc}, skipping backup") not_found_count = 1 return (backed_up_count, not_found_count, failed_count, discovered_secrets) else: # Backup all resources of this kind - logger.info(f"Backing up all {kind} resources from namespace '{namespace}' (API version: {api_version})") - resources = resourceAPI.get(namespace=namespace) + logger.info(f"Backing up all {kind} resources from {scope_desc} (API version: {api_version}){label_desc}") + + # Build get parameters + get_params = {} + if namespace: + get_params['namespace'] = namespace + if label_selector: + get_params['label_selector'] = label_selector + + resources = resourceAPI.get(**get_params) resources_to_process = resources.items # Process each resource @@ -170,7 +194,9 @@ def backupResources(dynClient: DynamicClient, namespace: str, kind: str, api_ver discovered_secrets.update(secrets) # Backup the resource - resource_file_path = f"{backup_path}/{resource_name}.yaml" + resource_backup_path = f"{backup_path}/resources/{kind.lower()}s" + createBackupDirectories([resource_backup_path]) + resource_file_path = f"{resource_backup_path}/{resource_name}.yaml" filtered_resource = filterResourceData(resource_dict) if copyContentsToYamlFile(resource_file_path, filtered_resource): logger.info(f"Successfully backed up {kind} '{resource_name}' to '{resource_file_path}'") @@ -182,16 +208,16 @@ def backupResources(dynClient: DynamicClient, namespace: str, kind: str, api_ver if backed_up_count > 0: logger.info(f"Successfully backed up {backed_up_count} {kind} resource(s)") elif not name: - logger.info(f"No {kind} resources found in namespace '{namespace}'") + 
logger.info(f"No {kind} resources found in {scope_desc}{label_desc}") return (backed_up_count, not_found_count, failed_count, discovered_secrets) except NotFoundError: if name: - logger.info(f"{kind} '{name}' not found in namespace '{namespace}'") + logger.info(f"{kind} '{name}' not found in {scope_desc}") not_found_count = 1 else: - logger.info(f"No {kind} resources found in namespace '{namespace}'") + logger.info(f"No {kind} resources found in {scope_desc}{label_desc}") return (backed_up_count, not_found_count, failed_count, discovered_secrets) except Exception as e: logger.error(f"Error backing up {kind} resources: {e}") diff --git a/src/mas/devops/mas/__init__.py b/src/mas/devops/mas/__init__.py index dfbecf04..333f109f 100644 --- a/src/mas/devops/mas/__init__.py +++ b/src/mas/devops/mas/__init__.py @@ -13,4 +13,5 @@ verifyMasInstance, getMasChannel, updateIBMEntitlementKey, + getMasPublicClusterIssuer, ) diff --git a/src/mas/devops/mas/suite.py b/src/mas/devops/mas/suite.py index df91c537..0cc46612 100644 --- a/src/mas/devops/mas/suite.py +++ b/src/mas/devops/mas/suite.py @@ -313,3 +313,47 @@ def updateIBMEntitlementKey(dynClient: DynamicClient, namespace: str, icrUsernam secret = secretsAPI.apply(body=secret, namespace=namespace) return secret + + +def getMasPublicClusterIssuer(dynClient: DynamicClient, instanceId: str) -> str | None: + """ + Retrieve the Public Cluster Issuer for a MAS instance. + + This function queries the Suite custom resource and attempts to retrieve the + certificate issuer name from spec.certificateIssuer.name. If the keys don't exist, + it returns the default issuer name. + + Args: + dynClient (DynamicClient): OpenShift dynamic client for cluster API interactions. + instanceId (str): The MAS instance identifier to use. + + Returns: + str: The name of the cluster issuer used for the passed in MAS Instance. 
+ Returns the default "mas-{instanceId}-core-public-issuer" if the suite + doesn't specify a custom issuer, or None if the suite is not found. + """ + try: + suitesAPI = dynClient.resources.get(api_version="core.mas.ibm.com/v1", kind="Suite") + suite = suitesAPI.get(name=instanceId, namespace=f"mas-{instanceId}-core") + + # Check if spec.certificateIssuer.name exists + if hasattr(suite, 'spec') and hasattr(suite.spec, 'certificateIssuer') and hasattr(suite.spec.certificateIssuer, 'name'): + issuerName = suite.spec.certificateIssuer.name + logger.debug(f"Found custom certificate issuer: {issuerName}") + return issuerName + + # Keys don't exist, return default + defaultIssuer = f"mas-{instanceId}-core-public-issuer" + logger.debug(f"No custom certificate issuer found, using default: {defaultIssuer}") + return defaultIssuer + + except NotFoundError: + logger.warning(f"Suite instance '{instanceId}' not found") + return None + except ResourceNotFoundError: + # The MAS Suite CRD has not even been installed in the cluster + logger.warning("MAS Suite CRD not found in the cluster") + return None + except UnauthorizedError as e: + logger.error(f"Error: Unable to retrieve MAS instance due to failed authorization: {e}") + return None diff --git a/test/src/test_backup.py b/test/src/test_backup.py index 01ed0578..dc0f1b55 100644 --- a/test/src/test_backup.py +++ b/test/src/test_backup.py @@ -9,8 +9,10 @@ # ***************************************************************************** import yaml +from unittest.mock import MagicMock, Mock +from openshift.dynamic.exceptions import NotFoundError -from mas.devops.backup import createBackupDirectories, copyContentsToYamlFile, filterResourceData +from mas.devops.backup import createBackupDirectories, copyContentsToYamlFile, filterResourceData, backupResources, extract_secrets_from_dict class TestCreateBackupDirectories: @@ -371,4 +373,530 @@ def test_filter_empty_dict(self): result = filterResourceData(data) assert result == {} + 
+class TestExtractSecretsFromDict: + """Tests for extract_secrets_from_dict function""" + + def test_extract_single_secret(self): + """Test extracting a single secret name""" + data = { + "spec": { + "secretName": "my-secret" + } + } + result = extract_secrets_from_dict(data) + assert result == {"my-secret"} + + def test_extract_multiple_secrets(self): + """Test extracting multiple secret names""" + data = { + "spec": { + "database": { + "secretName": "db-secret" + }, + "auth": { + "secretName": "auth-secret" + } + } + } + result = extract_secrets_from_dict(data) + assert result == {"db-secret", "auth-secret"} + + def test_extract_secrets_from_list(self): + """Test extracting secrets from list structures""" + data = { + "spec": { + "volumes": [ + {"secretName": "secret1"}, + {"secretName": "secret2"}, + {"configMap": "not-a-secret"} + ] + } + } + result = extract_secrets_from_dict(data) + assert result == {"secret1", "secret2"} + + def test_extract_nested_secrets(self): + """Test extracting deeply nested secrets""" + data = { + "level1": { + "level2": { + "level3": { + "secretName": "deep-secret" + } + } + } + } + result = extract_secrets_from_dict(data) + assert result == {"deep-secret"} + + def test_no_secrets_found(self): + """Test when no secrets are present""" + data = { + "spec": { + "replicas": 3, + "image": "myapp:latest" + } + } + result = extract_secrets_from_dict(data) + assert result == set() + + def test_empty_dict(self): + """Test with empty dictionary""" + result = extract_secrets_from_dict({}) + assert result == set() + + def test_ignore_empty_secret_name(self): + """Test that empty string secret names are ignored""" + data = { + "spec": { + "secretName": "", + "other": { + "secretName": "valid-secret" + } + } + } + result = extract_secrets_from_dict(data) + assert result == {"valid-secret"} + + def test_ignore_non_string_secret_name(self): + """Test that non-string secret names are ignored""" + data = { + "spec": { + "secretName": 123, + "other": { 
+ "secretName": "valid-secret" + } + } + } + result = extract_secrets_from_dict(data) + assert result == {"valid-secret"} + + def test_duplicate_secrets(self): + """Test that duplicate secret names are deduplicated""" + data = { + "spec": { + "volume1": {"secretName": "shared-secret"}, + "volume2": {"secretName": "shared-secret"}, + "volume3": {"secretName": "unique-secret"} + } + } + result = extract_secrets_from_dict(data) + assert result == {"shared-secret", "unique-secret"} + + +class TestBackupResources: + """Tests for backupResources function""" + + def test_backup_single_namespaced_resource(self, tmp_path, mocker): + """Test backing up a single namespaced resource by name""" + backup_path = str(tmp_path / "backup") + + # Mock resource data + mock_resource = { + "metadata": { + "name": "test-resource", + "namespace": "test-ns", + "uid": "abc-123" + }, + "spec": {"replicas": 3} + } + + # Create mock resource object with to_dict method + mock_resource_obj = MagicMock() + mock_resource_obj.__getitem__ = lambda self, key: mock_resource[key] + mock_resource_obj.to_dict.return_value = mock_resource + + # Mock the dynamic client + mock_client = MagicMock() + mock_api = MagicMock() + mock_api.get.return_value = mock_resource_obj + mock_client.resources.get.return_value = mock_api + + # Mock the helper functions + mocker.patch('mas.devops.backup.copyContentsToYamlFile', return_value=True) + + result = backupResources( + mock_client, + kind="ConfigMap", + api_version="v1", + backup_path=backup_path, + namespace="test-ns", + name="test-resource" + ) + + backed_up, not_found, failed, secrets = result + assert backed_up == 1 + assert not_found == 0 + assert failed == 0 + assert secrets == set() + + def test_backup_multiple_namespaced_resources(self, tmp_path, mocker): + """Test backing up all resources of a kind in a namespace""" + backup_path = str(tmp_path / "backup") + + # Mock multiple resources + mock_resources = [ + { + "metadata": {"name": "resource1", "namespace": 
"test-ns"}, + "spec": {"data": "value1"} + }, + { + "metadata": {"name": "resource2", "namespace": "test-ns"}, + "spec": {"data": "value2"} + } + ] + + # Create mock resource objects + mock_resource_objs = [] + for res in mock_resources: + mock_obj = MagicMock() + mock_obj.__getitem__ = lambda self, key, r=res: r[key] + mock_obj.to_dict.return_value = res + mock_resource_objs.append(mock_obj) + + # Mock the response with items + mock_response = MagicMock() + mock_response.items = mock_resource_objs + + # Mock the dynamic client + mock_client = MagicMock() + mock_api = MagicMock() + mock_api.get.return_value = mock_response + mock_client.resources.get.return_value = mock_api + + mocker.patch('mas.devops.backup.copyContentsToYamlFile', return_value=True) + + result = backupResources( + mock_client, + kind="ConfigMap", + api_version="v1", + backup_path=backup_path, + namespace="test-ns" + ) + + backed_up, not_found, failed, secrets = result + assert backed_up == 2 + assert not_found == 0 + assert failed == 0 + + def test_backup_cluster_level_resource(self, tmp_path, mocker): + """Test backing up cluster-level resources (no namespace)""" + backup_path = str(tmp_path / "backup") + + mock_resource = { + "metadata": {"name": "cluster-role"}, + "rules": [{"apiGroups": ["*"], "resources": ["*"], "verbs": ["*"]}] + } + + mock_resource_obj = MagicMock() + mock_resource_obj.__getitem__ = lambda self, key: mock_resource[key] + mock_resource_obj.to_dict.return_value = mock_resource + + mock_client = MagicMock() + mock_api = MagicMock() + mock_api.get.return_value = mock_resource_obj + mock_client.resources.get.return_value = mock_api + + mocker.patch('mas.devops.backup.copyContentsToYamlFile', return_value=True) + + result = backupResources( + mock_client, + kind="ClusterRole", + api_version="rbac.authorization.k8s.io/v1", + backup_path=backup_path, + name="cluster-role" + ) + + backed_up, not_found, failed, secrets = result + assert backed_up == 1 + assert not_found == 0 + 
assert failed == 0 + + def test_backup_with_label_selector(self, tmp_path, mocker): + """Test backing up resources with label selectors""" + backup_path = str(tmp_path / "backup") + + mock_resource = { + "metadata": { + "name": "labeled-resource", + "namespace": "test-ns", + "labels": {"app": "myapp", "env": "prod"} + }, + "spec": {} + } + + mock_resource_obj = MagicMock() + mock_resource_obj.__getitem__ = lambda self, key: mock_resource[key] + mock_resource_obj.to_dict.return_value = mock_resource + + mock_response = MagicMock() + mock_response.items = [mock_resource_obj] + + mock_client = MagicMock() + mock_api = MagicMock() + mock_api.get.return_value = mock_response + mock_client.resources.get.return_value = mock_api + + mocker.patch('mas.devops.backup.copyContentsToYamlFile', return_value=True) + + result = backupResources( + mock_client, + kind="ConfigMap", + api_version="v1", + backup_path=backup_path, + namespace="test-ns", + labels=["app=myapp", "env=prod"] + ) + + backed_up, not_found, failed, secrets = result + assert backed_up == 1 + assert not_found == 0 + assert failed == 0 + + # Verify label selector was passed correctly + mock_api.get.assert_called_once_with(namespace="test-ns", label_selector="app=myapp,env=prod") + + def test_backup_resource_not_found_by_name(self, mocker): + """Test handling when a specific named resource is not found""" + mock_client = MagicMock() + mock_api = MagicMock() + mock_api.get.side_effect = NotFoundError(Mock()) + mock_client.resources.get.return_value = mock_api + + result = backupResources( + mock_client, + kind="ConfigMap", + api_version="v1", + backup_path="/tmp/backup", + namespace="test-ns", + name="nonexistent" + ) + + backed_up, not_found, failed, secrets = result + assert backed_up == 0 + assert not_found == 1 + assert failed == 0 + assert secrets == set() + + def test_backup_no_resources_found(self, mocker): + """Test when no resources of the kind exist""" + mock_response = MagicMock() + mock_response.items = 
[] + + mock_client = MagicMock() + mock_api = MagicMock() + mock_api.get.return_value = mock_response + mock_client.resources.get.return_value = mock_api + + result = backupResources( + mock_client, + kind="ConfigMap", + api_version="v1", + backup_path="/tmp/backup", + namespace="test-ns" + ) + + backed_up, not_found, failed, secrets = result + assert backed_up == 0 + assert not_found == 0 + assert failed == 0 + + def test_backup_discovers_secrets(self, tmp_path, mocker): + """Test that secrets are discovered from resource specs""" + backup_path = str(tmp_path / "backup") + + mock_resource = { + "metadata": {"name": "app-deployment", "namespace": "test-ns"}, + "spec": { + "template": { + "spec": { + "volumes": [ + {"secretName": "db-credentials"}, + {"secretName": "api-key"} + ] + } + } + } + } + + mock_resource_obj = MagicMock() + mock_resource_obj.__getitem__ = lambda self, key: mock_resource[key] + mock_resource_obj.to_dict.return_value = mock_resource + + mock_client = MagicMock() + mock_api = MagicMock() + mock_api.get.return_value = mock_resource_obj + mock_client.resources.get.return_value = mock_api + + mocker.patch('mas.devops.backup.copyContentsToYamlFile', return_value=True) + + result = backupResources( + mock_client, + kind="Deployment", + api_version="apps/v1", + backup_path=backup_path, + namespace="test-ns", + name="app-deployment" + ) + + backed_up, not_found, failed, secrets = result + assert backed_up == 1 + assert secrets == {"db-credentials", "api-key"} + + def test_backup_secret_does_not_discover_itself(self, tmp_path, mocker): + """Test that backing up Secrets doesn't try to discover secrets""" + backup_path = str(tmp_path / "backup") + + mock_resource = { + "metadata": {"name": "my-secret", "namespace": "test-ns"}, + "data": {"password": "encoded-value"} + } + + mock_resource_obj = MagicMock() + mock_resource_obj.__getitem__ = lambda self, key: mock_resource[key] + mock_resource_obj.to_dict.return_value = mock_resource + + mock_client = 
MagicMock() + mock_api = MagicMock() + mock_api.get.return_value = mock_resource_obj + mock_client.resources.get.return_value = mock_api + + mocker.patch('mas.devops.backup.copyContentsToYamlFile', return_value=True) + + result = backupResources( + mock_client, + kind="Secret", + api_version="v1", + backup_path=backup_path, + namespace="test-ns", + name="my-secret" + ) + + backed_up, not_found, failed, secrets = result + assert backed_up == 1 + assert secrets == set() # Should not discover secrets from Secret resources + + def test_backup_write_failure(self, tmp_path, mocker): + """Test handling when writing backup file fails""" + backup_path = str(tmp_path / "backup") + + mock_resource = { + "metadata": {"name": "test-resource", "namespace": "test-ns"}, + "spec": {} + } + + mock_resource_obj = MagicMock() + mock_resource_obj.__getitem__ = lambda self, key: mock_resource[key] + mock_resource_obj.to_dict.return_value = mock_resource + + mock_client = MagicMock() + mock_api = MagicMock() + mock_api.get.return_value = mock_resource_obj + mock_client.resources.get.return_value = mock_api + + # Mock copyContentsToYamlFile to fail + mocker.patch('mas.devops.backup.copyContentsToYamlFile', return_value=False) + + result = backupResources( + mock_client, + kind="ConfigMap", + api_version="v1", + backup_path=backup_path, + namespace="test-ns", + name="test-resource" + ) + + backed_up, not_found, failed, secrets = result + assert backed_up == 0 + assert not_found == 0 + assert failed == 1 + + def test_backup_api_exception(self, mocker): + """Test handling of general API exceptions""" + mock_client = MagicMock() + mock_client.resources.get.side_effect = Exception("API error") + + result = backupResources( + mock_client, + kind="ConfigMap", + api_version="v1", + backup_path="/tmp/backup", + namespace="test-ns" + ) + + backed_up, not_found, failed, secrets = result + assert backed_up == 0 + assert not_found == 0 + assert failed == 1 + + def 
test_backup_mixed_success_and_failure(self, tmp_path, mocker): + """Test backing up multiple resources with mixed success/failure""" + backup_path = str(tmp_path / "backup") + + mock_resources = [ + { + "metadata": {"name": "resource1", "namespace": "test-ns"}, + "spec": {} + }, + { + "metadata": {"name": "resource2", "namespace": "test-ns"}, + "spec": {} + }, + { + "metadata": {"name": "resource3", "namespace": "test-ns"}, + "spec": {} + } + ] + + mock_resource_objs = [] + for res in mock_resources: + mock_obj = MagicMock() + mock_obj.__getitem__ = lambda self, key, r=res: r[key] + mock_obj.to_dict.return_value = res + mock_resource_objs.append(mock_obj) + + mock_response = MagicMock() + mock_response.items = mock_resource_objs + + mock_client = MagicMock() + mock_api = MagicMock() + mock_api.get.return_value = mock_response + mock_client.resources.get.return_value = mock_api + + # Mock copyContentsToYamlFile to succeed for first two, fail for third + mock_copy = mocker.patch('mas.devops.backup.copyContentsToYamlFile') + mock_copy.side_effect = [True, True, False] + + result = backupResources( + mock_client, + kind="ConfigMap", + api_version="v1", + backup_path=backup_path, + namespace="test-ns" + ) + + backed_up, not_found, failed, secrets = result + assert backed_up == 2 + assert not_found == 0 + assert failed == 1 + + def test_backup_resource_kind_not_found(self, mocker): + """Test when the resource kind itself is not found in the API""" + mock_client = MagicMock() + mock_client.resources.get.side_effect = NotFoundError(Mock()) + + result = backupResources( + mock_client, + kind="NonExistentKind", + api_version="v1", + backup_path="/tmp/backup", + namespace="test-ns" + ) + + backed_up, not_found, failed, secrets = result + assert backed_up == 0 + assert not_found == 0 + assert failed == 0 + # Made with Bob diff --git a/test/src/test_mas.py b/test/src/test_mas.py index 10583926..3902a1e7 100644 --- a/test/src/test_mas.py +++ b/test/src/test_mas.py @@ -64,6 
+64,12 @@ def test_is_airgap_install(): assert mas.isAirgapInstall(dynClient, checkICSP=False) is False +def test_get_mas_public_cluster_issuer(): + # Test with non-existent instance - should return None + issuer = mas.getMasPublicClusterIssuer(dynClient, "doesnotexist") + assert issuer is None + + # def test_is_app_ready(): # mas.waitForAppReady(dynClient, "fvtcpd", "iot") # mas.waitForAppReady(dynClient, "fvtcpd", "iot", "masdev") From e6be8d7cbf5bdfb304d578a642f760adf020978f Mon Sep 17 00:00:00 2001 From: whitfiea Date: Wed, 14 Jan 2026 11:36:39 +0000 Subject: [PATCH 08/41] pre-commit changes --- .secrets.baseline | 12 +++- src/mas/devops/backup.py | 6 +- src/mas/devops/mas/suite.py | 2 +- test/src/test_backup.py | 122 ++++++++++++++++++------------------ 4 files changed, 76 insertions(+), 66 deletions(-) diff --git a/.secrets.baseline b/.secrets.baseline index 9324636a..e0564e09 100644 --- a/.secrets.baseline +++ b/.secrets.baseline @@ -3,7 +3,7 @@ "files": "^.secrets.baseline$", "lines": null }, - "generated_at": "2025-12-25T19:13:06Z", + "generated_at": "2026-01-14T11:35:47Z", "plugins_used": [ { "name": "AWSKeyDetector" } ], @@ -143,6 +143,16 @@ "verified_result": null } ], + "test/src/test_backup.py": [ + { + "hashed_secret": "4dfd3a58b4820476afe7efa2e2c52b267eec876a", + "is_secret": false, + "is_verified": false, + "line_number": 753, + "type": "Secret Keyword", + "verified_result": null + } + ], "test/src/test_db2.py": [ { "hashed_secret": "a4b48a81cdab1e1a5dd37907d6c85ca1c61ddc7c", diff --git a/src/mas/devops/backup.py b/src/mas/devops/backup.py index ebc1022c..4b62f74c 100644 --- a/src/mas/devops/backup.py +++ b/src/mas/devops/backup.py @@ -156,7 +156,7 @@ def backupResources(dynClient: DynamicClient, kind: str, api_version: str, backu resource = resourceAPI.get(name=name, namespace=namespace) else: resource = resourceAPI.get(name=name) - + if resource: resources_to_process = [resource] else: @@ -170,14 +170,14 @@ def backupResources(dynClient:
DynamicClient, kind: str, api_version: str, backu else: # Backup all resources of this kind logger.info(f"Backing up all {kind} resources from {scope_desc} (API version: {api_version}){label_desc}") - + # Build get parameters get_params = {} if namespace: get_params['namespace'] = namespace if label_selector: get_params['label_selector'] = label_selector - + resources = resourceAPI.get(**get_params) resources_to_process = resources.items diff --git a/src/mas/devops/mas/suite.py b/src/mas/devops/mas/suite.py index 0cc46612..cff2580b 100644 --- a/src/mas/devops/mas/suite.py +++ b/src/mas/devops/mas/suite.py @@ -341,7 +341,7 @@ def getMasPublicClusterIssuer(dynClient: DynamicClient, instanceId: str) -> str issuerName = suite.spec.certificateIssuer.name logger.debug(f"Found custom certificate issuer: {issuerName}") return issuerName - + # Keys don't exist, return default defaultIssuer = f"mas-{instanceId}-core-public-issuer" logger.debug(f"No custom certificate issuer found, using default: {defaultIssuer}") diff --git a/test/src/test_backup.py b/test/src/test_backup.py index dc0f1b55..af52d382 100644 --- a/test/src/test_backup.py +++ b/test/src/test_backup.py @@ -491,7 +491,7 @@ class TestBackupResources: def test_backup_single_namespaced_resource(self, tmp_path, mocker): """Test backing up a single namespaced resource by name""" backup_path = str(tmp_path / "backup") - + # Mock resource data mock_resource = { "metadata": { @@ -501,21 +501,21 @@ def test_backup_single_namespaced_resource(self, tmp_path, mocker): }, "spec": {"replicas": 3} } - + # Create mock resource object with to_dict method mock_resource_obj = MagicMock() mock_resource_obj.__getitem__ = lambda self, key: mock_resource[key] mock_resource_obj.to_dict.return_value = mock_resource - + # Mock the dynamic client mock_client = MagicMock() mock_api = MagicMock() mock_api.get.return_value = mock_resource_obj mock_client.resources.get.return_value = mock_api - + # Mock the helper functions 
mocker.patch('mas.devops.backup.copyContentsToYamlFile', return_value=True) - + result = backupResources( mock_client, kind="ConfigMap", @@ -524,7 +524,7 @@ def test_backup_single_namespaced_resource(self, tmp_path, mocker): namespace="test-ns", name="test-resource" ) - + backed_up, not_found, failed, secrets = result assert backed_up == 1 assert not_found == 0 @@ -534,7 +534,7 @@ def test_backup_single_namespaced_resource(self, tmp_path, mocker): def test_backup_multiple_namespaced_resources(self, tmp_path, mocker): """Test backing up all resources of a kind in a namespace""" backup_path = str(tmp_path / "backup") - + # Mock multiple resources mock_resources = [ { @@ -546,7 +546,7 @@ def test_backup_multiple_namespaced_resources(self, tmp_path, mocker): "spec": {"data": "value2"} } ] - + # Create mock resource objects mock_resource_objs = [] for res in mock_resources: @@ -554,19 +554,19 @@ def test_backup_multiple_namespaced_resources(self, tmp_path, mocker): mock_obj.__getitem__ = lambda self, key, r=res: r[key] mock_obj.to_dict.return_value = res mock_resource_objs.append(mock_obj) - + # Mock the response with items mock_response = MagicMock() mock_response.items = mock_resource_objs - + # Mock the dynamic client mock_client = MagicMock() mock_api = MagicMock() mock_api.get.return_value = mock_response mock_client.resources.get.return_value = mock_api - + mocker.patch('mas.devops.backup.copyContentsToYamlFile', return_value=True) - + result = backupResources( mock_client, kind="ConfigMap", @@ -574,7 +574,7 @@ def test_backup_multiple_namespaced_resources(self, tmp_path, mocker): backup_path=backup_path, namespace="test-ns" ) - + backed_up, not_found, failed, secrets = result assert backed_up == 2 assert not_found == 0 @@ -583,23 +583,23 @@ def test_backup_multiple_namespaced_resources(self, tmp_path, mocker): def test_backup_cluster_level_resource(self, tmp_path, mocker): """Test backing up cluster-level resources (no namespace)""" backup_path = str(tmp_path / 
"backup") - + mock_resource = { "metadata": {"name": "cluster-role"}, "rules": [{"apiGroups": ["*"], "resources": ["*"], "verbs": ["*"]}] } - + mock_resource_obj = MagicMock() mock_resource_obj.__getitem__ = lambda self, key: mock_resource[key] mock_resource_obj.to_dict.return_value = mock_resource - + mock_client = MagicMock() mock_api = MagicMock() mock_api.get.return_value = mock_resource_obj mock_client.resources.get.return_value = mock_api - + mocker.patch('mas.devops.backup.copyContentsToYamlFile', return_value=True) - + result = backupResources( mock_client, kind="ClusterRole", @@ -607,7 +607,7 @@ def test_backup_cluster_level_resource(self, tmp_path, mocker): backup_path=backup_path, name="cluster-role" ) - + backed_up, not_found, failed, secrets = result assert backed_up == 1 assert not_found == 0 @@ -616,7 +616,7 @@ def test_backup_cluster_level_resource(self, tmp_path, mocker): def test_backup_with_label_selector(self, tmp_path, mocker): """Test backing up resources with label selectors""" backup_path = str(tmp_path / "backup") - + mock_resource = { "metadata": { "name": "labeled-resource", @@ -625,21 +625,21 @@ def test_backup_with_label_selector(self, tmp_path, mocker): }, "spec": {} } - + mock_resource_obj = MagicMock() mock_resource_obj.__getitem__ = lambda self, key: mock_resource[key] mock_resource_obj.to_dict.return_value = mock_resource - + mock_response = MagicMock() mock_response.items = [mock_resource_obj] - + mock_client = MagicMock() mock_api = MagicMock() mock_api.get.return_value = mock_response mock_client.resources.get.return_value = mock_api - + mocker.patch('mas.devops.backup.copyContentsToYamlFile', return_value=True) - + result = backupResources( mock_client, kind="ConfigMap", @@ -648,12 +648,12 @@ def test_backup_with_label_selector(self, tmp_path, mocker): namespace="test-ns", labels=["app=myapp", "env=prod"] ) - + backed_up, not_found, failed, secrets = result assert backed_up == 1 assert not_found == 0 assert failed == 0 - + # 
Verify label selector was passed correctly mock_api.get.assert_called_once_with(namespace="test-ns", label_selector="app=myapp,env=prod") @@ -663,7 +663,7 @@ def test_backup_resource_not_found_by_name(self, mocker): mock_api = MagicMock() mock_api.get.side_effect = NotFoundError(Mock()) mock_client.resources.get.return_value = mock_api - + result = backupResources( mock_client, kind="ConfigMap", @@ -672,7 +672,7 @@ def test_backup_resource_not_found_by_name(self, mocker): namespace="test-ns", name="nonexistent" ) - + backed_up, not_found, failed, secrets = result assert backed_up == 0 assert not_found == 1 @@ -683,12 +683,12 @@ def test_backup_no_resources_found(self, mocker): """Test when no resources of the kind exist""" mock_response = MagicMock() mock_response.items = [] - + mock_client = MagicMock() mock_api = MagicMock() mock_api.get.return_value = mock_response mock_client.resources.get.return_value = mock_api - + result = backupResources( mock_client, kind="ConfigMap", @@ -696,7 +696,7 @@ def test_backup_no_resources_found(self, mocker): backup_path="/tmp/backup", namespace="test-ns" ) - + backed_up, not_found, failed, secrets = result assert backed_up == 0 assert not_found == 0 @@ -705,7 +705,7 @@ def test_backup_no_resources_found(self, mocker): def test_backup_discovers_secrets(self, tmp_path, mocker): """Test that secrets are discovered from resource specs""" backup_path = str(tmp_path / "backup") - + mock_resource = { "metadata": {"name": "app-deployment", "namespace": "test-ns"}, "spec": { @@ -719,18 +719,18 @@ def test_backup_discovers_secrets(self, tmp_path, mocker): } } } - + mock_resource_obj = MagicMock() mock_resource_obj.__getitem__ = lambda self, key: mock_resource[key] mock_resource_obj.to_dict.return_value = mock_resource - + mock_client = MagicMock() mock_api = MagicMock() mock_api.get.return_value = mock_resource_obj mock_client.resources.get.return_value = mock_api - + mocker.patch('mas.devops.backup.copyContentsToYamlFile', 
return_value=True) - + result = backupResources( mock_client, kind="Deployment", @@ -739,7 +739,7 @@ def test_backup_discovers_secrets(self, tmp_path, mocker): namespace="test-ns", name="app-deployment" ) - + backed_up, not_found, failed, secrets = result assert backed_up == 1 assert secrets == {"db-credentials", "api-key"} @@ -747,23 +747,23 @@ def test_backup_discovers_secrets(self, tmp_path, mocker): def test_backup_secret_does_not_discover_itself(self, tmp_path, mocker): """Test that backing up Secrets doesn't try to discover secrets""" backup_path = str(tmp_path / "backup") - + mock_resource = { "metadata": {"name": "my-secret", "namespace": "test-ns"}, "data": {"password": "encoded-value"} } - + mock_resource_obj = MagicMock() mock_resource_obj.__getitem__ = lambda self, key: mock_resource[key] mock_resource_obj.to_dict.return_value = mock_resource - + mock_client = MagicMock() mock_api = MagicMock() mock_api.get.return_value = mock_resource_obj mock_client.resources.get.return_value = mock_api - + mocker.patch('mas.devops.backup.copyContentsToYamlFile', return_value=True) - + result = backupResources( mock_client, kind="Secret", @@ -772,7 +772,7 @@ def test_backup_secret_does_not_discover_itself(self, tmp_path, mocker): namespace="test-ns", name="my-secret" ) - + backed_up, not_found, failed, secrets = result assert backed_up == 1 assert secrets == set() # Should not discover secrets from Secret resources @@ -780,24 +780,24 @@ def test_backup_secret_does_not_discover_itself(self, tmp_path, mocker): def test_backup_write_failure(self, tmp_path, mocker): """Test handling when writing backup file fails""" backup_path = str(tmp_path / "backup") - + mock_resource = { "metadata": {"name": "test-resource", "namespace": "test-ns"}, "spec": {} } - + mock_resource_obj = MagicMock() mock_resource_obj.__getitem__ = lambda self, key: mock_resource[key] mock_resource_obj.to_dict.return_value = mock_resource - + mock_client = MagicMock() mock_api = MagicMock() 
mock_api.get.return_value = mock_resource_obj mock_client.resources.get.return_value = mock_api - + # Mock copyContentsToYamlFile to fail mocker.patch('mas.devops.backup.copyContentsToYamlFile', return_value=False) - + result = backupResources( mock_client, kind="ConfigMap", @@ -806,7 +806,7 @@ def test_backup_write_failure(self, tmp_path, mocker): namespace="test-ns", name="test-resource" ) - + backed_up, not_found, failed, secrets = result assert backed_up == 0 assert not_found == 0 @@ -816,7 +816,7 @@ def test_backup_api_exception(self, mocker): """Test handling of general API exceptions""" mock_client = MagicMock() mock_client.resources.get.side_effect = Exception("API error") - + result = backupResources( mock_client, kind="ConfigMap", @@ -824,7 +824,7 @@ def test_backup_api_exception(self, mocker): backup_path="/tmp/backup", namespace="test-ns" ) - + backed_up, not_found, failed, secrets = result assert backed_up == 0 assert not_found == 0 @@ -833,7 +833,7 @@ def test_backup_api_exception(self, mocker): def test_backup_mixed_success_and_failure(self, tmp_path, mocker): """Test backing up multiple resources with mixed success/failure""" backup_path = str(tmp_path / "backup") - + mock_resources = [ { "metadata": {"name": "resource1", "namespace": "test-ns"}, @@ -848,26 +848,26 @@ def test_backup_mixed_success_and_failure(self, tmp_path, mocker): "spec": {} } ] - + mock_resource_objs = [] for res in mock_resources: mock_obj = MagicMock() mock_obj.__getitem__ = lambda self, key, r=res: r[key] mock_obj.to_dict.return_value = res mock_resource_objs.append(mock_obj) - + mock_response = MagicMock() mock_response.items = mock_resource_objs - + mock_client = MagicMock() mock_api = MagicMock() mock_api.get.return_value = mock_response mock_client.resources.get.return_value = mock_api - + # Mock copyContentsToYamlFile to succeed for first two, fail for third mock_copy = mocker.patch('mas.devops.backup.copyContentsToYamlFile') mock_copy.side_effect = [True, True, False] - 
+ result = backupResources( mock_client, kind="ConfigMap", @@ -875,7 +875,7 @@ def test_backup_mixed_success_and_failure(self, tmp_path, mocker): backup_path=backup_path, namespace="test-ns" ) - + backed_up, not_found, failed, secrets = result assert backed_up == 2 assert not_found == 0 @@ -885,7 +885,7 @@ def test_backup_resource_kind_not_found(self, mocker): """Test when the resource kind itself is not found in the API""" mock_client = MagicMock() mock_client.resources.get.side_effect = NotFoundError(Mock()) - + result = backupResources( mock_client, kind="NonExistentKind", @@ -893,7 +893,7 @@ def test_backup_resource_kind_not_found(self, mocker): backup_path="/tmp/backup", namespace="test-ns" ) - + backed_up, not_found, failed, secrets = result assert backed_up == 0 assert not_found == 0 From 436129649ff134406203cbf5e1ea5571e78b27b4 Mon Sep 17 00:00:00 2001 From: Sanjay Prabhakar Date: Wed, 14 Jan 2026 14:44:14 +0000 Subject: [PATCH 09/41] [patch] added ownerReferences to the list to filter --- src/mas/devops/backup.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/mas/devops/backup.py b/src/mas/devops/backup.py index 4b62f74c..156e6f3b 100644 --- a/src/mas/devops/backup.py +++ b/src/mas/devops/backup.py @@ -64,6 +64,7 @@ def filterResourceData(data: dict) -> dict: 'generation', 'resourceVersion', 'selfLink', + 'ownerReferences' 'uid', 'managedFields' ] From 78f4062a49266fb419512afe51d9a01b89f161ba Mon Sep 17 00:00:00 2001 From: Sanjay Prabhakar Date: Wed, 14 Jan 2026 21:06:53 +0000 Subject: [PATCH 10/41] [patch] updated secret extraction function --- src/mas/devops/backup.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/mas/devops/backup.py b/src/mas/devops/backup.py index 156e6f3b..0c0b720b 100644 --- a/src/mas/devops/backup.py +++ b/src/mas/devops/backup.py @@ -83,7 +83,7 @@ def filterResourceData(data: dict) -> dict: def extract_secrets_from_dict(data, secret_names=None): """ Recursively extract secret names from a 
dictionary structure. - Looks for keys named 'secretName' and collects their values. + Looks for keys like 'secretName' and 'secretRef.name' and collects their values. Args: data: Dictionary to search @@ -98,8 +98,12 @@ def extract_secrets_from_dict(data, secret_names=None): if isinstance(data, dict): for key, value in data.items(): # Check if this key is 'secretName' and has a string value - if key == 'secretName' and isinstance(value, str) and value: + if (key == 'secretName' or 'secretname' in key.lower()) and isinstance(value, str) and value: secret_names.add(value) + # Check if this key contains 'secretRef' and contains a 'name' field + elif 'SecretRef' in key and isinstance(value, dict): + if 'name' in value and isinstance(value['name'], str) and value['name']: + secret_names.add(value['name']) # Recursively search nested structures elif isinstance(value, (dict, list)): extract_secrets_from_dict(value, secret_names) From d37558c165c8aaf349bd37c6b46372ca777ef92c Mon Sep 17 00:00:00 2001 From: whitfiea Date: Mon, 19 Jan 2026 14:54:44 +0000 Subject: [PATCH 11/41] Add restore function --- src/mas/devops/backup.py | 4 +- src/mas/devops/restore.py | 122 ++++++++++++ test/src/test_restore.py | 397 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 522 insertions(+), 1 deletion(-) create mode 100644 src/mas/devops/restore.py create mode 100644 test/src/test_restore.py diff --git a/src/mas/devops/backup.py b/src/mas/devops/backup.py index 0c0b720b..e134a77f 100644 --- a/src/mas/devops/backup.py +++ b/src/mas/devops/backup.py @@ -64,7 +64,7 @@ def filterResourceData(data: dict) -> dict: 'generation', 'resourceVersion', 'selfLink', - 'ownerReferences' + 'ownerReferences', 'uid', 'managedFields' ] @@ -228,3 +228,5 @@ def backupResources(dynClient: DynamicClient, kind: str, api_version: str, backu logger.error(f"Error backing up {kind} resources: {e}") failed_count = 1 return (backed_up_count, not_found_count, failed_count, discovered_secrets) + + diff --git 
a/src/mas/devops/restore.py b/src/mas/devops/restore.py new file mode 100644 index 00000000..52bfbac7 --- /dev/null +++ b/src/mas/devops/restore.py @@ -0,0 +1,122 @@ +# ***************************************************************************** +# Copyright (c) 2026 IBM Corporation and other Contributors. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Eclipse Public License v1.0 +# which accompanies this distribution, and is available at +# http://www.eclipse.org/legal/epl-v10.html +# +# ***************************************************************************** +import logging +import yaml +from openshift.dynamic import DynamicClient +from openshift.dynamic.exceptions import NotFoundError + +logger = logging.getLogger(name=__name__) + + +def loadYamlFile(file_path: str): + """ + Load YAML content from a file + + Args: + file_path: Path to the YAML file + + Returns: + dict: Parsed YAML content or None if error + """ + try: + with open(file_path, 'r') as yaml_file: + content = yaml.safe_load(yaml_file) + return content + except Exception as e: + logger.error(f"Error reading YAML file {file_path}: {e}") + return None + + +def restoreResource(dynClient: DynamicClient, resource_data: dict, namespace=None, replace_resource=True) -> tuple: + """ + Restore a single Kubernetes resource from its YAML representation. + If the resource exists and replace_resource is True, it will be updated (replaced). + If the resource exists and replace_resource is False, it will be skipped. + If the resource doesn't exist, it will be created. 
+ + Args: + dynClient: Kubernetes dynamic client + resource_data: Dictionary containing the resource definition + namespace: Optional namespace override (uses resource's namespace if not provided) + replace_resource: If True, replace existing resources; if False, skip them (default: True) + + Returns: + tuple: (success: bool, resource_name: str, status_message: str or None) + - success: True if created, updated, or skipped; False if failed + - resource_name: Name of the resource + - status_message: None if created, "updated" if replaced, "skipped" if exists and not replaced, error message if failed + """ + try: + # Extract resource metadata + kind = resource_data.get('kind') + api_version = resource_data.get('apiVersion') + metadata = resource_data.get('metadata', {}) + resource_name = metadata.get('name') + resource_namespace = namespace or metadata.get('namespace') + + if not kind or not api_version or not resource_name: + error_msg = "Resource missing required fields (kind, apiVersion, or name)" + logger.error(error_msg) + return (False, resource_name or 'unknown', error_msg) + + # Get the resource API + resourceAPI = dynClient.resources.get(api_version=api_version, kind=kind) + + # Determine scope description for logging + scope_desc = f"namespace '{resource_namespace}'" if resource_namespace else "cluster-level" + + # Check if resource already exists + resource_exists = False + existing_resource = None + try: + if resource_namespace: + existing_resource = resourceAPI.get(name=resource_name, namespace=resource_namespace) + else: + existing_resource = resourceAPI.get(name=resource_name) + resource_exists = existing_resource is not None + except NotFoundError: + resource_exists = False + + # Apply the resource (create, update, or skip) + try: + if resource_exists: + if replace_resource: + # Resource exists - update it using strategic merge patch + logger.info(f"Patching existing {kind} '{resource_name}' in {scope_desc}") + + if resource_namespace: + 
resourceAPI.patch(body=resource_data, name=resource_name, namespace=resource_namespace, content_type='application/merge-patch+json') + else: + resourceAPI.patch(body=resource_data, name=resource_name, content_type='application/merge-patch+json') + logger.info(f"Successfully patched {kind} '{resource_name}' in {scope_desc}") + return (True, resource_name, "updated") + else: + # Resource exists but replace_resource is False - skip it + logger.info(f"{kind} '{resource_name}' already exists in {scope_desc}, skipping (replace_resource=False)") + return (True, resource_name, "skipped") + else: + # Resource doesn't exist - create it + logger.info(f"Creating {kind} '{resource_name}' in {scope_desc}") + if resource_namespace: + resourceAPI.create(body=resource_data, namespace=resource_namespace) + else: + resourceAPI.create(body=resource_data) + logger.info(f"Successfully created {kind} '{resource_name}' in {scope_desc}") + return (True, resource_name, None) + except Exception as e: + action = "update" if resource_exists else "create" + error_msg = f"Failed to {action} {kind} '{resource_name}': {e}" + logger.error(error_msg) + return (False, resource_name, error_msg) + + except Exception as e: + error_msg = f"Error restoring resource: {e}" + logger.error(error_msg) + return (False, resource_data.get('metadata', {}).get('name', 'unknown'), error_msg) \ No newline at end of file diff --git a/test/src/test_restore.py b/test/src/test_restore.py new file mode 100644 index 00000000..25e43a06 --- /dev/null +++ b/test/src/test_restore.py @@ -0,0 +1,397 @@ +# ***************************************************************************** +# Copyright (c) 2026 IBM Corporation and other Contributors. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Eclipse Public License v1.0 +# which accompanies this distribution, and is available at +# http://www.eclipse.org/legal/epl-v10.html +# +# ***************************************************************************** + +import yaml +from unittest.mock import MagicMock, Mock +from openshift.dynamic.exceptions import NotFoundError + +from mas.devops.restore import loadYamlFile, restoreResource + + +class TestLoadYamlFile: + """Tests for loadYamlFile function""" + + def test_load_valid_yaml_file(self, tmp_path): + """Test loading a valid YAML file""" + yaml_content = { + 'apiVersion': 'v1', + 'kind': 'ConfigMap', + 'metadata': { + 'name': 'test-config', + 'namespace': 'test-ns' + } + } + + yaml_file = tmp_path / "test.yaml" + with open(yaml_file, 'w') as f: + yaml.dump(yaml_content, f) + + result = loadYamlFile(str(yaml_file)) + + assert result is not None + assert result['kind'] == 'ConfigMap' + assert result['metadata']['name'] == 'test-config' + + def test_load_empty_yaml_file(self, tmp_path): + """Test loading an empty YAML file""" + yaml_file = tmp_path / "empty.yaml" + yaml_file.write_text("") + + result = loadYamlFile(str(yaml_file)) + + assert result is None + + def test_load_nonexistent_file(self): + """Test loading a non-existent file""" + result = loadYamlFile("/nonexistent/path/file.yaml") + + assert result is None + + def test_load_invalid_yaml_file(self, tmp_path): + """Test loading an invalid YAML file""" + yaml_file = tmp_path / "invalid.yaml" + yaml_file.write_text("invalid: yaml: content: [") + + result = loadYamlFile(str(yaml_file)) + + assert result is None + + def test_load_yaml_with_multiple_documents(self, tmp_path): + """Test loading YAML file with multiple documents returns None (not supported)""" + yaml_file = tmp_path / "multi.yaml" + yaml_file.write_text("---\nkey1: value1\n---\nkey2: value2") + + result = loadYamlFile(str(yaml_file)) + + # 
yaml.safe_load() doesn't support multiple documents, so it should return None + assert result is None + + +class TestRestoreResource: + """Tests for restoreResource function""" + + def setup_method(self): + """Set up test fixtures""" + self.mock_client = MagicMock() + self.mock_resource_api = MagicMock() + self.mock_client.resources.get.return_value = self.mock_resource_api + + def test_create_new_namespaced_resource(self): + """Test creating a new namespaced resource""" + resource_data = { + 'apiVersion': 'v1', + 'kind': 'ConfigMap', + 'metadata': { + 'name': 'test-config', + 'namespace': 'test-ns' + }, + 'data': { + 'key': 'value' + } + } + + # Resource doesn't exist + self.mock_resource_api.get.side_effect = NotFoundError(Mock()) + + success, name, status = restoreResource(self.mock_client, resource_data) + + assert success is True + assert name == 'test-config' + assert status is None + self.mock_resource_api.create.assert_called_once_with( + body=resource_data, + namespace='test-ns' + ) + + def test_create_new_cluster_resource(self): + """Test creating a new cluster-scoped resource""" + resource_data = { + 'apiVersion': 'v1', + 'kind': 'Namespace', + 'metadata': { + 'name': 'test-namespace' + } + } + + # Resource doesn't exist + self.mock_resource_api.get.side_effect = NotFoundError(Mock()) + + success, name, status = restoreResource(self.mock_client, resource_data) + + assert success is True + assert name == 'test-namespace' + assert status is None + self.mock_resource_api.create.assert_called_once_with( + body=resource_data + ) + + def test_update_existing_resource_with_replace_true(self): + """Test updating an existing resource when replace_resource is True""" + resource_data = { + 'apiVersion': 'v1', + 'kind': 'ConfigMap', + 'metadata': { + 'name': 'test-config', + 'namespace': 'test-ns' + }, + 'data': { + 'key': 'new-value' + } + } + + # Resource exists + existing_resource = { + 'metadata': { + 'name': 'test-config', + 'resourceVersion': '12345' + } + } + 
self.mock_resource_api.get.return_value = existing_resource + + success, name, status = restoreResource(self.mock_client, resource_data, replace_resource=True) + + assert success is True + assert name == 'test-config' + assert status == 'updated' + self.mock_resource_api.patch.assert_called_once_with( + body=resource_data, + name='test-config', + namespace='test-ns', + content_type='application/merge-patch+json' + ) + + def test_skip_existing_resource_with_replace_false(self): + """Test skipping an existing resource when replace_resource is False""" + resource_data = { + 'apiVersion': 'v1', + 'kind': 'ConfigMap', + 'metadata': { + 'name': 'test-config', + 'namespace': 'test-ns' + } + } + + # Resource exists + existing_resource = {'metadata': {'name': 'test-config'}} + self.mock_resource_api.get.return_value = existing_resource + + success, name, status = restoreResource(self.mock_client, resource_data, replace_resource=False) + + assert success is True + assert name == 'test-config' + assert status == 'skipped' + self.mock_resource_api.patch.assert_not_called() + self.mock_resource_api.create.assert_not_called() + + def test_namespace_override(self): + """Test that namespace parameter overrides resource namespace""" + resource_data = { + 'apiVersion': 'v1', + 'kind': 'ConfigMap', + 'metadata': { + 'name': 'test-config', + 'namespace': 'original-ns' + } + } + + # Resource doesn't exist + self.mock_resource_api.get.side_effect = NotFoundError(Mock()) + + success, name, status = restoreResource( + self.mock_client, + resource_data, + namespace='override-ns' + ) + + assert success is True + self.mock_resource_api.create.assert_called_once_with( + body=resource_data, + namespace='override-ns' + ) + + def test_missing_kind_field(self): + """Test handling resource missing kind field""" + resource_data = { + 'apiVersion': 'v1', + 'metadata': { + 'name': 'test-resource' + } + } + + success, name, status = restoreResource(self.mock_client, resource_data) + + assert success 
is False + assert name == 'test-resource' + assert 'missing required fields' in status.lower() + + def test_missing_api_version_field(self): + """Test handling resource missing apiVersion field""" + resource_data = { + 'kind': 'ConfigMap', + 'metadata': { + 'name': 'test-resource' + } + } + + success, name, status = restoreResource(self.mock_client, resource_data) + + assert success is False + assert name == 'test-resource' + assert 'missing required fields' in status.lower() + + def test_missing_name_field(self): + """Test handling resource missing name field""" + resource_data = { + 'apiVersion': 'v1', + 'kind': 'ConfigMap', + 'metadata': {} + } + + success, name, status = restoreResource(self.mock_client, resource_data) + + assert success is False + assert name == 'unknown' + assert 'missing required fields' in status.lower() + + def test_create_failure(self): + """Test handling create operation failure""" + resource_data = { + 'apiVersion': 'v1', + 'kind': 'ConfigMap', + 'metadata': { + 'name': 'test-config', + 'namespace': 'test-ns' + } + } + + # Resource doesn't exist + self.mock_resource_api.get.side_effect = NotFoundError(Mock()) + # Create fails + self.mock_resource_api.create.side_effect = Exception("Create failed") + + success, name, status = restoreResource(self.mock_client, resource_data) + + assert success is False + assert name == 'test-config' + assert 'Failed to create' in status + assert 'Create failed' in status + + def test_patch_failure(self): + """Test handling patch operation failure""" + resource_data = { + 'apiVersion': 'v1', + 'kind': 'ConfigMap', + 'metadata': { + 'name': 'test-config', + 'namespace': 'test-ns' + } + } + + # Resource exists + existing_resource = {'metadata': {'name': 'test-config'}} + self.mock_resource_api.get.return_value = existing_resource + # Patch fails + self.mock_resource_api.patch.side_effect = Exception("Patch failed") + + success, name, status = restoreResource(self.mock_client, resource_data, 
replace_resource=True) + + assert success is False + assert name == 'test-config' + assert 'Failed to update' in status + assert 'Patch failed' in status + + def test_resource_api_get_failure(self): + """Test handling failure to get resource API""" + resource_data = { + 'apiVersion': 'v1', + 'kind': 'ConfigMap', + 'metadata': { + 'name': 'test-config' + } + } + + # Getting resource API fails + self.mock_client.resources.get.side_effect = Exception("API not found") + + success, name, status = restoreResource(self.mock_client, resource_data) + + assert success is False + assert name == 'test-config' + assert 'Error restoring resource' in status + + def test_update_cluster_scoped_resource(self): + """Test updating a cluster-scoped resource""" + resource_data = { + 'apiVersion': 'v1', + 'kind': 'Namespace', + 'metadata': { + 'name': 'test-namespace' + } + } + + # Resource exists + existing_resource = {'metadata': {'name': 'test-namespace'}} + self.mock_resource_api.get.return_value = existing_resource + + success, name, status = restoreResource(self.mock_client, resource_data, replace_resource=True) + + assert success is True + assert name == 'test-namespace' + assert status == 'updated' + self.mock_resource_api.patch.assert_called_once_with( + body=resource_data, + name='test-namespace', + content_type='application/merge-patch+json' + ) + + def test_malformed_resource_data(self): + """Test handling malformed resource data""" + resource_data = { + 'apiVersion': 'v1', + 'kind': 'ConfigMap' + # Missing metadata entirely + } + + success, name, status = restoreResource(self.mock_client, resource_data) + + assert success is False + assert name == 'unknown' + assert 'missing required fields' in status.lower() + + def test_resource_with_complex_metadata(self): + """Test resource with complex metadata structure""" + resource_data = { + 'apiVersion': 'apps/v1', + 'kind': 'Deployment', + 'metadata': { + 'name': 'test-deployment', + 'namespace': 'test-ns', + 'labels': { + 'app': 
'test', + 'version': 'v1' + }, + 'annotations': { + 'description': 'Test deployment' + } + }, + 'spec': { + 'replicas': 3 + } + } + + # Resource doesn't exist + self.mock_resource_api.get.side_effect = NotFoundError(Mock()) + + success, name, status = restoreResource(self.mock_client, resource_data) + + assert success is True + assert name == 'test-deployment' + assert status is None + self.mock_resource_api.create.assert_called_once() \ No newline at end of file From 1da6c996a166f2d26db6726f4619bec2040546c1 Mon Sep 17 00:00:00 2001 From: whitfiea Date: Mon, 19 Jan 2026 14:56:02 +0000 Subject: [PATCH 12/41] formatting --- src/mas/devops/backup.py | 2 - src/mas/devops/restore.py | 24 +++++----- test/src/test_restore.py | 98 +++++++++++++++++++-------------------- 3 files changed, 61 insertions(+), 63 deletions(-) diff --git a/src/mas/devops/backup.py b/src/mas/devops/backup.py index e134a77f..4e5f6c2d 100644 --- a/src/mas/devops/backup.py +++ b/src/mas/devops/backup.py @@ -228,5 +228,3 @@ def backupResources(dynClient: DynamicClient, kind: str, api_version: str, backu logger.error(f"Error backing up {kind} resources: {e}") failed_count = 1 return (backed_up_count, not_found_count, failed_count, discovered_secrets) - - diff --git a/src/mas/devops/restore.py b/src/mas/devops/restore.py index 52bfbac7..d427e628 100644 --- a/src/mas/devops/restore.py +++ b/src/mas/devops/restore.py @@ -18,10 +18,10 @@ def loadYamlFile(file_path: str): """ Load YAML content from a file - + Args: file_path: Path to the YAML file - + Returns: dict: Parsed YAML content or None if error """ @@ -40,13 +40,13 @@ def restoreResource(dynClient: DynamicClient, resource_data: dict, namespace=Non If the resource exists and replace_resource is True, it will be updated (replaced). If the resource exists and replace_resource is False, it will be skipped. If the resource doesn't exist, it will be created. 
- + Args: dynClient: Kubernetes dynamic client resource_data: Dictionary containing the resource definition namespace: Optional namespace override (uses resource's namespace if not provided) replace_resource: If True, replace existing resources; if False, skip them (default: True) - + Returns: tuple: (success: bool, resource_name: str, status_message: str or None) - success: True if created, updated, or skipped; False if failed @@ -60,18 +60,18 @@ def restoreResource(dynClient: DynamicClient, resource_data: dict, namespace=Non metadata = resource_data.get('metadata', {}) resource_name = metadata.get('name') resource_namespace = namespace or metadata.get('namespace') - + if not kind or not api_version or not resource_name: error_msg = "Resource missing required fields (kind, apiVersion, or name)" logger.error(error_msg) return (False, resource_name or 'unknown', error_msg) - + # Get the resource API resourceAPI = dynClient.resources.get(api_version=api_version, kind=kind) - + # Determine scope description for logging scope_desc = f"namespace '{resource_namespace}'" if resource_namespace else "cluster-level" - + # Check if resource already exists resource_exists = False existing_resource = None @@ -83,14 +83,14 @@ def restoreResource(dynClient: DynamicClient, resource_data: dict, namespace=Non resource_exists = existing_resource is not None except NotFoundError: resource_exists = False - + # Apply the resource (create, update, or skip) try: if resource_exists: if replace_resource: # Resource exists - update it using strategic merge patch logger.info(f"Patching existing {kind} '{resource_name}' in {scope_desc}") - + if resource_namespace: resourceAPI.patch(body=resource_data, name=resource_name, namespace=resource_namespace, content_type='application/merge-patch+json') else: @@ -115,8 +115,8 @@ def restoreResource(dynClient: DynamicClient, resource_data: dict, namespace=Non error_msg = f"Failed to {action} {kind} '{resource_name}': {e}" logger.error(error_msg) return 
(False, resource_name, error_msg) - + except Exception as e: error_msg = f"Error restoring resource: {e}" logger.error(error_msg) - return (False, resource_data.get('metadata', {}).get('name', 'unknown'), error_msg) \ No newline at end of file + return (False, resource_data.get('metadata', {}).get('name', 'unknown'), error_msg) diff --git a/test/src/test_restore.py b/test/src/test_restore.py index 25e43a06..3666b6b4 100644 --- a/test/src/test_restore.py +++ b/test/src/test_restore.py @@ -28,13 +28,13 @@ def test_load_valid_yaml_file(self, tmp_path): 'namespace': 'test-ns' } } - + yaml_file = tmp_path / "test.yaml" with open(yaml_file, 'w') as f: yaml.dump(yaml_content, f) - + result = loadYamlFile(str(yaml_file)) - + assert result is not None assert result['kind'] == 'ConfigMap' assert result['metadata']['name'] == 'test-config' @@ -43,33 +43,33 @@ def test_load_empty_yaml_file(self, tmp_path): """Test loading an empty YAML file""" yaml_file = tmp_path / "empty.yaml" yaml_file.write_text("") - + result = loadYamlFile(str(yaml_file)) - + assert result is None def test_load_nonexistent_file(self): """Test loading a non-existent file""" result = loadYamlFile("/nonexistent/path/file.yaml") - + assert result is None def test_load_invalid_yaml_file(self, tmp_path): """Test loading an invalid YAML file""" yaml_file = tmp_path / "invalid.yaml" yaml_file.write_text("invalid: yaml: content: [") - + result = loadYamlFile(str(yaml_file)) - + assert result is None def test_load_yaml_with_multiple_documents(self, tmp_path): """Test loading YAML file with multiple documents returns None (not supported)""" yaml_file = tmp_path / "multi.yaml" yaml_file.write_text("---\nkey1: value1\n---\nkey2: value2") - + result = loadYamlFile(str(yaml_file)) - + # yaml.safe_load() doesn't support multiple documents, so it should return None assert result is None @@ -96,12 +96,12 @@ def test_create_new_namespaced_resource(self): 'key': 'value' } } - + # Resource doesn't exist 
self.mock_resource_api.get.side_effect = NotFoundError(Mock()) - + success, name, status = restoreResource(self.mock_client, resource_data) - + assert success is True assert name == 'test-config' assert status is None @@ -119,12 +119,12 @@ def test_create_new_cluster_resource(self): 'name': 'test-namespace' } } - + # Resource doesn't exist self.mock_resource_api.get.side_effect = NotFoundError(Mock()) - + success, name, status = restoreResource(self.mock_client, resource_data) - + assert success is True assert name == 'test-namespace' assert status is None @@ -145,7 +145,7 @@ def test_update_existing_resource_with_replace_true(self): 'key': 'new-value' } } - + # Resource exists existing_resource = { 'metadata': { @@ -154,9 +154,9 @@ def test_update_existing_resource_with_replace_true(self): } } self.mock_resource_api.get.return_value = existing_resource - + success, name, status = restoreResource(self.mock_client, resource_data, replace_resource=True) - + assert success is True assert name == 'test-config' assert status == 'updated' @@ -177,13 +177,13 @@ def test_skip_existing_resource_with_replace_false(self): 'namespace': 'test-ns' } } - + # Resource exists existing_resource = {'metadata': {'name': 'test-config'}} self.mock_resource_api.get.return_value = existing_resource - + success, name, status = restoreResource(self.mock_client, resource_data, replace_resource=False) - + assert success is True assert name == 'test-config' assert status == 'skipped' @@ -200,16 +200,16 @@ def test_namespace_override(self): 'namespace': 'original-ns' } } - + # Resource doesn't exist self.mock_resource_api.get.side_effect = NotFoundError(Mock()) - + success, name, status = restoreResource( self.mock_client, resource_data, namespace='override-ns' ) - + assert success is True self.mock_resource_api.create.assert_called_once_with( body=resource_data, @@ -224,9 +224,9 @@ def test_missing_kind_field(self): 'name': 'test-resource' } } - + success, name, status = 
restoreResource(self.mock_client, resource_data) - + assert success is False assert name == 'test-resource' assert 'missing required fields' in status.lower() @@ -239,9 +239,9 @@ def test_missing_api_version_field(self): 'name': 'test-resource' } } - + success, name, status = restoreResource(self.mock_client, resource_data) - + assert success is False assert name == 'test-resource' assert 'missing required fields' in status.lower() @@ -253,9 +253,9 @@ def test_missing_name_field(self): 'kind': 'ConfigMap', 'metadata': {} } - + success, name, status = restoreResource(self.mock_client, resource_data) - + assert success is False assert name == 'unknown' assert 'missing required fields' in status.lower() @@ -270,14 +270,14 @@ def test_create_failure(self): 'namespace': 'test-ns' } } - + # Resource doesn't exist self.mock_resource_api.get.side_effect = NotFoundError(Mock()) # Create fails self.mock_resource_api.create.side_effect = Exception("Create failed") - + success, name, status = restoreResource(self.mock_client, resource_data) - + assert success is False assert name == 'test-config' assert 'Failed to create' in status @@ -293,15 +293,15 @@ def test_patch_failure(self): 'namespace': 'test-ns' } } - + # Resource exists existing_resource = {'metadata': {'name': 'test-config'}} self.mock_resource_api.get.return_value = existing_resource # Patch fails self.mock_resource_api.patch.side_effect = Exception("Patch failed") - + success, name, status = restoreResource(self.mock_client, resource_data, replace_resource=True) - + assert success is False assert name == 'test-config' assert 'Failed to update' in status @@ -316,12 +316,12 @@ def test_resource_api_get_failure(self): 'name': 'test-config' } } - + # Getting resource API fails self.mock_client.resources.get.side_effect = Exception("API not found") - + success, name, status = restoreResource(self.mock_client, resource_data) - + assert success is False assert name == 'test-config' assert 'Error restoring resource' in 
status @@ -335,13 +335,13 @@ def test_update_cluster_scoped_resource(self): 'name': 'test-namespace' } } - + # Resource exists existing_resource = {'metadata': {'name': 'test-namespace'}} self.mock_resource_api.get.return_value = existing_resource - + success, name, status = restoreResource(self.mock_client, resource_data, replace_resource=True) - + assert success is True assert name == 'test-namespace' assert status == 'updated' @@ -358,9 +358,9 @@ def test_malformed_resource_data(self): 'kind': 'ConfigMap' # Missing metadata entirely } - + success, name, status = restoreResource(self.mock_client, resource_data) - + assert success is False assert name == 'unknown' assert 'missing required fields' in status.lower() @@ -385,13 +385,13 @@ def test_resource_with_complex_metadata(self): 'replicas': 3 } } - + # Resource doesn't exist self.mock_resource_api.get.side_effect = NotFoundError(Mock()) - + success, name, status = restoreResource(self.mock_client, resource_data) - + assert success is True assert name == 'test-deployment' assert status is None - self.mock_resource_api.create.assert_called_once() \ No newline at end of file + self.mock_resource_api.create.assert_called_once() From 47e353a688ab098bcbcf9d9b69600e747272b305 Mon Sep 17 00:00:00 2001 From: Sanjay Prabhakar Date: Mon, 19 Jan 2026 16:26:15 +0000 Subject: [PATCH 13/41] [patch] add sls function --- src/mas/devops/sls.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/src/mas/devops/sls.py b/src/mas/devops/sls.py index 4927ed9a..65728f4b 100644 --- a/src/mas/devops/sls.py +++ b/src/mas/devops/sls.py @@ -77,3 +77,16 @@ def findSLSByNamespace(namespace: str, instances: list = None, dynClient: Dynami if namespace in instance['metadata']['namespace']: return True return False + +def getSLSRegistrationDetails(namespace: str, name: str, dynClient: DynamicClient): + try: + slsAPI = dynClient.resources.get(api_version="sls.ibm.com/v1", kind="LicenseService") + slsInstance = slsAPI.get(name=name, 
namespace=namespace) + if hasattr(slsInstance, 'status') and hasattr(slsInstance.status, 'licenseId') and hasattr(slsInstance.status, 'registrationKey'): + return dict( + registrationKey=slsInstance.status.registrationKey, + licenseId=slsInstance.status.licenseId + ) + except NotFoundError: + logger.info(f"No SLS '{name}' found in namespace {namespace}.'") + return dict() From 28a3a7d26f226ebd710e30daef001f7067a106c7 Mon Sep 17 00:00:00 2001 From: Sanjay Prabhakar Date: Mon, 19 Jan 2026 17:11:11 +0000 Subject: [PATCH 14/41] [patch] add comments --- src/mas/devops/sls.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/src/mas/devops/sls.py b/src/mas/devops/sls.py index 65728f4b..e204bc8d 100644 --- a/src/mas/devops/sls.py +++ b/src/mas/devops/sls.py @@ -78,7 +78,24 @@ def findSLSByNamespace(namespace: str, instances: list = None, dynClient: Dynami return True return False + def getSLSRegistrationDetails(namespace: str, name: str, dynClient: DynamicClient): + """ + Retrieve registration details like licenseId and registrationKey from the LicenseService instance's CR status + + This function gets the LicenseService instance of a specified name in a specified namespace. + It retrieves licenseId and registrationKey keys in CR status and returns. + + Args: + namespace (str): The OpenShift namespace to search for SLS instances. + name (str): Name of SLS(LicenseService) instance. + dynClient (DynamicClient): OpenShift dynamic client for querying instances. + Required if instances is None. Defaults to None. + + Returns: + dict: dict with 'licenseId' and 'registrationKey' when details are found. + Empty if not found. 
+ """ try: slsAPI = dynClient.resources.get(api_version="sls.ibm.com/v1", kind="LicenseService") slsInstance = slsAPI.get(name=name, namespace=namespace) From f30019f0edff4bbdf5a2189ecdc1709ea7cd39d6 Mon Sep 17 00:00:00 2001 From: Sanjay Prabhakar Date: Wed, 21 Jan 2026 18:11:57 +0000 Subject: [PATCH 15/41] [patch] filter uid labels --- src/mas/devops/backup.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/mas/devops/backup.py b/src/mas/devops/backup.py index 4e5f6c2d..4a563990 100644 --- a/src/mas/devops/backup.py +++ b/src/mas/devops/backup.py @@ -77,6 +77,13 @@ def filterResourceData(data: dict) -> dict: if 'status' in filteredCopy: del filteredCopy['status'] + # Remove labels with uid + # this will cause problem when restoring the backup + if 'metadata' in filteredCopy and 'labels' in filteredCopy['metadata']: + for key in list(filteredCopy['metadata']['labels'].keys()): + if "uid" in key.lower(): + filteredCopy['metadata']['labels'].pop(key) + return filteredCopy From 971fe29175cb147d8e01b90f770acf18df4f4177 Mon Sep 17 00:00:00 2001 From: whitfiea Date: Mon, 26 Jan 2026 08:29:54 +0000 Subject: [PATCH 16/41] remove bob comments --- .github/workflows/docs.yml | 2 -- mkdocs.yml | 2 -- src/mas/devops/data/ocp.yaml | 2 -- test/src/test_backup.py | 2 -- 4 files changed, 8 deletions(-) diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index 269d36d2..da78b41e 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -49,5 +49,3 @@ jobs: with: branch: gh-pages folder: site - -# Made with Bob diff --git a/mkdocs.yml b/mkdocs.yml index adec8bab..cb5a0d82 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -112,5 +112,3 @@ extra: social: - icon: fontawesome/brands/github link: https://github.com/ibm-mas/python-devops - -# Made with Bob diff --git a/src/mas/devops/data/ocp.yaml b/src/mas/devops/data/ocp.yaml index 8bffc6c8..6db81de6 100644 --- a/src/mas/devops/data/ocp.yaml +++ b/src/mas/devops/data/ocp.yaml @@ -54,5 +54,3 @@ 
ocp_versions: # - Extended Support (EUS): Additional 6 months available for purchase # - EUS is included with Premium subscriptions # - Not all versions have EUS available - -# Made with Bob diff --git a/test/src/test_backup.py b/test/src/test_backup.py index af52d382..5ae37452 100644 --- a/test/src/test_backup.py +++ b/test/src/test_backup.py @@ -898,5 +898,3 @@ def test_backup_resource_kind_not_found(self, mocker): assert backed_up == 0 assert not_found == 0 assert failed == 0 - -# Made with Bob From 026bc1c97d8acf8e59f4369a46163552ac821f34 Mon Sep 17 00:00:00 2001 From: Sanjay Prabhakar Date: Tue, 27 Jan 2026 15:29:04 +0000 Subject: [PATCH 17/41] [patch] uploadToS3 --- src/mas/devops/backup.py | 84 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 84 insertions(+) diff --git a/src/mas/devops/backup.py b/src/mas/devops/backup.py index 4a563990..83c54c0f 100644 --- a/src/mas/devops/backup.py +++ b/src/mas/devops/backup.py @@ -12,6 +12,8 @@ import yaml from openshift.dynamic import DynamicClient from openshift.dynamic.exceptions import NotFoundError +import boto3 +from botocore.exceptions import ClientError, NoCredentialsError logger = logging.getLogger(name=__name__) @@ -235,3 +237,85 @@ def backupResources(dynClient: DynamicClient, kind: str, api_version: str, backu logger.error(f"Error backing up {kind} resources: {e}") failed_count = 1 return (backed_up_count, not_found_count, failed_count, discovered_secrets) + + + +def uploadToS3( + file_path: str, + bucket_name: str, + object_name=None, + endpoint_url=None, + aws_access_key_id=None, + aws_secret_access_key=None, + region_name=None +) -> bool: + """ + Upload a tar.gz file to S3-compatible storage. + + Args: + file_path: Path to the tar.gz file to upload + bucket_name: Name of the S3 bucket + object_name: S3 object name. 
If not specified, file_path basename is used + endpoint_url: S3-compatible endpoint URL (e.g., for MinIO, Ceph) + aws_access_key_id: AWS access key ID (if not using environment variables) + aws_secret_access_key: AWS secret access key (if not using environment variables) + region_name: AWS region name (default: us-east-1) + + Returns: + bool: True if file was uploaded successfully, False otherwise + """ + # If S3 object_name was not specified, use file_path basename + if object_name is None: + object_name = os.path.basename(file_path) + + # Validate file exists and is a tar.gz file + if not os.path.exists(file_path): + logger.error(f"File not found: {file_path}") + return False + + if not file_path.endswith('.tar.gz'): + logger.warning(f"File does not have .tar.gz extension: {file_path}") + + # Configure S3 client + try: + s3_config = {} + + if endpoint_url: + s3_config['endpoint_url'] = endpoint_url + + if aws_access_key_id and aws_secret_access_key: + s3_config['aws_access_key_id'] = aws_access_key_id + s3_config['aws_secret_access_key'] = aws_secret_access_key + + if region_name: + s3_config['region_name'] = region_name + else: + s3_config['region_name'] = 'us-east-1' + + s3_client = boto3.client('s3', **s3_config) + + # Upload the file + logger.info(f"Uploading {file_path} to s3://{bucket_name}/{object_name}") + + file_size = os.path.getsize(file_path) + logger.info(f"File size: {file_size / (1024 * 1024):.2f} MB") + + s3_client.upload_file(file_path, bucket_name, object_name) + + logger.info(f"Successfully uploaded {file_path} to s3://{bucket_name}/{object_name}") + return True + + except FileNotFoundError: + logger.error(f"File not found: {file_path}") + return False + except NoCredentialsError: + logger.error("AWS credentials not found. 
Please provide credentials or configure environment variables.") + return False + except ClientError as e: + error_code = e.response.get('Error', {}).get('Code', 'Unknown') + error_message = e.response.get('Error', {}).get('Message', str(e)) + logger.error(f"S3 client error ({error_code}): {error_message}") + return False + except Exception as e: + logger.error(f"Unexpected error uploading to S3: {e}") + return False From d1e2674611979d2e65a873560a67beea31209c8f Mon Sep 17 00:00:00 2001 From: Sanjay Prabhakar Date: Tue, 27 Jan 2026 15:38:38 +0000 Subject: [PATCH 18/41] [patch] fix lint --- src/mas/devops/backup.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/src/mas/devops/backup.py b/src/mas/devops/backup.py index 83c54c0f..88bb7dc1 100644 --- a/src/mas/devops/backup.py +++ b/src/mas/devops/backup.py @@ -279,14 +279,12 @@ def uploadToS3( # Configure S3 client try: s3_config = {} - + if endpoint_url: s3_config['endpoint_url'] = endpoint_url - if aws_access_key_id and aws_secret_access_key: s3_config['aws_access_key_id'] = aws_access_key_id s3_config['aws_secret_access_key'] = aws_secret_access_key - if region_name: s3_config['region_name'] = region_name else: @@ -296,12 +294,12 @@ def uploadToS3( # Upload the file logger.info(f"Uploading {file_path} to s3://{bucket_name}/{object_name}") - + file_size = os.path.getsize(file_path) logger.info(f"File size: {file_size / (1024 * 1024):.2f} MB") s3_client.upload_file(file_path, bucket_name, object_name) - + logger.info(f"Successfully uploaded {file_path} to s3://{bucket_name}/{object_name}") return True From 432a08d1a350b9ad4a25998174cac6d57b5ba9d3 Mon Sep 17 00:00:00 2001 From: Sanjay Prabhakar Date: Tue, 27 Jan 2026 15:42:40 +0000 Subject: [PATCH 19/41] [patch] lint again --- src/mas/devops/backup.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/mas/devops/backup.py b/src/mas/devops/backup.py index 88bb7dc1..92e559ba 100644 --- a/src/mas/devops/backup.py +++ 
b/src/mas/devops/backup.py @@ -239,7 +239,6 @@ def backupResources(dynClient: DynamicClient, kind: str, api_version: str, backu return (backed_up_count, not_found_count, failed_count, discovered_secrets) - def uploadToS3( file_path: str, bucket_name: str, From 86d7ebdf3b1e75ea4a61908cd9473b334991f85d Mon Sep 17 00:00:00 2001 From: whitfiea Date: Wed, 28 Jan 2026 12:56:05 +0000 Subject: [PATCH 20/41] Update with new pipelinerun for mas-backup --- src/mas/devops/tekton.py | 64 ++++++++++- .../templates/pipelinerun-backup.yml.j2 | 100 ++++++++++++++++++ .../templates/pipelines-backup-pvc.yml.j2 | 15 +++ 3 files changed, 174 insertions(+), 5 deletions(-) create mode 100644 src/mas/devops/templates/pipelinerun-backup.yml.j2 create mode 100644 src/mas/devops/templates/pipelines-backup-pvc.yml.j2 diff --git a/src/mas/devops/tekton.py b/src/mas/devops/tekton.py index 58042db0..e12290c1 100644 --- a/src/mas/devops/tekton.py +++ b/src/mas/devops/tekton.py @@ -262,7 +262,7 @@ def updateTektonDefinitions(namespace: str, yamlFile: str) -> None: logger.debug(line) -def preparePipelinesNamespace(dynClient: DynamicClient, instanceId: str = None, storageClass: str = None, accessMode: str = None, waitForBind: bool = True, configureRBAC: bool = True): +def preparePipelinesNamespace(dynClient: DynamicClient, instanceId: str = None, storageClass: str = None, accessMode: str = None, waitForBind: bool = True, configureRBAC: bool = True, createBackupPVC: bool = False, backupStorageSize: str = "20Gi"): """ Prepare a namespace for MAS pipelines by creating RBAC and PVC resources. @@ -276,6 +276,8 @@ def preparePipelinesNamespace(dynClient: DynamicClient, instanceId: str = None, accessMode (str, optional): Access mode for the PVC. Defaults to None. waitForBind (bool, optional): Whether to wait for PVC to bind. Defaults to True. configureRBAC (bool, optional): Whether to configure RBAC. Defaults to True. + createBackupPVC (bool, optional): Whether to create backup PVC. Defaults to False. 
+ backupStorageSize (str, optional): Size of the backup PVC storage. Defaults to "20Gi". Returns: None @@ -304,6 +306,9 @@ def preparePipelinesNamespace(dynClient: DynamicClient, instanceId: str = None, # Create PVC (instanceId namespace only) if instanceId is not None: + pvcAPI = dynClient.resources.get(api_version="v1", kind="PersistentVolumeClaim") + + # Create config PVC template = env.get_template("pipelines-pvc.yml.j2") renderedTemplate = template.render( mas_instance_id=instanceId, @@ -312,24 +317,51 @@ def preparePipelinesNamespace(dynClient: DynamicClient, instanceId: str = None, ) logger.debug(renderedTemplate) pvc = yaml.safe_load(renderedTemplate) - pvcAPI = dynClient.resources.get(api_version="v1", kind="PersistentVolumeClaim") pvcAPI.apply(body=pvc, namespace=namespace) + # Automatically determine if we should wait for PVC binding based on storage class volumeBindingMode = getStorageClassVolumeBindingMode(dynClient, storageClass) waitForBind = (volumeBindingMode == "Immediate") if waitForBind: - logger.info(f"Storage class {storageClass} uses volumeBindingMode={volumeBindingMode}, waiting for PVC to bind") + logger.info(f"Storage class {storageClass} uses volumeBindingMode={volumeBindingMode}, waiting for config PVC to bind") pvcIsBound = False while not pvcIsBound: configPVC = pvcAPI.get(name="config-pvc", namespace=namespace) if configPVC.status.phase == "Bound": pvcIsBound = True else: - logger.debug("Waiting 15s before checking status of PVC again") + logger.debug("Waiting 15s before checking status of config PVC again") logger.debug(configPVC) sleep(15) else: - logger.info(f"Storage class {storageClass} uses volumeBindingMode={volumeBindingMode}, skipping PVC bind wait") + logger.info(f"Storage class {storageClass} uses volumeBindingMode={volumeBindingMode}, skipping config PVC bind wait") + + # Create backup PVC if requested + if createBackupPVC: + logger.info("Creating backup PVC") + backupTemplate = 
env.get_template("pipelines-backup-pvc.yml.j2") + renderedBackupTemplate = backupTemplate.render( + mas_instance_id=instanceId, + pipeline_storage_class=storageClass, + pipeline_storage_accessmode=accessMode + ) + logger.debug(renderedBackupTemplate) + backupPvc = yaml.safe_load(renderedBackupTemplate) + pvcAPI.apply(body=backupPvc, namespace=namespace) + + if waitForBind: + logger.info(f"Storage class {storageClass} uses volumeBindingMode={volumeBindingMode}, waiting for backup PVC to bind") + backupPvcIsBound = False + while not backupPvcIsBound: + backupPVC = pvcAPI.get(name="backup-pvc", namespace=namespace) + if backupPVC.status.phase == "Bound": + backupPvcIsBound = True + else: + logger.debug("Waiting 15s before checking status of backup PVC again") + logger.debug(backupPVC) + sleep(15) + else: + logger.info(f"Storage class {storageClass} uses volumeBindingMode={volumeBindingMode}, skipping backup PVC bind wait") def prepareAiServicePipelinesNamespace(dynClient: DynamicClient, instanceId: str = None, storageClass: str = None, accessMode: str = None, waitForBind: bool = True, configureRBAC: bool = True): @@ -697,6 +729,28 @@ def launchUpdatePipeline(dynClient: DynamicClient, params: dict) -> str: return pipelineURL +def launchBackupPipeline(dynClient: DynamicClient, params: dict) -> str: + """ + Create a PipelineRun to backup a MAS instance. 
+ + Parameters: + dynClient (DynamicClient): OpenShift Dynamic Client + params (dict): Backup parameters including instance ID and configuration + + Returns: + str: URL to the PipelineRun in the OpenShift console + + Raises: + NotFoundError: If resources cannot be created + """ + instanceId = params["mas_instance_id"] + namespace = f"mas-{instanceId}-pipelines" + timestamp = launchPipelineRun(dynClient, namespace, "pipelinerun-backup", params) + + pipelineURL = f"{getConsoleURL(dynClient)}/k8s/ns/mas-{instanceId}-pipelines/tekton.dev~v1beta1~PipelineRun/{instanceId}-backup-{timestamp}" + return pipelineURL + + def launchAiServiceUpgradePipeline(dynClient: DynamicClient, aiserviceInstanceId: str, skipPreCheck: bool = False, diff --git a/src/mas/devops/templates/pipelinerun-backup.yml.j2 b/src/mas/devops/templates/pipelinerun-backup.yml.j2 new file mode 100644 index 00000000..4234f2a2 --- /dev/null +++ b/src/mas/devops/templates/pipelinerun-backup.yml.j2 @@ -0,0 +1,100 @@ +--- +apiVersion: tekton.dev/v1beta1 +kind: PipelineRun +metadata: + generateName: {{ mas_instance_id }}-backup-{{ backup_version }}- + labels: + mas.ibm.com/instanceId: "{{ mas_instance_id }}" +spec: + pipelineRef: + name: mas-backup + serviceAccountName: pipeline + workspaces: + - name: shared-configs + persistentVolumeClaim: + claimName: config-pvc + - name: shared-backups + persistentVolumeClaim: + claimName: backup-pvc + params: + # Common Parameters + - name: image_pull_policy + value: IfNotPresent + - name: mas_instance_id + value: "{{ mas_instance_id }}" + + # Backup Configuration + - name: backup_version + value: "{{ backup_version }}" + + # Component Flags + {% if include_sls is defined and include_sls != "" %} + - name: include_sls + value: "{{ include_sls }}" + {% endif %} + + # MongoDB Configuration + {% if mongodb_namespace is defined and mongodb_namespace != "" %} + - name: mongodb_namespace + value: "{{ mongodb_namespace }}" + {% endif %} + {% if mongodb_instance_name is defined and 
mongodb_instance_name != "" %} + - name: mongodb_instance_name + value: "{{ mongodb_instance_name }}" + {% endif %} + {% if mongodb_provider is defined and mongodb_provider != "" %} + - name: mongodb_provider + value: "{{ mongodb_provider }}" + {% endif %} + + # SLS Configuration + {% if sls_namespace is defined and sls_namespace != "" %} + - name: sls_namespace + value: "{{ sls_namespace }}" + {% endif %} + + # Certificate Manager Configuration + {% if cert_manager_provider is defined and cert_manager_provider != "" %} + - name: cert_manager_provider + value: "{{ cert_manager_provider }}" + {% endif %} + + # Development Build Support + {% if artifactory_username is defined and artifactory_username != "" %} + - name: artifactory_username + value: "{{ artifactory_username }}" + {% endif %} + {% if artifactory_token is defined and artifactory_token != "" %} + - name: artifactory_token + value: "{{ artifactory_token }}" + {% endif %} + + # Upload Configuration + {% if upload_backup is defined and upload_backup != "" %} + - name: upload_backup + value: "{{ upload_backup }}" + {% endif %} + {% if aws_access_key_id is defined and aws_access_key_id != "" %} + - name: aws_access_key_id + value: "{{ aws_access_key_id }}" + {% endif %} + {% if aws_secret_access_key is defined and aws_secret_access_key != "" %} + - name: aws_secret_access_key + value: "{{ aws_secret_access_key }}" + {% endif %} + {% if s3_bucket_name is defined and s3_bucket_name != "" %} + - name: s3_bucket_name + value: "{{ s3_bucket_name }}" + {% endif %} + {% if s3_region is defined and s3_region != "" %} + - name: s3_region + value: "{{ s3_region }}" + {% endif %} + {% if artifactory_url is defined and artifactory_url != "" %} + - name: artifactory_url + value: "{{ artifactory_url }}" + {% endif %} + {% if artifactory_repository is defined and artifactory_repository != "" %} + - name: artifactory_repository + value: "{{ artifactory_repository }}" + {% endif %} \ No newline at end of file diff --git 
a/src/mas/devops/templates/pipelines-backup-pvc.yml.j2 b/src/mas/devops/templates/pipelines-backup-pvc.yml.j2 new file mode 100644 index 00000000..655b9b4d --- /dev/null +++ b/src/mas/devops/templates/pipelines-backup-pvc.yml.j2 @@ -0,0 +1,15 @@ +--- +# PVC for backup storage +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: backup-pvc + namespace: mas-{{ mas_instance_id }}-pipelines +spec: + accessModes: + - {{ pipeline_storage_accessmode }} + volumeMode: Filesystem + storageClassName: {{ pipeline_storage_class }} + resources: + requests: + storage: {{ backup_storage_size }} \ No newline at end of file From 782e3bf827998d8fb1e93a75a51a8842e4cc1892 Mon Sep 17 00:00:00 2001 From: whitfiea Date: Thu, 29 Jan 2026 10:24:49 +0000 Subject: [PATCH 21/41] fix setting storage size --- src/mas/devops/tekton.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/mas/devops/tekton.py b/src/mas/devops/tekton.py index e12290c1..ba36d2c7 100644 --- a/src/mas/devops/tekton.py +++ b/src/mas/devops/tekton.py @@ -343,7 +343,8 @@ def preparePipelinesNamespace(dynClient: DynamicClient, instanceId: str = None, renderedBackupTemplate = backupTemplate.render( mas_instance_id=instanceId, pipeline_storage_class=storageClass, - pipeline_storage_accessmode=accessMode + pipeline_storage_accessmode=accessMode, + backup_storage_size=backupStorageSize ) logger.debug(renderedBackupTemplate) backupPvc = yaml.safe_load(renderedBackupTemplate) From 7b0ad570268540b4100ed20fc6305ee53adde8db Mon Sep 17 00:00:00 2001 From: Sanjay Prabhakar Date: Thu, 29 Jan 2026 10:49:51 +0000 Subject: [PATCH 22/41] [patch] downloadFromS3 --- src/mas/devops/backup.py | 95 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 95 insertions(+) diff --git a/src/mas/devops/backup.py b/src/mas/devops/backup.py index 92e559ba..172f26e4 100644 --- a/src/mas/devops/backup.py +++ b/src/mas/devops/backup.py @@ -316,3 +316,98 @@ def uploadToS3( except Exception as e: logger.error(f"Unexpected 
error uploading to S3: {e}") return False + + +def downloadFromS3( + bucket_name: str, + object_name: str, + local_dir: str, + endpoint_url=None, + aws_access_key_id=None, + aws_secret_access_key=None, + region_name=None +) -> bool: + """ + Download a tar.gz file from S3-compatible storage to a backup directory. + + Args: + bucket_name: Name of the S3 bucket + object_name: S3 object name to download + local_dir: Directory path where the file will be downloaded + endpoint_url: S3-compatible endpoint URL (e.g., for MinIO, Ceph) + aws_access_key_id: AWS access key ID (if not using environment variables) + aws_secret_access_key: AWS secret access key (if not using environment variables) + region_name: AWS region name (default: us-east-1) + + Returns: + bool: True if file was downloaded successfully, False otherwise + """ + # Validate backup directory + if not os.path.exists(local_dir): + logger.info(f"Backup directory does not exist, creating: {local_dir}") + try: + os.makedirs(local_dir, exist_ok=True) + except Exception as e: + logger.error(f"Failed to create backup directory {local_dir}: {e}") + return False + + # Construct the full file path + file_path = os.path.join(local_dir, object_name) + + # Warn if file doesn't have .tar.gz extension + if not object_name.endswith('.tar.gz'): + logger.warning(f"Object does not have .tar.gz extension: {object_name}") + + # Configure S3 client + try: + s3_config = {} + + if endpoint_url: + s3_config['endpoint_url'] = endpoint_url + if aws_access_key_id and aws_secret_access_key: + s3_config['aws_access_key_id'] = aws_access_key_id + s3_config['aws_secret_access_key'] = aws_secret_access_key + if region_name: + s3_config['region_name'] = region_name + else: + s3_config['region_name'] = 'us-east-1' + + s3_client = boto3.client('s3', **s3_config) + + # Check if object exists and get its size + logger.info(f"Downloading s3://{bucket_name}/{object_name} to {file_path}") + + try: + response = s3_client.head_object(Bucket=bucket_name, 
Key=object_name) + file_size = response.get('ContentLength', 0) + logger.info(f"Object size: {file_size / (1024 * 1024):.2f} MB") + except ClientError as e: + if e.response.get('Error', {}).get('Code') == '404': + logger.error(f"Object not found in S3: s3://{bucket_name}/{object_name}") + return False + raise + + # Download the file + s3_client.download_file(bucket_name, object_name, file_path) + + # Verify the downloaded file exists + if os.path.exists(file_path): + downloaded_size = os.path.getsize(file_path) + logger.info(f"Successfully downloaded {object_name} to {file_path}") + logger.info(f"Downloaded file size: {downloaded_size / (1024 * 1024):.2f} MB") + return True + else: + logger.error(f"Download completed but file not found at {file_path}") + return False + + except NoCredentialsError: + logger.error("AWS credentials not found. Please provide credentials or configure environment variables.") + return False + except ClientError as e: + error_code = e.response.get('Error', {}).get('Code', 'Unknown') + error_message = e.response.get('Error', {}).get('Message', str(e)) + logger.error(f"S3 client error ({error_code}): {error_message}") + return False + except Exception as e: + logger.error(f"Unexpected error downloading from S3: {e}") + return False From 15937b0e6fde067e5921b412827818162f354eb9 Mon Sep 17 00:00:00 2001 From: whitfiea Date: Thu, 29 Jan 2026 11:00:06 +0000 Subject: [PATCH 23/41] fix generatename --- src/mas/devops/templates/pipelinerun-backup.yml.j2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/mas/devops/templates/pipelinerun-backup.yml.j2 b/src/mas/devops/templates/pipelinerun-backup.yml.j2 index 4234f2a2..d174f981 100644 --- a/src/mas/devops/templates/pipelinerun-backup.yml.j2 +++ b/src/mas/devops/templates/pipelinerun-backup.yml.j2 @@ -2,7 +2,7 @@ apiVersion: tekton.dev/v1beta1 kind: PipelineRun metadata: - generateName: {{ mas_instance_id }}-backup-{{ backup_version }}- + name: "{{ mas_instance_id }}-backup-{{ 
backup_version }}" labels: mas.ibm.com/instanceId: "{{ mas_instance_id }}" spec: From 6cf4d61a606740cc2110fb6dfcfd4614cef14183 Mon Sep 17 00:00:00 2001 From: whitfiea Date: Thu, 29 Jan 2026 13:03:10 +0000 Subject: [PATCH 24/41] turn off affinity assistent --- src/mas/devops/templates/pipelinerun-backup.yml.j2 | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/mas/devops/templates/pipelinerun-backup.yml.j2 b/src/mas/devops/templates/pipelinerun-backup.yml.j2 index d174f981..3915f250 100644 --- a/src/mas/devops/templates/pipelinerun-backup.yml.j2 +++ b/src/mas/devops/templates/pipelinerun-backup.yml.j2 @@ -9,6 +9,12 @@ spec: pipelineRef: name: mas-backup serviceAccountName: pipeline + taskRunSpecs: + - pipelineTaskName: "*" + podTemplate: + schedulerName: default-scheduler + podTemplate: + schedulerName: default-scheduler workspaces: - name: shared-configs persistentVolumeClaim: From 5df5b6942398a0306e28843a369d3c9027ead684 Mon Sep 17 00:00:00 2001 From: whitfiea Date: Thu, 29 Jan 2026 14:35:51 +0000 Subject: [PATCH 25/41] set feature-flags --- src/mas/devops/tekton.py | 72 +++++++++++++++++++ .../templates/pipelinerun-backup.yml.j2 | 10 +-- 2 files changed, 74 insertions(+), 8 deletions(-) diff --git a/src/mas/devops/tekton.py b/src/mas/devops/tekton.py index ba36d2c7..de4d25bd 100644 --- a/src/mas/devops/tekton.py +++ b/src/mas/devops/tekton.py @@ -27,6 +27,73 @@ logger = logging.getLogger(__name__) +def configureTektonFeatureFlags(dynClient: DynamicClient) -> bool: + """ + Configure Tekton feature flags to disable coschedule (Affinity Assistant). + + This prevents the "more than one PersistentVolumeClaim is bound" error when + tasks use multiple PVCs with incompatible access modes. 
+ + Parameters: + dynClient (DynamicClient): OpenShift Dynamic Client + + Returns: + bool: True if configuration is successful, False otherwise + """ + try: + configMapAPI = dynClient.resources.get(api_version="v1", kind="ConfigMap") + namespace = "openshift-pipelines" + configMapName = "feature-flags" + + # Get the existing ConfigMap + try: + featureFlags = configMapAPI.get(name=configMapName, namespace=namespace) + logger.info(f"Found existing Tekton feature-flags ConfigMap in {namespace}") + + # Update the coschedule setting + if featureFlags.data is None: + featureFlags.data = {} + + currentCoschedule = featureFlags.data.get("coschedule", "workspaces") + if currentCoschedule != "disabled": + logger.info(f"Updating Tekton coschedule setting from '{currentCoschedule}' to 'disabled'") + featureFlags.data["coschedule"] = "disabled" + configMapAPI.patch(body=featureFlags, namespace=namespace) + logger.info("Successfully updated Tekton feature flags to disable coschedule") + + # Restart the Tekton controller to apply changes + logger.info("Restarting tekton-pipelines-controller to apply feature flag changes") + deploymentAPI = dynClient.resources.get(api_version="apps/v1", kind="Deployment") + controller = deploymentAPI.get(name="tekton-pipelines-controller", namespace=namespace) + + # Trigger a rollout by updating an annotation + if controller.spec.template.metadata.annotations is None: + controller.spec.template.metadata.annotations = {} + controller.spec.template.metadata.annotations["kubectl.kubernetes.io/restartedAt"] = datetime.now().isoformat() + deploymentAPI.patch(body=controller, namespace=namespace) + + # Wait for the controller to be ready + logger.debug("Waiting for tekton-pipelines-controller to be ready after restart") + foundReadyController = waitForDeployment(dynClient, namespace=namespace, deploymentName="tekton-pipelines-controller") + if foundReadyController: + logger.info("Tekton controller restarted successfully") + return True + else: + 
logger.warning("Tekton controller restart may not have completed successfully") + return False + else: + logger.info("Tekton coschedule is already set to 'disabled', no changes needed") + return True + + except NotFoundError: + logger.warning(f"ConfigMap {configMapName} not found in {namespace}, it may not exist yet") + return False + + except Exception as e: + logger.error(f"Error configuring Tekton feature flags: {str(e)}") + return False + + def installOpenShiftPipelines(dynClient: DynamicClient, customStorageClassName: str = None) -> bool: """ Install the OpenShift Pipelines Operator and wait for it to be ready to use. @@ -97,6 +164,11 @@ def installOpenShiftPipelines(dynClient: DynamicClient, customStorageClassName: logger.error("OpenShift Pipelines Webhook is NOT installed and ready") return False + # Configure Tekton feature flags to disable coschedule + # ------------------------------------------------------------------------- + logger.debug("Configuring Tekton feature flags") + configureTektonFeatureFlags(dynClient) + # Workaround for bug in OpenShift Pipelines/Tekton # ------------------------------------------------------------------------- # Wait for the postgredb-tekton-results-postgres-0 PVC to be ready diff --git a/src/mas/devops/templates/pipelinerun-backup.yml.j2 b/src/mas/devops/templates/pipelinerun-backup.yml.j2 index 3915f250..2a015466 100644 --- a/src/mas/devops/templates/pipelinerun-backup.yml.j2 +++ b/src/mas/devops/templates/pipelinerun-backup.yml.j2 @@ -4,17 +4,11 @@ kind: PipelineRun metadata: name: "{{ mas_instance_id }}-backup-{{ backup_version }}" labels: - mas.ibm.com/instanceId: "{{ mas_instance_id }}" + tekton.dev/pipeline: mas-backup spec: pipelineRef: name: mas-backup - serviceAccountName: pipeline - taskRunSpecs: - - pipelineTaskName: "*" - podTemplate: - schedulerName: default-scheduler - podTemplate: - schedulerName: default-scheduler + serviceAccountName: "{{ service_account_name | default('pipeline', True) }}" workspaces: - 
name: shared-configs persistentVolumeClaim: From f2805a73d43d79a68da8fa8ecba06d37fc40bfee Mon Sep 17 00:00:00 2001 From: whitfiea Date: Thu, 29 Jan 2026 14:37:26 +0000 Subject: [PATCH 26/41] linting --- src/mas/devops/tekton.py | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/src/mas/devops/tekton.py b/src/mas/devops/tekton.py index de4d25bd..90e08464 100644 --- a/src/mas/devops/tekton.py +++ b/src/mas/devops/tekton.py @@ -30,13 +30,13 @@ def configureTektonFeatureFlags(dynClient: DynamicClient) -> bool: """ Configure Tekton feature flags to disable coschedule (Affinity Assistant). - + This prevents the "more than one PersistentVolumeClaim is bound" error when tasks use multiple PVCs with incompatible access modes. - + Parameters: dynClient (DynamicClient): OpenShift Dynamic Client - + Returns: bool: True if configuration is successful, False otherwise """ @@ -44,34 +44,34 @@ def configureTektonFeatureFlags(dynClient: DynamicClient) -> bool: configMapAPI = dynClient.resources.get(api_version="v1", kind="ConfigMap") namespace = "openshift-pipelines" configMapName = "feature-flags" - + # Get the existing ConfigMap try: featureFlags = configMapAPI.get(name=configMapName, namespace=namespace) logger.info(f"Found existing Tekton feature-flags ConfigMap in {namespace}") - + # Update the coschedule setting if featureFlags.data is None: featureFlags.data = {} - + currentCoschedule = featureFlags.data.get("coschedule", "workspaces") if currentCoschedule != "disabled": logger.info(f"Updating Tekton coschedule setting from '{currentCoschedule}' to 'disabled'") featureFlags.data["coschedule"] = "disabled" configMapAPI.patch(body=featureFlags, namespace=namespace) logger.info("Successfully updated Tekton feature flags to disable coschedule") - + # Restart the Tekton controller to apply changes logger.info("Restarting tekton-pipelines-controller to apply feature flag changes") deploymentAPI = 
dynClient.resources.get(api_version="apps/v1", kind="Deployment") controller = deploymentAPI.get(name="tekton-pipelines-controller", namespace=namespace) - + # Trigger a rollout by updating an annotation if controller.spec.template.metadata.annotations is None: controller.spec.template.metadata.annotations = {} controller.spec.template.metadata.annotations["kubectl.kubernetes.io/restartedAt"] = datetime.now().isoformat() deploymentAPI.patch(body=controller, namespace=namespace) - + # Wait for the controller to be ready logger.debug("Waiting for tekton-pipelines-controller to be ready after restart") foundReadyController = waitForDeployment(dynClient, namespace=namespace, deploymentName="tekton-pipelines-controller") @@ -84,11 +84,11 @@ def configureTektonFeatureFlags(dynClient: DynamicClient) -> bool: else: logger.info("Tekton coschedule is already set to 'disabled', no changes needed") return True - + except NotFoundError: logger.warning(f"ConfigMap {configMapName} not found in {namespace}, it may not exist yet") return False - + except Exception as e: logger.error(f"Error configuring Tekton feature flags: {str(e)}") return False From 8174e46422c6d028de6d37e418e37d0ccab35612 Mon Sep 17 00:00:00 2001 From: whitfiea Date: Thu, 29 Jan 2026 15:09:50 +0000 Subject: [PATCH 27/41] fix feature flag setting --- src/mas/devops/tekton.py | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/src/mas/devops/tekton.py b/src/mas/devops/tekton.py index 90e08464..0e04893d 100644 --- a/src/mas/devops/tekton.py +++ b/src/mas/devops/tekton.py @@ -49,16 +49,19 @@ def configureTektonFeatureFlags(dynClient: DynamicClient) -> bool: try: featureFlags = configMapAPI.get(name=configMapName, namespace=namespace) logger.info(f"Found existing Tekton feature-flags ConfigMap in {namespace}") - + + # Convert to dict to make it mutable + featureFlagsDict = featureFlags.to_dict() + # Update the coschedule setting - if featureFlags.data is None: - featureFlags.data = 
{} - - currentCoschedule = featureFlags.data.get("coschedule", "workspaces") + if featureFlagsDict.get("data") is None: + featureFlagsDict["data"] = {} + + currentCoschedule = featureFlagsDict["data"].get("coschedule", "workspaces") if currentCoschedule != "disabled": logger.info(f"Updating Tekton coschedule setting from '{currentCoschedule}' to 'disabled'") - featureFlags.data["coschedule"] = "disabled" - configMapAPI.patch(body=featureFlags, namespace=namespace) + featureFlagsDict["data"]["coschedule"] = "disabled" + configMapAPI.patch(body=featureFlagsDict, namespace=namespace) logger.info("Successfully updated Tekton feature flags to disable coschedule") # Restart the Tekton controller to apply changes From aa49c797238138e6c354e4b5bfb5ef6fb28cb887 Mon Sep 17 00:00:00 2001 From: whitfiea Date: Thu, 29 Jan 2026 15:11:11 +0000 Subject: [PATCH 28/41] fix linting --- src/mas/devops/tekton.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/mas/devops/tekton.py b/src/mas/devops/tekton.py index 0e04893d..8cf5f8b1 100644 --- a/src/mas/devops/tekton.py +++ b/src/mas/devops/tekton.py @@ -49,14 +49,14 @@ def configureTektonFeatureFlags(dynClient: DynamicClient) -> bool: try: featureFlags = configMapAPI.get(name=configMapName, namespace=namespace) logger.info(f"Found existing Tekton feature-flags ConfigMap in {namespace}") - + # Convert to dict to make it mutable featureFlagsDict = featureFlags.to_dict() - + # Update the coschedule setting if featureFlagsDict.get("data") is None: featureFlagsDict["data"] = {} - + currentCoschedule = featureFlagsDict["data"].get("coschedule", "workspaces") if currentCoschedule != "disabled": logger.info(f"Updating Tekton coschedule setting from '{currentCoschedule}' to 'disabled'") From a95ce693227f12e70a078e876c71e83776e300d2 Mon Sep 17 00:00:00 2001 From: whitfiea Date: Fri, 30 Jan 2026 11:22:23 +0000 Subject: [PATCH 29/41] Fix pipelinerun id --- src/mas/devops/tekton.py | 5 +++-- 1 file changed, 3 
insertions(+), 2 deletions(-) diff --git a/src/mas/devops/tekton.py b/src/mas/devops/tekton.py index 8cf5f8b1..23ad7801 100644 --- a/src/mas/devops/tekton.py +++ b/src/mas/devops/tekton.py @@ -820,10 +820,11 @@ def launchBackupPipeline(dynClient: DynamicClient, params: dict) -> str: NotFoundError: If resources cannot be created """ instanceId = params["mas_instance_id"] + backupVersion = params["backup_version"] namespace = f"mas-{instanceId}-pipelines" - timestamp = launchPipelineRun(dynClient, namespace, "pipelinerun-backup", params) + launchPipelineRun(dynClient, namespace, "pipelinerun-backup", params) - pipelineURL = f"{getConsoleURL(dynClient)}/k8s/ns/mas-{instanceId}-pipelines/tekton.dev~v1beta1~PipelineRun/{instanceId}-backup-{timestamp}" + pipelineURL = f"{getConsoleURL(dynClient)}/k8s/ns/mas-{instanceId}-pipelines/tekton.dev~v1beta1~PipelineRun/{instanceId}-backup-{backupVersion}" return pipelineURL From f2586fcda28b02bcf6985d2a1f9fb3c7c9eb08ee Mon Sep 17 00:00:00 2001 From: whitfiea Date: Fri, 30 Jan 2026 12:16:21 +0000 Subject: [PATCH 30/41] set skip_pre_check --- src/mas/devops/templates/pipelinerun-backup.yml.j2 | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/mas/devops/templates/pipelinerun-backup.yml.j2 b/src/mas/devops/templates/pipelinerun-backup.yml.j2 index 2a015466..36c14d0b 100644 --- a/src/mas/devops/templates/pipelinerun-backup.yml.j2 +++ b/src/mas/devops/templates/pipelinerun-backup.yml.j2 @@ -23,6 +23,12 @@ spec: - name: mas_instance_id value: "{{ mas_instance_id }}" + {% if skip_pre_check is defined and skip_pre_check != "" %} + # Pipeline config + - name: skip_pre_check + value: "{{ skip_pre_check }}" + {% endif %} + # Backup Configuration - name: backup_version value: "{{ backup_version }}" From 05fb5b2e97b1f06ba067382a8646eceb71e68c90 Mon Sep 17 00:00:00 2001 From: Sanjay Prabhakar Date: Tue, 3 Feb 2026 09:48:14 +0000 Subject: [PATCH 31/41] new pipelinerun for mas restore (#181) Co-authored-by: Sanjay Prabhakar --- 
src/mas/devops/tekton.py | 23 +++ .../templates/pipelinerun-restore.yml.j2 | 178 ++++++++++++++++++ 2 files changed, 201 insertions(+) create mode 100644 src/mas/devops/templates/pipelinerun-restore.yml.j2 diff --git a/src/mas/devops/tekton.py b/src/mas/devops/tekton.py index 23ad7801..2e74a16a 100644 --- a/src/mas/devops/tekton.py +++ b/src/mas/devops/tekton.py @@ -828,6 +828,29 @@ def launchBackupPipeline(dynClient: DynamicClient, params: dict) -> str: return pipelineURL +def launchRestorePipeline(dynClient: DynamicClient, params: dict) -> str: + """ + Create a PipelineRun to restore a MAS instance. + + Parameters: + dynClient (DynamicClient): OpenShift Dynamic Client + params (dict): Backup/Restore parameters including instance ID and configuration + + Returns: + str: URL to the PipelineRun in the OpenShift console + + Raises: + NotFoundError: If resources cannot be created + """ + instanceId = params["mas_instance_id"] + restoreVersion = params["restore_version"] + namespace = f"mas-{instanceId}-pipelines" + launchPipelineRun(dynClient, namespace, "pipelinerun-restore", params) + + pipelineURL = f"{getConsoleURL(dynClient)}/k8s/ns/mas-{instanceId}-pipelines/tekton.dev~v1beta1~PipelineRun/{instanceId}-restore-{restoreVersion}" + return pipelineURL + + def launchAiServiceUpgradePipeline(dynClient: DynamicClient, aiserviceInstanceId: str, skipPreCheck: bool = False, diff --git a/src/mas/devops/templates/pipelinerun-restore.yml.j2 b/src/mas/devops/templates/pipelinerun-restore.yml.j2 new file mode 100644 index 00000000..a2f99c10 --- /dev/null +++ b/src/mas/devops/templates/pipelinerun-restore.yml.j2 @@ -0,0 +1,178 @@ +--- +apiVersion: tekton.dev/v1beta1 +kind: PipelineRun +metadata: + name: "{{ mas_instance_id }}-restore-{{ restore_version }}" + labels: + tekton.dev/pipeline: mas-restore +spec: + pipelineRef: + name: mas-restore + serviceAccountName: "{{ service_account_name | default('pipeline', True) }}" + workspaces: + - name: shared-configs + 
persistentVolumeClaim: + claimName: config-pvc + - name: shared-backups + persistentVolumeClaim: + claimName: backup-pvc + params: + # Common Parameters + - name: image_pull_policy + value: IfNotPresent + - name: mas_instance_id + value: "{{ mas_instance_id }}" + + {% if skip_pre_check is defined and skip_pre_check != "" %} + # Pipeline config + - name: skip_pre_check + value: "{{ skip_pre_check }}" + {% endif %} + + # Restore Configuration + - name: restore_version + value: "{{ restore_version }}" + + # Component Flags + {% if include_sls is defined and include_sls != "" %} + - name: include_sls + value: "{{ include_sls }}" + {% endif %} + {% if include_dro is defined and include_dro != "" %} + - name: include_dro + value: "{{ include_dro }}" + {% endif %} + {% if include_grafana is defined and include_grafana != "" %} + - name: include_grafana + value: "{{ include_grafana }}" + {% endif %} + + # MongoDB Configuration + {% if mongodb_namespace is defined and mongodb_namespace != "" %} + - name: mongodb_namespace + value: "{{ mongodb_namespace }}" + {% endif %} + {% if mongodb_instance_name is defined and mongodb_instance_name != "" %} + - name: mongodb_instance_name + value: "{{ mongodb_instance_name }}" + {% endif %} + {% if mongodb_provider is defined and mongodb_provider != "" %} + - name: mongodb_provider + value: "{{ mongodb_provider }}" + {% endif %} + + # SLS Configuration + {% if sls_namespace is defined and sls_namespace != "" %} + - name: sls_namespace + value: "{{ sls_namespace }}" + {% endif %} + {% if sls_domain is defined and sls_domain != "" %} + - name: sls_domain + value: "{{ sls_domain }}" + {% endif %} + + # DRO Configuration + {% if dro_contact_email is defined and dro_contact_email != "" %} + - name: dro_contact_email + value: "{{ dro_contact_email }}" + {% endif %} + {% if dro_contact_firstname is defined and dro_contact_firstname != "" %} + - name: dro_contact_firstname + value: "{{ dro_contact_firstname }}" + {% endif %} + {% if 
dro_contact_lastname is defined and dro_contact_lastname != "" %} + - name: dro_contact_lastname + value: "{{ dro_contact_lastname }}" + {% endif %} + {% if ibm_entitlement_key is defined and ibm_entitlement_key != "" %} + - name: ibm_entitlement_key + value: "{{ ibm_entitlement_key }}" + {% endif %} + {% if dro_namespace is defined and dro_namespace != "" %} + - name: dro_namespace + value: "{{ dro_namespace }}" + {% endif %} + {% if dro_storage_class is defined and dro_storage_class != "" %} + - name: dro_storage_class + value: "{{ dro_storage_class }}" + {% endif %} + + # Suite Restore Configuration + {% if include_slscfg_from_backup is defined and include_slscfg_from_backup != "" %} + - name: include_slscfg_from_backup + value: "{{ include_slscfg_from_backup }}" + {% endif %} + {% if sls_url_on_restore is defined and sls_url_on_restore != "" %} + - name: sls_url_on_restore + value: "{{ sls_url_on_restore }}" + {% endif %} + {% if sls_cfg_file is defined and sls_cfg_file != "" %} + - name: sls_cfg_file + value: "{{ sls_cfg_file }}" + {% endif %} + {% if include_drocfg_from_backup is defined and include_drocfg_from_backup != "" %} + - name: include_drocfg_from_backup + value: "{{ include_drocfg_from_backup }}" + {% endif %} + {% if dro_url_on_restore is defined and dro_url_on_restore != "" %} + - name: dro_url_on_restore + value: "{{ dro_url_on_restore }}" + {% endif %} + {% if dro_cfg_file is defined and dro_cfg_file != "" %} + - name: dro_cfg_file + value: "{{ dro_cfg_file }}" + {% endif %} + {% if mas_domain_on_restore is defined and mas_domain_on_restore != "" %} + - name: mas_domain_on_restore + value: "{{ mas_domain_on_restore }}" + {% endif %} + + # Certificate Manager Configuration + {% if cert_manager_provider is defined and cert_manager_provider != "" %} + - name: cert_manager_provider + value: "{{ cert_manager_provider }}" + {% endif %} + + # Development Build Support + {% if artifactory_username is defined and artifactory_username != "" %} + - name: 
artifactory_username + value: "{{ artifactory_username }}" + {% endif %} + {% if artifactory_token is defined and artifactory_token != "" %} + - name: artifactory_token + value: "{{ artifactory_token }}" + {% endif %} + + # Download Configuration + {% if backup_archive_name is defined and backup_archive_name != "" %} + - name: backup_archive_name + value: "{{ backup_archive_name }}" + {% endif %} + {% if download_backup is defined and download_backup != "" %} + - name: download_backup + value: "{{ download_backup }}" + {% endif %} + {% if aws_access_key_id is defined and aws_access_key_id != "" %} + - name: aws_access_key_id + value: "{{ aws_access_key_id }}" + {% endif %} + {% if aws_secret_access_key is defined and aws_secret_access_key != "" %} + - name: aws_secret_access_key + value: "{{ aws_secret_access_key }}" + {% endif %} + {% if s3_bucket_name is defined and s3_bucket_name != "" %} + - name: s3_bucket_name + value: "{{ s3_bucket_name }}" + {% endif %} + {% if s3_region is defined and s3_region != "" %} + - name: s3_region + value: "{{ s3_region }}" + {% endif %} + {% if artifactory_url is defined and artifactory_url != "" %} + - name: artifactory_url + value: "{{ artifactory_url }}" + {% endif %} + {% if artifactory_repository is defined and artifactory_repository != "" %} + - name: artifactory_repository + value: "{{ artifactory_repository }}" + {% endif %} \ No newline at end of file From e864215788cadae1bae93be115b173222dde108e Mon Sep 17 00:00:00 2001 From: Sanjay Prabhakar Date: Tue, 3 Feb 2026 14:58:36 +0000 Subject: [PATCH 32/41] Update tekton.py with prepareRestoreSecrets --- src/mas/devops/tekton.py | 39 +++++++++++++++++++ .../templates/pipelinerun-restore.yml.j2 | 5 ++- 2 files changed, 43 insertions(+), 1 deletion(-) diff --git a/src/mas/devops/tekton.py b/src/mas/devops/tekton.py index 2e74a16a..96d83d50 100644 --- a/src/mas/devops/tekton.py +++ b/src/mas/devops/tekton.py @@ -506,6 +506,45 @@ def prepareAiServicePipelinesNamespace(dynClient: 
DynamicClient, instanceId: str logger.info(f"Storage class {storageClass} uses volumeBindingMode={volumeBindingMode}, skipping PVC bind wait") +def prepareRestoreSecrets(dynClient: DynamicClient, namespace: str, restoreConfigs: dict = None): + """ + Create or update secret required for MAS Restore pipeline. + + Creates secret in the specified namespace: + - pipeline-restore-configs + + Parameters: + dynClient (DynamicClient): OpenShift Dynamic Client + namespace (str): The namespace to create secrets in + restoreConfigs (dict, optional): configuration data for restore. Defaults to None (empty secret). + + Returns: + None + + Raises: + NotFoundError: If secrets cannot be created + """ + secretsAPI = dynClient.resources.get(api_version="v1", kind="Secret") + + # 1. Secret/pipeline-restore-configs + # ------------------------------------------------------------------------- + # Must exist, but can be empty + try: + secretsAPI.delete(name="pipeline-restore-configs", namespace=namespace) + except NotFoundError: + pass + + if restoreConfigs is None: + restoreConfigs = { + "apiVersion": "v1", + "kind": "Secret", + "type": "Opaque", + "metadata": { + "name": "pipeline-restore-configs" + } + } + secretsAPI.create(body=restoreConfigs, namespace=namespace) + def prepareInstallSecrets(dynClient: DynamicClient, namespace: str, slsLicenseFile: str = None, additionalConfigs: dict = None, certs: str = None, podTemplates: str = None) -> None: """ Create or update secrets required for MAS installation pipelines. 
diff --git a/src/mas/devops/templates/pipelinerun-restore.yml.j2 b/src/mas/devops/templates/pipelinerun-restore.yml.j2 index a2f99c10..061bd7f0 100644 --- a/src/mas/devops/templates/pipelinerun-restore.yml.j2 +++ b/src/mas/devops/templates/pipelinerun-restore.yml.j2 @@ -16,6 +16,9 @@ spec: - name: shared-backups persistentVolumeClaim: claimName: backup-pvc + - name: restore-configurations + secret: + secretName: pipeline-restore-configs params: # Common Parameters - name: image_pull_policy @@ -175,4 +178,4 @@ spec: {% if artifactory_repository is defined and artifactory_repository != "" %} - name: artifactory_repository value: "{{ artifactory_repository }}" - {% endif %} \ No newline at end of file + {% endif %} From c729f65496d8afb210072c66bd709eec1de65f59 Mon Sep 17 00:00:00 2001 From: Sanjay Prabhakar Date: Tue, 3 Feb 2026 19:11:03 +0000 Subject: [PATCH 33/41] autopep8 format --- src/mas/devops/tekton.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/mas/devops/tekton.py b/src/mas/devops/tekton.py index 96d83d50..22b797f8 100644 --- a/src/mas/devops/tekton.py +++ b/src/mas/devops/tekton.py @@ -545,6 +545,7 @@ def prepareRestoreSecrets(dynClient: DynamicClient, namespace: str, restoreConfi } secretsAPI.create(body=restoreConfigs, namespace=namespace) + def prepareInstallSecrets(dynClient: DynamicClient, namespace: str, slsLicenseFile: str = None, additionalConfigs: dict = None, certs: str = None, podTemplates: str = None) -> None: """ Create or update secrets required for MAS installation pipelines. 
From 0be6bf5fafeb57191c2e81bf488c3f790c8f50cd Mon Sep 17 00:00:00 2001 From: Sanjay Prabhakar Date: Wed, 4 Feb 2026 10:31:54 +0000 Subject: [PATCH 34/41] added timestamp to backup & restore pipelinerun name --- src/mas/devops/tekton.py | 8 ++++---- src/mas/devops/templates/pipelinerun-backup.yml.j2 | 2 +- src/mas/devops/templates/pipelinerun-restore.yml.j2 | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/mas/devops/tekton.py b/src/mas/devops/tekton.py index 22b797f8..53f272e8 100644 --- a/src/mas/devops/tekton.py +++ b/src/mas/devops/tekton.py @@ -862,9 +862,9 @@ def launchBackupPipeline(dynClient: DynamicClient, params: dict) -> str: instanceId = params["mas_instance_id"] backupVersion = params["backup_version"] namespace = f"mas-{instanceId}-pipelines" - launchPipelineRun(dynClient, namespace, "pipelinerun-backup", params) + timestamp = launchPipelineRun(dynClient, namespace, "pipelinerun-backup", params) - pipelineURL = f"{getConsoleURL(dynClient)}/k8s/ns/mas-{instanceId}-pipelines/tekton.dev~v1beta1~PipelineRun/{instanceId}-backup-{backupVersion}" + pipelineURL = f"{getConsoleURL(dynClient)}/k8s/ns/mas-{instanceId}-pipelines/tekton.dev~v1beta1~PipelineRun/{instanceId}-backup-{backupVersion}-{timestamp}" return pipelineURL @@ -885,9 +885,9 @@ def launchRestorePipeline(dynClient: DynamicClient, params: dict) -> str: instanceId = params["mas_instance_id"] restoreVersion = params["restore_version"] namespace = f"mas-{instanceId}-pipelines" - launchPipelineRun(dynClient, namespace, "pipelinerun-restore", params) + timestamp = launchPipelineRun(dynClient, namespace, "pipelinerun-restore", params) - pipelineURL = f"{getConsoleURL(dynClient)}/k8s/ns/mas-{instanceId}-pipelines/tekton.dev~v1beta1~PipelineRun/{instanceId}-restore-{restoreVersion}" + pipelineURL = f"{getConsoleURL(dynClient)}/k8s/ns/mas-{instanceId}-pipelines/tekton.dev~v1beta1~PipelineRun/{instanceId}-restore-{restoreVersion}-{timestamp}" return pipelineURL diff --git 
a/src/mas/devops/templates/pipelinerun-backup.yml.j2 b/src/mas/devops/templates/pipelinerun-backup.yml.j2 index 36c14d0b..0ba2f124 100644 --- a/src/mas/devops/templates/pipelinerun-backup.yml.j2 +++ b/src/mas/devops/templates/pipelinerun-backup.yml.j2 @@ -2,7 +2,7 @@ apiVersion: tekton.dev/v1beta1 kind: PipelineRun metadata: - name: "{{ mas_instance_id }}-backup-{{ backup_version }}" + name: "{{ mas_instance_id }}-backup-{{ backup_version }}-{{ timestamp }}" labels: tekton.dev/pipeline: mas-backup spec: diff --git a/src/mas/devops/templates/pipelinerun-restore.yml.j2 b/src/mas/devops/templates/pipelinerun-restore.yml.j2 index 061bd7f0..7c0185b0 100644 --- a/src/mas/devops/templates/pipelinerun-restore.yml.j2 +++ b/src/mas/devops/templates/pipelinerun-restore.yml.j2 @@ -2,7 +2,7 @@ apiVersion: tekton.dev/v1beta1 kind: PipelineRun metadata: - name: "{{ mas_instance_id }}-restore-{{ restore_version }}" + name: "{{ mas_instance_id }}-restore-{{ restore_version }}-{{ timestamp }}" labels: tekton.dev/pipeline: mas-restore spec: From 4b80d7ce317f6f089b6b5e66b84ef48498f13a6f Mon Sep 17 00:00:00 2001 From: Andrew Whitfield Date: Fri, 6 Feb 2026 15:02:40 +0000 Subject: [PATCH 35/41] Backup for Manage dev (#184) --- .../templates/pipelinerun-backup.yml.j2 | 48 +++++++++++++++++++ 1 file changed, 48 insertions(+) diff --git a/src/mas/devops/templates/pipelinerun-backup.yml.j2 b/src/mas/devops/templates/pipelinerun-backup.yml.j2 index 0ba2f124..ac7cc072 100644 --- a/src/mas/devops/templates/pipelinerun-backup.yml.j2 +++ b/src/mas/devops/templates/pipelinerun-backup.yml.j2 @@ -103,4 +103,52 @@ spec: {% if artifactory_repository is defined and artifactory_repository != "" %} - name: artifactory_repository value: "{{ artifactory_repository }}" + {% endif %} + + # Manage Application Backup Configuration + {% if backup_manage_app is defined and backup_manage_app != "" %} + - name: backup_manage_app + value: "{{ backup_manage_app }}" + {% endif %} + {% if backup_manage_db is defined 
and backup_manage_db != "" %} + - name: backup_manage_db + value: "{{ backup_manage_db }}" + {% endif %} + {% if manage_workspace_id is defined and manage_workspace_id != "" %} + - name: manage_workspace_id + value: "{{ manage_workspace_id }}" + {% endif %} + + # Manage Db2 Backup Configuration + {% if manage_db2_namespace is defined and manage_db2_namespace != "" %} + - name: manage_db2_namespace + value: "{{ manage_db2_namespace }}" + {% endif %} + {% if manage_db2_instance_name is defined and manage_db2_instance_name != "" %} + - name: manage_db2_instance_name + value: "{{ manage_db2_instance_name }}" + {% endif %} + {% if manage_db2_backup_type is defined and manage_db2_backup_type != "" %} + - name: manage_db2_backup_type + value: "{{ manage_db2_backup_type }}" + {% endif %} + {% if manage_db2_backup_vendor is defined and manage_db2_backup_vendor != "" %} + - name: manage_db2_backup_vendor + value: "{{ manage_db2_backup_vendor }}" + {% endif %} + {% if backup_s3_endpoint is defined and backup_s3_endpoint != "" %} + - name: backup_s3_endpoint + value: "{{ backup_s3_endpoint }}" + {% endif %} + {% if backup_s3_bucket is defined and backup_s3_bucket != "" %} + - name: backup_s3_bucket + value: "{{ backup_s3_bucket }}" + {% endif %} + {% if backup_s3_access_key is defined and backup_s3_access_key != "" %} + - name: backup_s3_access_key + value: "{{ backup_s3_access_key }}" + {% endif %} + {% if backup_s3_secret_key is defined and backup_s3_secret_key != "" %} + - name: backup_s3_secret_key + value: "{{ backup_s3_secret_key }}" {% endif %} \ No newline at end of file From e8b12137d0d24dbfab8f2ff17a01e22adb1df6ac Mon Sep 17 00:00:00 2001 From: Sanjay Prabhakar Date: Wed, 11 Feb 2026 19:16:57 +0000 Subject: [PATCH 36/41] add Manage restore --- .../templates/pipelinerun-restore.yml.j2 | 54 +++++++++++++++++++ 1 file changed, 54 insertions(+) diff --git a/src/mas/devops/templates/pipelinerun-restore.yml.j2 b/src/mas/devops/templates/pipelinerun-restore.yml.j2 index 
7c0185b0..49adcbe9 100644 --- a/src/mas/devops/templates/pipelinerun-restore.yml.j2 +++ b/src/mas/devops/templates/pipelinerun-restore.yml.j2 @@ -179,3 +179,57 @@ spec: - name: artifactory_repository value: "{{ artifactory_repository }}" {% endif %} + + # Manage Application Restore Configuration + {% if restore_manage_app is defined and restore_manage_app != "" %} + - name: restore_manage_app + value: "{{ restore_manage_app }}" + {% endif %} + {% if restore_manage_db is defined and restore_manage_db != "" %} + - name: restore_manage_db + value: "{{ restore_manage_db }}" + {% endif %} + # Manage App Storage class + {% if manage_app_override_storageclass is defined and manage_app_override_storageclass != "" %} + - name: manage_app_override_storageclass + value: "{{ manage_app_override_storageclass }}" + {% endif %} + {% if manage_app_storage_class_rwx is defined and manage_app_storage_class_rwx != "" %} + - name: manage_app_storage_class_rwx + value: "{{ manage_app_storage_class_rwx }}" + {% endif %} + {% if manage_app_storage_class_rwo is defined and manage_app_storage_class_rwo != "" %} + - name: manage_app_storage_class_rwo + value: "{{ manage_app_storage_class_rwo }}" + {% endif %} + + # Manage Db2 Restore Configuration + {% if manage_db2_restore_vendor is defined and manage_db2_restore_vendor != "" %} + - name: manage_db2_restore_vendor + value: "{{ manage_db2_restore_vendor }}" + {% endif %} + + {% if manage_db_override_storageclass is defined and manage_db_override_storageclass != "" %} + - name: manage_db_override_storageclass + value: "{{ manage_db_override_storageclass }}" + {% endif %} + {% if manage_db_meta_storage_class is defined and manage_db_meta_storage_class != "" %} + - name: manage_db_meta_storage_class + value: "{{ manage_db_meta_storage_class }}" + {% endif %} + {% if manage_db_data_storage_class is defined and manage_db_data_storage_class != "" %} + - name: manage_db_data_storage_class + value: "{{ manage_db_data_storage_class }}" + {% endif %} 
+ {% if manage_db_backup_storage_class is defined and manage_db_backup_storage_class != "" %} + - name: manage_db_backup_storage_class + value: "{{ manage_db_backup_storage_class }}" + {% endif %} + {% if manage_db_logs_storage_class is defined and manage_db_logs_storage_class != "" %} + - name: manage_db_logs_storage_class + value: "{{ manage_db_logs_storage_class }}" + {% endif %} + {% if manage_db_temp_storage_class is defined and manage_db_temp_storage_class != "" %} + - name: manage_db_temp_storage_class + value: "{{ manage_db_temp_storage_class }}" + {% endif %} From d6bb9a3ac43b8f58555539c5cb3ea2931a9428fc Mon Sep 17 00:00:00 2001 From: Sanjay Prabhakar Date: Thu, 12 Feb 2026 15:01:25 +0000 Subject: [PATCH 37/41] Update pipelinerun-restore.yml.j2 --- src/mas/devops/templates/pipelinerun-restore.yml.j2 | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/mas/devops/templates/pipelinerun-restore.yml.j2 b/src/mas/devops/templates/pipelinerun-restore.yml.j2 index 49adcbe9..4b85279e 100644 --- a/src/mas/devops/templates/pipelinerun-restore.yml.j2 +++ b/src/mas/devops/templates/pipelinerun-restore.yml.j2 @@ -63,6 +63,10 @@ spec: - name: mongodb_provider value: "{{ mongodb_provider }}" {% endif %} + {% if override_mongodb_storageclass is defined and override_mongodb_storageclass != "" %} + - name: override_mongodb_storageclass + value: "{{ override_mongodb_storageclass }}" + {% endif %} # SLS Configuration {% if sls_namespace is defined and sls_namespace != "" %} From dc8ea1929a9c874639b9820f6c03dfee09ae0114 Mon Sep 17 00:00:00 2001 From: Sanjay Prabhakar Date: Thu, 19 Feb 2026 16:30:44 +0000 Subject: [PATCH 38/41] setting no timeouts for backup and restore pipeline --- src/mas/devops/templates/pipelinerun-backup.yml.j2 | 2 ++ src/mas/devops/templates/pipelinerun-restore.yml.j2 | 2 ++ 2 files changed, 4 insertions(+) diff --git a/src/mas/devops/templates/pipelinerun-backup.yml.j2 b/src/mas/devops/templates/pipelinerun-backup.yml.j2 index ac7cc072..1ca7e44e 
100644 --- a/src/mas/devops/templates/pipelinerun-backup.yml.j2 +++ b/src/mas/devops/templates/pipelinerun-backup.yml.j2 @@ -9,6 +9,8 @@ spec: pipelineRef: name: mas-backup serviceAccountName: "{{ service_account_name | default('pipeline', True) }}" + timeouts: + pipeline: "0" workspaces: - name: shared-configs persistentVolumeClaim: diff --git a/src/mas/devops/templates/pipelinerun-restore.yml.j2 b/src/mas/devops/templates/pipelinerun-restore.yml.j2 index 4b85279e..b80342ca 100644 --- a/src/mas/devops/templates/pipelinerun-restore.yml.j2 +++ b/src/mas/devops/templates/pipelinerun-restore.yml.j2 @@ -9,6 +9,8 @@ spec: pipelineRef: name: mas-restore serviceAccountName: "{{ service_account_name | default('pipeline', True) }}" + timeouts: + pipeline: "0" workspaces: - name: shared-configs persistentVolumeClaim: From 0e766dedf0edb9b5186786e9dfc03b73b23cc79e Mon Sep 17 00:00:00 2001 From: Sanjay Prabhakar Date: Mon, 23 Feb 2026 18:48:16 +0000 Subject: [PATCH 39/41] add s3_endpoint_url to backup and restore pipelinerun --- src/mas/devops/templates/pipelinerun-backup.yml.j2 | 4 ++++ src/mas/devops/templates/pipelinerun-restore.yml.j2 | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/src/mas/devops/templates/pipelinerun-backup.yml.j2 b/src/mas/devops/templates/pipelinerun-backup.yml.j2 index 1ca7e44e..b15421c9 100644 --- a/src/mas/devops/templates/pipelinerun-backup.yml.j2 +++ b/src/mas/devops/templates/pipelinerun-backup.yml.j2 @@ -98,6 +98,10 @@ spec: - name: s3_region value: "{{ s3_region }}" {% endif %} + {% if s3_endpoint_url is defined and s3_endpoint_url != "" %} + - name: s3_endpoint_url + value: "{{ s3_endpoint_url }}" + {% endif %} {% if artifactory_url is defined and artifactory_url != "" %} - name: artifactory_url value: "{{ artifactory_url }}" diff --git a/src/mas/devops/templates/pipelinerun-restore.yml.j2 b/src/mas/devops/templates/pipelinerun-restore.yml.j2 index b80342ca..c4cb01a6 100644 --- a/src/mas/devops/templates/pipelinerun-restore.yml.j2 +++ 
b/src/mas/devops/templates/pipelinerun-restore.yml.j2 @@ -177,6 +177,10 @@ spec: - name: s3_region value: "{{ s3_region }}" {% endif %} + {% if s3_endpoint_url is defined and s3_endpoint_url != "" %} + - name: s3_endpoint_url + value: "{{ s3_endpoint_url }}" + {% endif %} {% if artifactory_url is defined and artifactory_url != "" %} - name: artifactory_url value: "{{ artifactory_url }}" From 69530d36f506a5b6f4c8231e81c4c3475d0d1522 Mon Sep 17 00:00:00 2001 From: Sanjay Prabhakar Date: Wed, 25 Feb 2026 13:41:44 +0000 Subject: [PATCH 40/41] Update pipelinerun-restore.yml.j2 --- .../templates/pipelinerun-restore.yml.j2 | 24 +++++-------------- 1 file changed, 6 insertions(+), 18 deletions(-) diff --git a/src/mas/devops/templates/pipelinerun-restore.yml.j2 b/src/mas/devops/templates/pipelinerun-restore.yml.j2 index c4cb01a6..3bf43934 100644 --- a/src/mas/devops/templates/pipelinerun-restore.yml.j2 +++ b/src/mas/devops/templates/pipelinerun-restore.yml.j2 @@ -223,23 +223,11 @@ spec: - name: manage_db_override_storageclass value: "{{ manage_db_override_storageclass }}" {% endif %} - {% if manage_db_meta_storage_class is defined and manage_db_meta_storage_class != "" %} - - name: manage_db_meta_storage_class - value: "{{ manage_db_meta_storage_class }}" + {% if manage_db_storage_class_rwo is defined and manage_db_storage_class_rwo != "" %} + - name: manage_db_storage_class_rwo + value: "{{ manage_db_storage_class_rwo }}" {% endif %} - {% if manage_db_data_storage_class is defined and manage_db_data_storage_class != "" %} - - name: manage_db_data_storage_class - value: "{{ manage_db_data_storage_class }}" - {% endif %} - {% if manage_db_backup_storage_class is defined and manage_db_backup_storage_class != "" %} - - name: manage_db_backup_storage_class - value: "{{ manage_db_backup_storage_class }}" - {% endif %} - {% if manage_db_logs_storage_class is defined and manage_db_logs_storage_class != "" %} - - name: manage_db_logs_storage_class - value: "{{ 
manage_db_logs_storage_class }}" - {% endif %} - {% if manage_db_temp_storage_class is defined and manage_db_temp_storage_class != "" %} - - name: manage_db_temp_storage_class - value: "{{ manage_db_temp_storage_class }}" + {% if manage_db_storage_class_rwx is defined and manage_db_storage_class_rwx != "" %} + - name: manage_db_storage_class_rwx + value: "{{ manage_db_storage_class_rwx }}" {% endif %} From b65b758f56e1d52b955642398211a33bb4a9cab5 Mon Sep 17 00:00:00 2001 From: Sanjay Prabhakar Date: Wed, 11 Mar 2026 08:11:16 +0000 Subject: [PATCH 41/41] remove configuring tekton feature flag and shared-configs pvc from backup and restore pipelines (#208) --- src/mas/devops/tekton.py | 129 ++++-------------- .../templates/pipelinerun-backup.yml.j2 | 3 - .../templates/pipelinerun-restore.yml.j2 | 3 - 3 files changed, 29 insertions(+), 106 deletions(-) diff --git a/src/mas/devops/tekton.py b/src/mas/devops/tekton.py index 3f472952..96054c99 100644 --- a/src/mas/devops/tekton.py +++ b/src/mas/devops/tekton.py @@ -27,76 +27,6 @@ logger = logging.getLogger(__name__) -def configureTektonFeatureFlags(dynClient: DynamicClient) -> bool: - """ - Configure Tekton feature flags to disable coschedule (Affinity Assistant). - - This prevents the "more than one PersistentVolumeClaim is bound" error when - tasks use multiple PVCs with incompatible access modes. 
- - Parameters: - dynClient (DynamicClient): OpenShift Dynamic Client - - Returns: - bool: True if configuration is successful, False otherwise - """ - try: - configMapAPI = dynClient.resources.get(api_version="v1", kind="ConfigMap") - namespace = "openshift-pipelines" - configMapName = "feature-flags" - - # Get the existing ConfigMap - try: - featureFlags = configMapAPI.get(name=configMapName, namespace=namespace) - logger.info(f"Found existing Tekton feature-flags ConfigMap in {namespace}") - - # Convert to dict to make it mutable - featureFlagsDict = featureFlags.to_dict() - - # Update the coschedule setting - if featureFlagsDict.get("data") is None: - featureFlagsDict["data"] = {} - - currentCoschedule = featureFlagsDict["data"].get("coschedule", "workspaces") - if currentCoschedule != "disabled": - logger.info(f"Updating Tekton coschedule setting from '{currentCoschedule}' to 'disabled'") - featureFlagsDict["data"]["coschedule"] = "disabled" - configMapAPI.patch(body=featureFlagsDict, namespace=namespace) - logger.info("Successfully updated Tekton feature flags to disable coschedule") - - # Restart the Tekton controller to apply changes - logger.info("Restarting tekton-pipelines-controller to apply feature flag changes") - deploymentAPI = dynClient.resources.get(api_version="apps/v1", kind="Deployment") - controller = deploymentAPI.get(name="tekton-pipelines-controller", namespace=namespace) - - # Trigger a rollout by updating an annotation - if controller.spec.template.metadata.annotations is None: - controller.spec.template.metadata.annotations = {} - controller.spec.template.metadata.annotations["kubectl.kubernetes.io/restartedAt"] = datetime.now().isoformat() - deploymentAPI.patch(body=controller, namespace=namespace) - - # Wait for the controller to be ready - logger.debug("Waiting for tekton-pipelines-controller to be ready after restart") - foundReadyController = waitForDeployment(dynClient, namespace=namespace, 
deploymentName="tekton-pipelines-controller") - if foundReadyController: - logger.info("Tekton controller restarted successfully") - return True - else: - logger.warning("Tekton controller restart may not have completed successfully") - return False - else: - logger.info("Tekton coschedule is already set to 'disabled', no changes needed") - return True - - except NotFoundError: - logger.warning(f"ConfigMap {configMapName} not found in {namespace}, it may not exist yet") - return False - - except Exception as e: - logger.error(f"Error configuring Tekton feature flags: {str(e)}") - return False - - def installOpenShiftPipelines(dynClient: DynamicClient, customStorageClassName: str = None) -> bool: """ Install the OpenShift Pipelines Operator and wait for it to be ready to use. @@ -167,11 +97,6 @@ def installOpenShiftPipelines(dynClient: DynamicClient, customStorageClassName: logger.error("OpenShift Pipelines Webhook is NOT installed and ready") return False - # Configure Tekton feature flags to disable coschedule - # ------------------------------------------------------------------------- - logger.debug("Configuring Tekton feature flags") - configureTektonFeatureFlags(dynClient) - # Workaround for bug in OpenShift Pipelines/Tekton # ------------------------------------------------------------------------- # Wait for the postgredb-tekton-results-postgres-0 PVC to be ready @@ -337,7 +262,7 @@ def updateTektonDefinitions(namespace: str, yamlFile: str) -> None: logger.debug(line) -def preparePipelinesNamespace(dynClient: DynamicClient, instanceId: str = None, storageClass: str = None, accessMode: str = None, waitForBind: bool = True, configureRBAC: bool = True, createBackupPVC: bool = False, backupStorageSize: str = "20Gi"): +def preparePipelinesNamespace(dynClient: DynamicClient, instanceId: str = None, storageClass: str = None, accessMode: str = None, waitForBind: bool = True, configureRBAC: bool = True, createConfigPVC: bool = True, createBackupPVC: bool = False, 
backupStorageSize: str = "20Gi"): """ Prepare a namespace for MAS pipelines by creating RBAC and PVC resources. @@ -351,6 +276,7 @@ def preparePipelinesNamespace(dynClient: DynamicClient, instanceId: str = None, accessMode (str, optional): Access mode for the PVC. Defaults to None. waitForBind (bool, optional): Whether to wait for PVC to bind. Defaults to True. configureRBAC (bool, optional): Whether to configure RBAC. Defaults to True. + createConfigPVC (bool, optional): Whether to create config PVC. Defaults to True. createBackupPVC (bool, optional): Whether to create backup PVC. Defaults to False. backupStorageSize (str, optional): Size of the backup PVC storage. Defaults to "20Gi". @@ -383,33 +309,36 @@ def preparePipelinesNamespace(dynClient: DynamicClient, instanceId: str = None, if instanceId is not None: pvcAPI = dynClient.resources.get(api_version="v1", kind="PersistentVolumeClaim") - # Create config PVC - template = env.get_template("pipelines-pvc.yml.j2") - renderedTemplate = template.render( - mas_instance_id=instanceId, - pipeline_storage_class=storageClass, - pipeline_storage_accessmode=accessMode - ) - logger.debug(renderedTemplate) - pvc = yaml.safe_load(renderedTemplate) - pvcAPI.apply(body=pvc, namespace=namespace) - # Automatically determine if we should wait for PVC binding based on storage class volumeBindingMode = getStorageClassVolumeBindingMode(dynClient, storageClass) waitForBind = (volumeBindingMode == "Immediate") - if waitForBind: - logger.info(f"Storage class {storageClass} uses volumeBindingMode={volumeBindingMode}, waiting for config PVC to bind") - pvcIsBound = False - while not pvcIsBound: - configPVC = pvcAPI.get(name="config-pvc", namespace=namespace) - if configPVC.status.phase == "Bound": - pvcIsBound = True - else: - logger.debug("Waiting 15s before checking status of config PVC again") - logger.debug(configPVC) - sleep(15) - else: - logger.info(f"Storage class {storageClass} uses volumeBindingMode={volumeBindingMode}, skipping 
config PVC bind wait") + + # Create config PVC if requested + if createConfigPVC: + logger.info("Creating config PVC") + template = env.get_template("pipelines-pvc.yml.j2") + renderedTemplate = template.render( + mas_instance_id=instanceId, + pipeline_storage_class=storageClass, + pipeline_storage_accessmode=accessMode + ) + logger.debug(renderedTemplate) + pvc = yaml.safe_load(renderedTemplate) + pvcAPI.apply(body=pvc, namespace=namespace) + + if waitForBind: + logger.info(f"Storage class {storageClass} uses volumeBindingMode={volumeBindingMode}, waiting for config PVC to bind") + pvcIsBound = False + while not pvcIsBound: + configPVC = pvcAPI.get(name="config-pvc", namespace=namespace) + if configPVC.status.phase == "Bound": + pvcIsBound = True + else: + logger.debug("Waiting 15s before checking status of config PVC again") + logger.debug(configPVC) + sleep(15) + else: + logger.info(f"Storage class {storageClass} uses volumeBindingMode={volumeBindingMode}, skipping config PVC bind wait") # Create backup PVC if requested if createBackupPVC: diff --git a/src/mas/devops/templates/pipelinerun-backup.yml.j2 b/src/mas/devops/templates/pipelinerun-backup.yml.j2 index b15421c9..7c9bb94a 100644 --- a/src/mas/devops/templates/pipelinerun-backup.yml.j2 +++ b/src/mas/devops/templates/pipelinerun-backup.yml.j2 @@ -12,9 +12,6 @@ spec: timeouts: pipeline: "0" workspaces: - - name: shared-configs - persistentVolumeClaim: - claimName: config-pvc - name: shared-backups persistentVolumeClaim: claimName: backup-pvc diff --git a/src/mas/devops/templates/pipelinerun-restore.yml.j2 b/src/mas/devops/templates/pipelinerun-restore.yml.j2 index 3bf43934..5a17786f 100644 --- a/src/mas/devops/templates/pipelinerun-restore.yml.j2 +++ b/src/mas/devops/templates/pipelinerun-restore.yml.j2 @@ -12,9 +12,6 @@ spec: timeouts: pipeline: "0" workspaces: - - name: shared-configs - persistentVolumeClaim: - claimName: config-pvc - name: shared-backups persistentVolumeClaim: claimName: backup-pvc