From 1c13b31784d1b979317c565808311f637f91ae49 Mon Sep 17 00:00:00 2001 From: David Parker Date: Thu, 25 Dec 2025 17:22:52 +0000 Subject: [PATCH 1/4] [patch] Add method documentation --- .gitignore | 1 + src/mas/devops/db2.py | 109 ++++++++++++++++++++ src/mas/devops/ocp.py | 211 +++++++++++++++++++++++++++++++++++++-- src/mas/devops/olm.py | 78 ++++++++++++++- src/mas/devops/slack.py | 114 ++++++++++++++++++--- src/mas/devops/tekton.py | 136 +++++++++++++++++++++++-- 6 files changed, 616 insertions(+), 33 deletions(-) diff --git a/.gitignore b/.gitignore index 688c5d28..f182cf60 100644 --- a/.gitignore +++ b/.gitignore @@ -19,3 +19,4 @@ venv/ # Other kubectl.exe /build +/.vscode diff --git a/src/mas/devops/db2.py b/src/mas/devops/db2.py index 5aa8f84b..1f6ed37b 100644 --- a/src/mas/devops/db2.py +++ b/src/mas/devops/db2.py @@ -22,6 +22,21 @@ def get_db2u_instance_cr(custom_objects_api: client.CustomObjectsApi, mas_instance_id: str, mas_app_id: str, database_role='primary') -> dict: + """ + Retrieve the Db2uInstance custom resource for a specific MAS application database. + + Parameters: + custom_objects_api (client.CustomObjectsApi): Kubernetes custom objects API client + mas_instance_id (str): The ID of the MAS instance + mas_app_id (str): The ID of the MAS application (e.g., "manage", "iot") + database_role (str, optional): The database role, either 'primary' or 'standby'. Defaults to 'primary'. 
+ + Returns: + dict: The Db2uInstance custom resource as a dictionary + + Raises: + kubernetes.client.exceptions.ApiException: If the custom resource is not found or cannot be retrieved + """ cr_name = {'primary': f"db2wh-{mas_instance_id}-{mas_app_id}", 'standby': f"db2wh-{mas_instance_id}-{mas_app_id}-sdb"}[database_role] namespace = f"db2u-{mas_instance_id}" logger.debug(f"Getting Db2uInstance CR {cr_name} in {namespace}") @@ -38,28 +53,103 @@ def get_db2u_instance_cr(custom_objects_api: client.CustomObjectsApi, mas_instan def db2_pod_exec(core_v1_api: client.CoreV1Api, mas_instance_id: str, mas_app_id: str, command: list, database_role='primary') -> str: + """ + Execute a command in a DB2 pod for a specific MAS application database. + + Parameters: + core_v1_api (client.CoreV1Api): Kubernetes Core V1 API client + mas_instance_id (str): The ID of the MAS instance + mas_app_id (str): The ID of the MAS application (e.g., "manage", "iot") + command (list): The command to execute as a list of strings + database_role (str, optional): The database role, either 'primary' or 'standby'. Defaults to 'primary'. + + Returns: + str: The standard output from the command execution + + Raises: + Exception: If the command execution fails + """ pod_name = {'primary': f"c-db2wh-{mas_instance_id}-{mas_app_id}-db2u-0", 'standby': f"c-db2wh-{mas_instance_id}-{mas_app_id}-sdb-db2u-0"}[database_role] namespace = f"db2u-{mas_instance_id}" return execInPod(core_v1_api, pod_name, namespace, command) def db2_pod_exec_db2_get_db_cfg(core_v1_api: client.CoreV1Api, mas_instance_id: str, mas_app_id: str, db_name: str, database_role='primary') -> str: + """ + Execute 'db2 get db cfg' command in a DB2 pod to retrieve database configuration. 
+ + Parameters: + core_v1_api (client.CoreV1Api): Kubernetes Core V1 API client + mas_instance_id (str): The ID of the MAS instance + mas_app_id (str): The ID of the MAS application (e.g., "manage", "iot") + db_name (str): The name of the database to query + database_role (str, optional): The database role, either 'primary' or 'standby'. Defaults to 'primary'. + + Returns: + str: The output of the 'db2 get db cfg' command + + Raises: + Exception: If the command execution fails + """ command = ["su", "-lc", f"db2 get db cfg for {db_name}", "db2inst1"] return db2_pod_exec(core_v1_api, mas_instance_id, mas_app_id, command, database_role) def db2_pod_exec_db2_get_dbm_cfg(core_v1_api: client.CoreV1Api, mas_instance_id: str, mas_app_id: str, database_role='primary') -> str: + """ + Execute 'db2 get dbm cfg' command in a DB2 pod to retrieve database manager configuration. + + Parameters: + core_v1_api (client.CoreV1Api): Kubernetes Core V1 API client + mas_instance_id (str): The ID of the MAS instance + mas_app_id (str): The ID of the MAS application (e.g., "manage", "iot") + database_role (str, optional): The database role, either 'primary' or 'standby'. Defaults to 'primary'. + + Returns: + str: The output of the 'db2 get dbm cfg' command + + Raises: + Exception: If the command execution fails + """ command = ["su", "-lc", "db2 get dbm cfg", "db2inst1"] return db2_pod_exec(core_v1_api, mas_instance_id, mas_app_id, command, database_role) def db2_pod_exec_db2set(core_v1_api: client.CoreV1Api, mas_instance_id: str, mas_app_id: str, database_role='primary') -> str: + """ + Execute 'db2set' command in a DB2 pod to retrieve registry configuration variables. + + Parameters: + core_v1_api (client.CoreV1Api): Kubernetes Core V1 API client + mas_instance_id (str): The ID of the MAS instance + mas_app_id (str): The ID of the MAS application (e.g., "manage", "iot") + database_role (str, optional): The database role, either 'primary' or 'standby'. Defaults to 'primary'. 
+ + Returns: + str: The output of the 'db2set' command + + Raises: + Exception: If the command execution fails + """ command = ["su", "-lc", "db2set", "db2inst1"] return db2_pod_exec(core_v1_api, mas_instance_id, mas_app_id, command, database_role) def cr_pod_v_matches(cr_k: str, cr_v: str, pod_v: str) -> bool: + """ + Compare a configuration value from the Db2uInstance CR with the actual value from the DB2 pod. + This function handles special cases where the CR and pod values are expressed differently + even when they represent the same configuration (e.g., "8192 AUTOMATIC" vs "AUTOMATIC(8192)"). + + Parameters: + cr_k (str): The configuration parameter key/name + cr_v (str): The configuration value from the Db2uInstance CR + pod_v (str): The actual configuration value from the DB2 pod + + Returns: + bool: True if the values match (considering special cases), False otherwise + """ logger.debug(f"[{cr_k}] '{cr_v}' ~= '{pod_v}'") # special cases where cr_v and pod_v values are expressed differently even if they mean the same thing if cr_k in ["MIRRORLOGPATH"]: @@ -257,7 +347,26 @@ def check_reg_cfg(db2u_instance_cr: dict, core_v1_api: client.CoreV1Api, mas_ins def validate_db2_config(k8s_client: client.api_client.ApiClient, mas_instance_id: str, mas_app_id: str, database_role='primary'): + """ + Validate that the DB2 configuration in the Db2uInstance CR matches the actual configuration in the DB2 pods. + This function orchestrates validation of database configuration (db cfg), database manager + configuration (dbm cfg), and registry configuration (db2set) by comparing values from the + Db2uInstance custom resource against the actual running configuration in DB2. + + Parameters: + k8s_client (client.api_client.ApiClient): Kubernetes API client + mas_instance_id (str): The ID of the MAS instance + mas_app_id (str): The ID of the MAS application (e.g., "manage", "iot") + database_role (str, optional): The database role, either 'primary' or 'standby'. 
Defaults to 'primary'. + + Returns: + None: Logs results and raises an exception if any validation checks fail + + Raises: + Exception: If any configuration mismatches are detected between the CR and actual DB2 configuration. + The exception contains a dict with 'message' and 'details' keys listing all failures. + """ core_v1_api = client.CoreV1Api(k8s_client) custom_objects_api = client.CustomObjectsApi(k8s_client) diff --git a/src/mas/devops/ocp.py b/src/mas/devops/ocp.py index 24d9a2dd..a091f373 100644 --- a/src/mas/devops/ocp.py +++ b/src/mas/devops/ocp.py @@ -27,7 +27,20 @@ def connect(server: str, token: str, skipVerify: bool = False) -> bool: """ - Connect to target OCP + Connect to a target OpenShift Container Platform (OCP) cluster. + + Configures kubectl/oc context with the provided server URL and authentication token. + + Parameters: + server (str): The OpenShift cluster API server URL (e.g., "https://api.cluster.example.com:6443") + token (str): The authentication token for cluster access + skipVerify (bool, optional): Whether to skip TLS certificate verification. Defaults to False. + + Returns: + bool: True if connection was successful, False if kubectl is not found on the path + + Raises: + KubectlNotFoundError: If kubectl/oc is not available in the system PATH """ logger.info(f"Connect(server={server}, token=***)") @@ -63,7 +76,18 @@ def connect(server: str, token: str, skipVerify: bool = False) -> bool: def getClusterVersion(dynClient: DynamicClient) -> str: """ - Get a namespace + Get the current OpenShift cluster version. + + Retrieves the completed cluster version from the ClusterVersion custom resource. 
+ + Parameters: + dynClient (DynamicClient): OpenShift Dynamic Client + + Returns: + str: The cluster version string (e.g., "4.12.0"), or None if not found + + Raises: + NotFoundError: If the ClusterVersion resource cannot be retrieved """ clusterVersionAPI = dynClient.resources.get(api_version="config.openshift.io/v1", kind="ClusterVersion") @@ -79,6 +103,16 @@ def getClusterVersion(dynClient: DynamicClient) -> str: def isClusterVersionInRange(version: str, releases: list[str]) -> bool: + """ + Check if a cluster version matches any of the specified release versions. + + Parameters: + version (str): The cluster version to check (e.g., "4.12.0") + releases (list[str]): List of release version prefixes to match against (e.g., ["4.12", "4.13"]) + + Returns: + bool: True if the version starts with any of the release prefixes, False otherwise + """ if releases is not None: for release in releases: if version.startswith(f"{release}."): @@ -88,7 +122,17 @@ def isClusterVersionInRange(version: str, releases: list[str]) -> bool: def getNamespace(dynClient: DynamicClient, namespace: str) -> dict: """ - Get a namespace + Get a Kubernetes namespace by name. + + Parameters: + dynClient (DynamicClient): OpenShift Dynamic Client + namespace (str): The name of the namespace to retrieve + + Returns: + dict: The namespace resource as a dictionary, or an empty dict if not found + + Raises: + NotFoundError: If the namespace does not exist """ namespaceAPI = dynClient.resources.get(api_version="v1", kind="Namespace") @@ -104,7 +148,21 @@ def getNamespace(dynClient: DynamicClient, namespace: str) -> dict: def createNamespace(dynClient: DynamicClient, namespace: str, kyvernoLabel: str = None) -> bool: """ - Create a namespace if it does not exist + Create a Kubernetes namespace if it does not already exist. + + If the namespace exists and a Kyverno label is provided, the namespace will be patched + to include the label. 
+ + Parameters: + dynClient (DynamicClient): OpenShift Dynamic Client + namespace (str): The name of the namespace to create + kyvernoLabel (str, optional): Value for the 'ibm.com/kyverno' label. Defaults to None. + + Returns: + bool: Always returns True + + Raises: + NotFoundError: If the namespace resource cannot be accessed """ namespaceAPI = dynClient.resources.get(api_version="v1", kind="Namespace") try: @@ -138,7 +196,17 @@ def createNamespace(dynClient: DynamicClient, namespace: str, kyvernoLabel: str def deleteNamespace(dynClient: DynamicClient, namespace: str) -> bool: """ - Delete a namespace if it exists + Delete a Kubernetes namespace if it exists. + + Parameters: + dynClient (DynamicClient): OpenShift Dynamic Client + namespace (str): The name of the namespace to delete + + Returns: + bool: Always returns True + + Raises: + NotFoundError: If the namespace does not exist (caught and logged) """ namespaceAPI = dynClient.resources.get(api_version="v1", kind="Namespace") try: @@ -150,6 +218,21 @@ def deleteNamespace(dynClient: DynamicClient, namespace: str) -> bool: def waitForCRD(dynClient: DynamicClient, crdName: str) -> bool: + """ + Wait for a Custom Resource Definition (CRD) to be established and ready. + + Polls the CRD status up to 100 times with 5-second intervals (max ~8 minutes). 
+ + Parameters: + dynClient (DynamicClient): OpenShift Dynamic Client + crdName (str): The name of the CRD to wait for (e.g., "suites.core.mas.ibm.com") + + Returns: + bool: True if the CRD becomes established, False if timeout is reached + + Raises: + NotFoundError: If the CRD is not found (caught and retried) + """ crdAPI = dynClient.resources.get(api_version="apiextensions.k8s.io/v1", kind="CustomResourceDefinition") maxRetries = 100 foundReadyCRD = False @@ -179,6 +262,22 @@ def waitForCRD(dynClient: DynamicClient, crdName: str) -> bool: def waitForDeployment(dynClient: DynamicClient, namespace: str, deploymentName: str) -> bool: + """ + Wait for a Kubernetes Deployment to have at least one ready replica. + + Polls the deployment status up to 100 times with 5-second intervals (max ~8 minutes). + + Parameters: + dynClient (DynamicClient): OpenShift Dynamic Client + namespace (str): The namespace containing the deployment + deploymentName (str): The name of the deployment to wait for + + Returns: + bool: True if the deployment becomes ready, False if timeout is reached + + Raises: + NotFoundError: If the deployment is not found (caught and retried) + """ deploymentAPI = dynClient.resources.get(api_version="apps/v1", kind="Deployment") maxRetries = 100 foundReadyDeployment = False @@ -202,18 +301,55 @@ def waitForDeployment(dynClient: DynamicClient, namespace: str, deploymentName: def getConsoleURL(dynClient: DynamicClient) -> str: + """ + Get the OpenShift web console URL. 
+ + Parameters: + dynClient (DynamicClient): OpenShift Dynamic Client + + Returns: + str: The HTTPS URL of the OpenShift console (e.g., "https://console-openshift-console.apps.cluster.example.com") + + Raises: + NotFoundError: If the console route is not found + """ routesAPI = dynClient.resources.get(api_version="route.openshift.io/v1", kind="Route") consoleRoute = routesAPI.get(name="console", namespace="openshift-console") return f"https://{consoleRoute.spec.host}" def getNodes(dynClient: DynamicClient) -> str: + """ + Get all nodes in the cluster. + + Parameters: + dynClient (DynamicClient): OpenShift Dynamic Client + + Returns: + list: List of node resources as dictionaries + + Raises: + NotFoundError: If nodes cannot be retrieved + """ nodesAPI = dynClient.resources.get(api_version="v1", kind="Node") nodes = nodesAPI.get().to_dict()['items'] return nodes def getStorageClass(dynClient: DynamicClient, name: str) -> str: + """ + Get a specific StorageClass by name. + + Parameters: + dynClient (DynamicClient): OpenShift Dynamic Client + name (str): The name of the StorageClass to retrieve + + Returns: + StorageClass: The StorageClass resource, or None if not found + + Raises: + NotFoundError: If the StorageClass does not exist (caught and returns None) + """ try: storageClassAPI = dynClient.resources.get(api_version="storage.k8s.io/v1", kind="StorageClass") storageclass = storageClassAPI.get(name=name) @@ -223,16 +359,50 @@ def getStorageClass(dynClient: DynamicClient, name: str) -> str: def getStorageClasses(dynClient: DynamicClient) -> list: + """ + Get all StorageClasses in the cluster. 
+ + Parameters: + dynClient (DynamicClient): OpenShift Dynamic Client + + Returns: + list: List of StorageClass resources + + Raises: + NotFoundError: If StorageClasses cannot be retrieved + """ storageClassAPI = dynClient.resources.get(api_version="storage.k8s.io/v1", kind="StorageClass") storageClasses = storageClassAPI.get().items return storageClasses def isSNO(dynClient: DynamicClient) -> bool: + """ + Check if the cluster is a Single Node OpenShift (SNO) deployment. + + Parameters: + dynClient (DynamicClient): OpenShift Dynamic Client + + Returns: + bool: True if the cluster has exactly one node, False otherwise + """ return len(getNodes(dynClient)) == 1 def crdExists(dynClient: DynamicClient, crdName: str) -> bool: + """ + Check if a Custom Resource Definition (CRD) exists in the cluster. + + Parameters: + dynClient (DynamicClient): OpenShift Dynamic Client + crdName (str): The name of the CRD to check (e.g., "suites.core.mas.ibm.com") + + Returns: + bool: True if the CRD exists, False otherwise + + Raises: + NotFoundError: If the CRD does not exist (caught and returns False) + """ crdAPI = dynClient.resources.get(api_version="apiextensions.k8s.io/v1", kind="CustomResourceDefinition") try: crdAPI.get(name=crdName) @@ -245,7 +415,20 @@ def crdExists(dynClient: DynamicClient, crdName: str) -> bool: def listInstances(dynClient: DynamicClient, apiVersion: str, kind: str) -> list: """ - Get a list of instances of a particular CR on the cluster + Get a list of instances of a particular custom resource on the cluster. + + Logs information about each instance found, including name and reconciled version. 
+ + Parameters: + dynClient (DynamicClient): OpenShift Dynamic Client + apiVersion (str): The API version of the custom resource (e.g., "core.mas.ibm.com/v1") + kind (str): The kind of custom resource (e.g., "Suite") + + Returns: + list: List of custom resource instances as dictionaries + + Raises: + NotFoundError: If the custom resource type is not found """ api = dynClient.resources.get(api_version=apiVersion, kind=kind) instances = api.get().to_dict()['items'] @@ -260,7 +443,21 @@ def listInstances(dynClient: DynamicClient, apiVersion: str, kind: str) -> list: def waitForPVC(dynClient: DynamicClient, namespace: str, pvcName: str) -> bool: """ - We will allow up to 10 minutes for a PVC to report a successful binding + Wait for a PersistentVolumeClaim (PVC) to be bound. + + Allows up to 10 minutes for a PVC to report successful binding, with increasing + retry delays (30s, then 1m, 2m, and 5m intervals). + + Parameters: + dynClient (DynamicClient): OpenShift Dynamic Client + namespace (str): The namespace containing the PVC + pvcName (str): The name of the PVC to wait for + + Returns: + bool: True if the PVC becomes bound, False if timeout is reached + + Raises: + NotFoundError: If the PVC is not found (caught and retried) """ pvcAPI = dynClient.resources.get(api_version="v1", kind="PersistentVolumeClaim") maxRetries = 20 diff --git a/src/mas/devops/olm.py b/src/mas/devops/olm.py index 844e4343..5b0967b2 100644 --- a/src/mas/devops/olm.py +++ b/src/mas/devops/olm.py @@ -28,8 +28,22 @@ class OLMException(Exception): def getPackageManifest(dynClient: DynamicClient, packageName: str, catalogSourceNamespace: str = "openshift-marketplace"): - # Assert that the PackageManifest exists - # ----------------------------------------------------------------------------- + """ + Get the PackageManifest for an operator package. + + Retrieves package information including available channels and catalog source. 
+ + Parameters: + dynClient (DynamicClient): OpenShift Dynamic Client + packageName (str): Name of the operator package (e.g., "ibm-mas-operator") + catalogSourceNamespace (str, optional): Namespace containing the catalog source. Defaults to "openshift-marketplace". + + Returns: + PackageManifest: The package manifest resource, or None if not found + + Raises: + NotFoundError: If the package manifest is not found (caught and returns None) + """ packagemanifestAPI = dynClient.resources.get(api_version="packages.operators.coreos.com/v1", kind="PackageManifest") try: manifestResource = packagemanifestAPI.get(name=packageName, namespace=catalogSourceNamespace) @@ -41,7 +55,23 @@ def getPackageManifest(dynClient: DynamicClient, packageName: str, catalogSource def ensureOperatorGroupExists(dynClient: DynamicClient, env: Environment, namespace: str, installMode: str = "OwnNamespace"): - # Create a new OperatorGroup if necessary + """ + Ensure an OperatorGroup exists in the specified namespace. + + Creates a new OperatorGroup if one doesn't already exist in the namespace. + + Parameters: + dynClient (DynamicClient): OpenShift Dynamic Client + env (Environment): Jinja2 environment for template rendering + namespace (str): The namespace to check/create the OperatorGroup in + installMode (str, optional): The install mode for the OperatorGroup. Defaults to "OwnNamespace". + + Returns: + None + + Raises: + NotFoundError: If resources cannot be accessed + """ operatorGroupsAPI = dynClient.resources.get(api_version="operators.coreos.com/v1", kind="OperatorGroup") operatorGroupList = operatorGroupsAPI.get(namespace=namespace) if len(operatorGroupList.items) == 0: @@ -59,6 +89,22 @@ def ensureOperatorGroupExists(dynClient: DynamicClient, env: Environment, namesp def getSubscription(dynClient: DynamicClient, namespace: str, packageName: str): + """ + Get the Subscription for an operator package in a namespace. 
+ + Searches for subscriptions using label selector based on package name and namespace. + + Parameters: + dynClient (DynamicClient): OpenShift Dynamic Client + namespace (str): The namespace to search in + packageName (str): Name of the operator package + + Returns: + Subscription: The subscription resource, or None if not found + + Raises: + NotFoundError: If no subscription is found (returns None) + """ labelSelector = f"operators.coreos.com/{packageName}.{namespace}" logger.debug(f"Get Subscription for {packageName} in {namespace}") subscriptionsAPI = dynClient.resources.get(api_version="operators.coreos.com/v1alpha1", kind="Subscription") @@ -73,8 +119,30 @@ def getSubscription(dynClient: DynamicClient, namespace: str, packageName: str): def applySubscription(dynClient: DynamicClient, namespace: str, packageName: str, packageChannel: str = None, catalogSource: str = None, catalogSourceNamespace: str = "openshift-marketplace", config: dict = None, installMode: str = "OwnNamespace"): """ - Usage: - createSubscription(dynClient, "testns1", "sub1", "ibm-sls") # use default channel, & auto-detect CatalogSource + Create or update an operator subscription in a namespace. + + Automatically detects default channel and catalog source from PackageManifest if not provided. + Ensures an OperatorGroup exists before creating the subscription. + + Parameters: + dynClient (DynamicClient): OpenShift Dynamic Client + namespace (str): The namespace to create the subscription in + packageName (str): Name of the operator package (e.g., "ibm-mas-operator") + packageChannel (str, optional): Subscription channel. Auto-detected if None. Defaults to None. + catalogSource (str, optional): Catalog source name. Auto-detected if None. Defaults to None. + catalogSourceNamespace (str, optional): Namespace of the catalog source. Defaults to "openshift-marketplace". + config (dict, optional): Additional subscription configuration. Defaults to None. 
+ installMode (str, optional): Install mode for the OperatorGroup. Defaults to "OwnNamespace". + + Returns: + Subscription: The created or updated subscription resource + + Raises: + OLMException: If the package is not available in any catalog + NotFoundError: If resources cannot be created + + Example: + applySubscription(dynClient, "my-namespace", "ibm-sls") # use defaults """ if catalogSourceNamespace is None: catalogSourceNamespace = "openshift-marketplace" diff --git a/src/mas/devops/slack.py b/src/mas/devops/slack.py index 6aa39bc0..a97cfd5d 100644 --- a/src/mas/devops/slack.py +++ b/src/mas/devops/slack.py @@ -26,6 +26,17 @@ def __init__(cls, *args, **kwargs): @property def client(cls) -> WebClient: + """ + Get or create the Slack WebClient instance. + + Lazily initializes the Slack client using the SLACK_TOKEN environment variable. + + Returns: + WebClient: The Slack WebClient instance + + Raises: + Exception: If SLACK_TOKEN environment variable is not set + """ if cls._client is not None: return cls._client else: @@ -37,9 +48,21 @@ def client(cls) -> WebClient: cls._client = WebClient(token=SLACK_TOKEN) return cls._client - # Post message to Slack - # ----------------------------------------------------------------------------- def postMessageBlocks(cls, channelList: str | list[str], messageBlocks: list, threadId: str = None) -> SlackResponse | list[SlackResponse]: + """ + Post a message with block formatting to one or more Slack channels. + + Parameters: + channelList (str | list[str]): Single channel ID/name or list of channel IDs/names + messageBlocks (list): List of Slack block kit elements defining the message structure + threadId (str, optional): Thread timestamp to post as a reply. Defaults to None. 
+ + Returns: + SlackResponse | list[SlackResponse]: Single response if one channel, list of responses if multiple channels + + Raises: + Exception: If message posting fails + """ responses: list[SlackResponse] = [] if isinstance(channelList, str): @@ -85,6 +108,21 @@ def postMessageBlocks(cls, channelList: str | list[str], messageBlocks: list, th return responses if len(responses) > 1 else responses[0] def postMessageText(cls, channelList: str | list[str], message: str, attachments=None, threadId: str = None) -> SlackResponse | list[SlackResponse]: + """ + Post a plain text message to one or more Slack channels. + + Parameters: + channelList (str | list[str]): Single channel ID/name or list of channel IDs/names + message (str): The text message to post + attachments (list, optional): List of message attachments. Defaults to None. + threadId (str, optional): Thread timestamp to post as a reply. Defaults to None. + + Returns: + SlackResponse | list[SlackResponse]: Single response if one channel, list of responses if multiple channels + + Raises: + Exception: If message posting fails + """ responses: list[SlackResponse] = [] if isinstance(channelList, str): @@ -129,6 +167,21 @@ def postMessageText(cls, channelList: str | list[str], message: str, attachments def createMessagePermalink( cls, slackResponse: SlackResponse = None, channelId: str = None, messageTimestamp: str = None, domain: str = "ibm-mas" ) -> str: + """ + Create a permanent link to a Slack message. + + Parameters: + slackResponse (SlackResponse, optional): Slack response object containing channel and timestamp. Defaults to None. + channelId (str, optional): Channel ID if not using slackResponse. Defaults to None. + messageTimestamp (str, optional): Message timestamp if not using slackResponse. Defaults to None. + domain (str, optional): Slack workspace domain. Defaults to "ibm-mas". 
+ + Returns: + str: Permanent URL to the Slack message + + Raises: + Exception: If neither slackResponse nor both channelId and messageTimestamp are provided + """ if slackResponse is not None: channelId = slackResponse["channel"] messageTimestamp = slackResponse["ts"] @@ -137,9 +190,21 @@ def createMessagePermalink( return f"https://{domain}.slack.com/archives/{channelId}/p{messageTimestamp.replace('.', '')}" - # Edit message in Slack - # ----------------------------------------------------------------------------- def updateMessageBlocks(cls, channelName: str, threadId: str, messageBlocks: list) -> SlackResponse: + """ + Update an existing Slack message with new block content. + + Parameters: + channelName (str): The channel ID or name containing the message + threadId (str): The timestamp of the message to update + messageBlocks (list): List of Slack block kit elements for the updated message + + Returns: + SlackResponse: Response from the Slack API + + Raises: + Exception: If message update fails + """ logger.debug(f"Updating {len(messageBlocks)} block message in {channelName} on thread {threadId} in Slack") response = cls.client.chat_update( channel=channelName, @@ -158,28 +223,53 @@ def updateMessageBlocks(cls, channelName: str, threadId: str, messageBlocks: lis logger.warning("Failed to call Slack API") return response - # Build header block for Slack message - # ----------------------------------------------------------------------------- def buildHeader(cls, title: str) -> dict: + """ + Build a header block for a Slack message. + + Parameters: + title (str): The header text + + Returns: + dict: Slack block kit header element + """ return {"type": "header", "text": {"type": "plain_text", "text": title, "emoji": True}} - # Build section block for Slack message - # ----------------------------------------------------------------------------- def buildSection(cls, text: str) -> dict: + """ + Build a section block for a Slack message with markdown text. 
+ + Parameters: + text (str): The section text (supports markdown formatting) + + Returns: + dict: Slack block kit section element + """ return {"type": "section", "text": {"type": "mrkdwn", "text": text}} - # Build context block for Slack message - # ----------------------------------------------------------------------------- def buildContext(cls, texts: list) -> dict: + """ + Build a context block for a Slack message with multiple text elements. + + Parameters: + texts (list): List of text strings to include in the context + + Returns: + dict: Slack block kit context element + """ elements = [] for text in texts: elements.append({"type": "mrkdwn", "text": text}) return {"type": "context", "elements": elements} - # Build divider block for Slack message - # ----------------------------------------------------------------------------- def buildDivider(cls) -> dict: + """ + Build a divider block for a Slack message. + + Returns: + dict: Slack block kit divider element + """ return {"type": "divider"} diff --git a/src/mas/devops/tekton.py b/src/mas/devops/tekton.py index 6c169ce2..3e135b1d 100644 --- a/src/mas/devops/tekton.py +++ b/src/mas/devops/tekton.py @@ -29,7 +29,21 @@ def installOpenShiftPipelines(dynClient: DynamicClient, customStorageClassName: str = None) -> bool: """ - Install the OpenShift Pipelines Operator and wait for it to be ready to use + Install the OpenShift Pipelines Operator and wait for it to be ready to use. + + Creates the operator subscription, waits for the CRD and webhook to be ready, + and handles PVC storage class configuration if needed. + + Parameters: + dynClient (DynamicClient): OpenShift Dynamic Client + customStorageClassName (str, optional): Custom storage class name for Tekton PVC. Defaults to None. 
+ + Returns: + bool: True if installation is successful, False otherwise + + Raises: + NotFoundError: If the package manifest is not found + UnprocessibleEntityError: If the subscription cannot be created """ packagemanifestAPI = dynClient.resources.get(api_version="packages.operators.coreos.com/v1", kind="PackageManifest") subscriptionsAPI = dynClient.resources.get(api_version="operators.coreos.com/v1alpha1", kind="Subscription") @@ -160,15 +174,19 @@ def addMissingStorageClassToTektonPVC(dynClient: DynamicClient, namespace: str, def updateTektonDefinitions(namespace: str, yamlFile: str) -> None: """ - Install/update the MAS tekton pipeline and task definitions + Install or update MAS Tekton pipeline and task definitions from a YAML file. + + Uses kubectl to apply a YAML file containing multiple resource types. - Unfortunately there's no API equivalent of what the kubectl CLI gives - us with the ability to just apply a file containing a mix of resource types + Parameters: + namespace (str): The namespace to apply the definitions to + yamlFile (str): Path to the YAML file containing Tekton definitions - https://github.com/gtaylor/kubeconfig-python/ + Returns: + None - Throws: - - kubeconfig.exceptions.KubectlCommandError + Raises: + kubeconfig.exceptions.KubectlCommandError: If kubectl command fails """ result = kubectl.run(subcmd_args=['apply', '-n', namespace, '-f', yamlFile]) for line in result.split("\n"): @@ -176,6 +194,26 @@ def updateTektonDefinitions(namespace: str, yamlFile: str) -> None: def preparePipelinesNamespace(dynClient: DynamicClient, instanceId: str = None, storageClass: str = None, accessMode: str = None, waitForBind: bool = True, configureRBAC: bool = True): + """ + Prepare a namespace for MAS pipelines by creating RBAC and PVC resources. + + Creates cluster-wide or instance-specific pipeline namespace with necessary + role bindings and persistent volume claims. 
+ + Parameters: + dynClient (DynamicClient): OpenShift Dynamic Client + instanceId (str, optional): MAS instance ID. If None, creates cluster-wide namespace. Defaults to None. + storageClass (str, optional): Storage class for the PVC. Defaults to None. + accessMode (str, optional): Access mode for the PVC. Defaults to None. + waitForBind (bool, optional): Whether to wait for PVC to bind. Defaults to True. + configureRBAC (bool, optional): Whether to configure RBAC. Defaults to True. + + Returns: + None + + Raises: + NotFoundError: If resources cannot be created + """ templateDir = path.join(path.abspath(path.dirname(__file__)), "templates") env = Environment( loader=FileSystemLoader(searchpath=templateDir) @@ -222,6 +260,26 @@ def preparePipelinesNamespace(dynClient: DynamicClient, instanceId: str = None, def prepareAiServicePipelinesNamespace(dynClient: DynamicClient, instanceId: str = None, storageClass: str = None, accessMode: str = None, waitForBind: bool = True, configureRBAC: bool = True): + """ + Prepare a namespace for AI Service pipelines by creating RBAC and PVC resources. + + Creates AI Service-specific pipeline namespace with necessary role bindings + and persistent volume claims. + + Parameters: + dynClient (DynamicClient): OpenShift Dynamic Client + instanceId (str, optional): AI Service instance ID. Defaults to None. + storageClass (str, optional): Storage class for the PVC. Defaults to None. + accessMode (str, optional): Access mode for the PVC. Defaults to None. + waitForBind (bool, optional): Whether to wait for PVC to bind. Defaults to True. + configureRBAC (bool, optional): Whether to configure RBAC. Defaults to True. 
+ + Returns: + None + + Raises: + NotFoundError: If resources cannot be created + """ templateDir = path.join(path.abspath(path.dirname(__file__)), "templates") env = Environment( loader=FileSystemLoader(searchpath=templateDir) @@ -262,6 +320,26 @@ def prepareAiServicePipelinesNamespace(dynClient: DynamicClient, instanceId: str def prepareInstallSecrets(dynClient: DynamicClient, namespace: str, slsLicenseFile: str = None, additionalConfigs: dict = None, certs: str = None, podTemplates: str = None) -> None: + """ + Create or update secrets required for MAS installation pipelines. + + Creates four secrets in the specified namespace: pipeline-additional-configs, + pipeline-sls-entitlement, pipeline-certificates, and pipeline-pod-templates. + + Parameters: + dynClient (DynamicClient): OpenShift Dynamic Client + namespace (str): The namespace to create secrets in + slsLicenseFile (str, optional): SLS license file content. Defaults to None (empty secret). + additionalConfigs (dict, optional): Additional configuration data. Defaults to None (empty secret). + certs (str, optional): Certificate data. Defaults to None (empty secret). + podTemplates (str, optional): Pod template data. Defaults to None (empty secret). + + Returns: + None + + Raises: + NotFoundError: If secrets cannot be created + """ secretsAPI = dynClient.resources.get(api_version="v1", kind="Secret") # 1. Secret/pipeline-additional-configs @@ -455,6 +533,23 @@ def launchUninstallPipeline(dynClient: DynamicClient, def launchPipelineRun(dynClient: DynamicClient, namespace: str, templateName: str, params: dict) -> str: + """ + Launch a Tekton PipelineRun from a template. + + Creates a PipelineRun resource by rendering a Jinja2 template with the provided parameters. 
+ + Parameters: + dynClient (DynamicClient): OpenShift Dynamic Client + namespace (str): The namespace to create the PipelineRun in + templateName (str): Name of the template file (without .yml.j2 extension) + params (dict): Parameters to pass to the template + + Returns: + str: Timestamp string used in the PipelineRun name (format: YYMMDD-HHMM) + + Raises: + NotFoundError: If the template or namespace is not found + """ pipelineRunsAPI = dynClient.resources.get(api_version="tekton.dev/v1beta1", kind="PipelineRun") timestamp = datetime.now().strftime("%y%m%d-%H%M") # Create the PipelineRun @@ -477,7 +572,20 @@ def launchPipelineRun(dynClient: DynamicClient, namespace: str, templateName: st def launchInstallPipeline(dynClient: DynamicClient, params: dict) -> str: """ - Create a PipelineRun to install the chosen MAS ( or AI Service ) instance (and selected dependencies) + Create a PipelineRun to install a MAS or AI Service instance with selected dependencies. + + Automatically detects whether to install MAS or AI Service based on the presence + of mas_instance_id in params. + + Parameters: + dynClient (DynamicClient): OpenShift Dynamic Client + params (dict): Installation parameters including instance ID and configuration + + Returns: + str: URL to the PipelineRun in the OpenShift console + + Raises: + NotFoundError: If resources cannot be created """ applicationType = "aiservice" if not params.get("mas_instance_id") else "mas" params["applicationType"] = applicationType @@ -491,7 +599,17 @@ def launchInstallPipeline(dynClient: DynamicClient, params: dict) -> str: def launchUpdatePipeline(dynClient: DynamicClient, params: dict) -> str: """ - Create a PipelineRun to update the Maximo Operator Catalog + Create a PipelineRun to update the Maximo Operator Catalog. 
+ + Parameters: + dynClient (DynamicClient): OpenShift Dynamic Client + params (dict): Update parameters + + Returns: + str: URL to the PipelineRun in the OpenShift console + + Raises: + NotFoundError: If resources cannot be created """ namespace = "mas-pipelines" timestamp = launchPipelineRun(dynClient, namespace, "pipelinerun-update", params) From af62165c29ae40b14ccf73d02e5cb2b85f6986e2 Mon Sep 17 00:00:00 2001 From: David Parker Date: Thu, 25 Dec 2025 17:32:52 +0000 Subject: [PATCH 2/4] More updates --- src/mas/devops/aiservice.py | 105 ++++++- src/mas/devops/sls.py | 49 ++- src/mas/devops/users.py | 610 ++++++++++++++++++++++++++++++++---- src/mas/devops/utils.py | 75 ++++- 4 files changed, 764 insertions(+), 75 deletions(-) diff --git a/src/mas/devops/aiservice.py b/src/mas/devops/aiservice.py index 04104c96..26a37a09 100644 --- a/src/mas/devops/aiservice.py +++ b/src/mas/devops/aiservice.py @@ -20,14 +20,51 @@ def listAiServiceInstances(dynClient: DynamicClient) -> list: """ - Get a list of AI Service instances on the cluster + Retrieve all AI Service instances from the OpenShift cluster. + + This function queries the cluster for AIServiceApp custom resources and returns + a list of all AI Service instances found. + + Args: + dynClient (DynamicClient): OpenShift dynamic client for cluster API interactions. + + Returns: + list: A list of dictionaries representing AI Service instances. + Returns an empty list if no instances are found or if errors occur. + + Example: + >>> from openshift.dynamic import DynamicClient + >>> client = DynamicClient(...) + >>> instances = listAiServiceInstances(client) + >>> for instance in instances: + ... 
print(f"Instance: {instance['metadata']['name']}") """ return listInstances(dynClient, "aiservice.ibm.com/v1", "AIServiceApp") def verifyAiServiceInstance(dynClient: DynamicClient, instanceId: str) -> bool: """ - Validate that the chosen AI Service instance exists + Verify that a specific AI Service instance exists in the cluster. + + This function checks if an AIServiceApp custom resource with the given instance ID + exists in the expected namespace. It handles various error conditions including + missing instances, missing CRDs, and authorization failures. + + Args: + dynClient (DynamicClient): OpenShift dynamic client for cluster API interactions. + instanceId (str): The unique identifier of the AI Service instance to verify. + + Returns: + bool: True if the instance exists and is accessible, False otherwise. + Returns False if the instance is not found, the CRD doesn't exist, + or authorization fails. + + Example: + >>> from openshift.dynamic import DynamicClient + >>> client = DynamicClient(...) + >>> exists = verifyAiServiceInstance(client, "aiservice-inst1") + >>> if exists: + ... print("AI Service instance found") """ try: aiserviceAPI = dynClient.resources.get(api_version="aiservice.ibm.com/v1", kind="AIServiceApp") @@ -47,14 +84,52 @@ def verifyAiServiceInstance(dynClient: DynamicClient, instanceId: str) -> bool: def listAiServiceTenantInstances(dynClient: DynamicClient) -> list: """ - Get a list of AI Service Tenant instances on the cluster + Retrieve all AI Service Tenant instances from the OpenShift cluster. + + This function queries the cluster for AIServiceTenant custom resources and returns + a list of all tenant instances found across all AI Service instances. + + Args: + dynClient (DynamicClient): OpenShift dynamic client for cluster API interactions. + + Returns: + list: A list of dictionaries representing AI Service Tenant instances. + Returns an empty list if no tenant instances are found or if errors occur. 
+ + Example: + >>> from openshift.dynamic import DynamicClient + >>> client = DynamicClient(...) + >>> tenants = listAiServiceTenantInstances(client) + >>> for tenant in tenants: + ... print(f"Tenant: {tenant['metadata']['name']}") """ return listInstances(dynClient, "aiservice.ibm.com/v1", "AIServiceTenant") def verifyAiServiceTenantInstance(dynClient: DynamicClient, instanceId: str, tenantId: str) -> bool: """ - Validate that the chosen AI Service Tenant exists + Verify that a specific AI Service Tenant exists in the cluster. + + This function checks if an AIServiceTenant custom resource with the given instance ID + and tenant ID exists in the expected namespace. The tenant resource name follows the + pattern "aiservice-{instanceId}-{tenantId}". + + Args: + dynClient (DynamicClient): OpenShift dynamic client for cluster API interactions. + instanceId (str): The unique identifier of the AI Service instance. + tenantId (str): The unique identifier of the tenant within the AI Service instance. + + Returns: + bool: True if the tenant exists and is accessible, False otherwise. + Returns False if the tenant is not found, the CRD doesn't exist, + or authorization fails. + + Example: + >>> from openshift.dynamic import DynamicClient + >>> client = DynamicClient(...) + >>> exists = verifyAiServiceTenantInstance(client, "aiservice-inst1", "tenant1") + >>> if exists: + ... print("AI Service Tenant found") """ try: aiserviceTenantAPI = dynClient.resources.get(api_version="aiservice.ibm.com/v1", kind="AIServiceTenant") @@ -72,9 +147,27 @@ def verifyAiServiceTenantInstance(dynClient: DynamicClient, instanceId: str, ten return False -def getAiserviceChannel(dynClient: DynamicClient, instanceId: str) -> str: +def getAiserviceChannel(dynClient: DynamicClient, instanceId: str) -> str | None: """ - Get the AI Service channel from the subscription + Retrieve the update channel for an AI Service instance. 
+ + This function queries the Operator Lifecycle Manager (OLM) subscription for the + AI Service instance to determine which update channel it is subscribed to. + + Args: + dynClient (DynamicClient): OpenShift dynamic client for cluster API interactions. + instanceId (str): The unique identifier of the AI Service instance. + + Returns: + str: The channel name (e.g., "v1.0", "stable") if the subscription exists, + None if the subscription is not found. + + Example: + >>> from openshift.dynamic import DynamicClient + >>> client = DynamicClient(...) + >>> channel = getAiserviceChannel(client, "aiservice-inst1") + >>> if channel: + ... print(f"AI Service is on channel: {channel}") """ aiserviceSubscription = getSubscription(dynClient, f"aiservice-{instanceId}", "ibm-aiservice") if aiserviceSubscription is None: diff --git a/src/mas/devops/sls.py b/src/mas/devops/sls.py index 1aabc115..119d436d 100644 --- a/src/mas/devops/sls.py +++ b/src/mas/devops/sls.py @@ -17,7 +17,28 @@ def listSLSInstances(dynClient: DynamicClient) -> list: """ - Get a list of SLS instances on the cluster + Retrieve all Suite License Service (SLS) instances from the OpenShift cluster. + + This function queries the cluster for LicenseService custom resources and returns + a list of all SLS instances found. It handles various error conditions gracefully, + including missing CRDs and authorization failures. + + Args: + dynClient (DynamicClient): OpenShift dynamic client for cluster API interactions. + + Returns: + list: A list of dictionaries representing SLS LicenseService instances. + Returns an empty list if no instances are found, the CRD doesn't exist, + or authorization fails. + + Raises: + No exceptions are raised; all errors are caught and logged internally. + + Example: + >>> from openshift.dynamic import DynamicClient + >>> client = DynamicClient(...) 
+ >>> instances = listSLSInstances(client) + >>> print(f"Found {len(instances)} SLS instances") """ try: slsAPI = dynClient.resources.get(api_version="sls.ibm.com/v1", kind="LicenseService") @@ -34,6 +55,32 @@ def listSLSInstances(dynClient: DynamicClient) -> list: def findSLSByNamespace(namespace: str, instances: list = None, dynClient: DynamicClient = None): + """ + Check if an SLS instance exists in a specific namespace. + + This function searches for Suite License Service instances in the specified namespace. + It can work with either a pre-fetched list of instances or dynamically query the cluster + using the provided DynamicClient. + + Args: + namespace (str): The OpenShift namespace to search for SLS instances. + instances (list, optional): Pre-fetched list of SLS instance dictionaries. + If None, dynClient must be provided. Defaults to None. + dynClient (DynamicClient, optional): OpenShift dynamic client for querying instances. + Required if instances is None. Defaults to None. + + Returns: + bool: True if an SLS instance is found in the specified namespace, False otherwise. + Also returns False if neither instances nor dynClient is provided. + + Example: + >>> # Using pre-fetched instances + >>> instances = listSLSInstances(client) + >>> exists = findSLSByNamespace("ibm-sls", instances=instances) + >>> + >>> # Using dynamic client + >>> exists = findSLSByNamespace("ibm-sls", dynClient=client) + """ if not instances and not dynClient: return False diff --git a/src/mas/devops/users.py b/src/mas/devops/users.py index 6baf291b..a6019099 100644 --- a/src/mas/devops/users.py +++ b/src/mas/devops/users.py @@ -21,14 +21,47 @@ class MASUserUtils(): - ''' - A collection of utilities for interacting with the MAS Core V3 User APIs and related APIs. - Each instance of this class is tied to a specific MAS instance and workspace ID. - ''' + """ + Utility class for managing IBM Maximo Application Suite (MAS) users and permissions. 
+ + This class provides a comprehensive set of methods for interacting with MAS Core V3 User APIs, + including user creation, workspace management, application permissions, and Manage-specific + operations. Each instance is bound to a specific MAS instance and workspace. + + The class handles authentication, TLS certificates, and API interactions with: + - MAS Core API (user management, workspaces, applications) + - MAS Admin Dashboard (authentication) + - Manage API (API keys, security groups) + + Attributes: + MAXADMIN (str): Constant for the MAXADMIN user identifier. + mas_instance_id (str): The MAS instance identifier. + mas_workspace_id (str): The workspace identifier within the MAS instance. + mas_core_namespace (str): Kubernetes namespace for MAS core components. + manage_namespace (str): Kubernetes namespace for Manage application. + + Example: + >>> from kubernetes import client, config + >>> config.load_kube_config() + >>> k8s_client = client.ApiClient() + >>> mas_utils = MASUserUtils("inst1", "masdev", k8s_client) + >>> user = mas_utils.get_user("user@example.com") + """ MAXADMIN = "MAXADMIN" def __init__(self, mas_instance_id: str, mas_workspace_id: str, k8s_client: client.api_client.ApiClient, coreapi_port: int = 443, admin_dashboard_port: int = 443, manage_api_port: int = 443): + """ + Initialize MASUserUtils for a specific MAS instance and workspace. + + Args: + mas_instance_id (str): The MAS instance identifier (e.g., "inst1"). + mas_workspace_id (str): The workspace identifier (e.g., "masdev"). + k8s_client (client.api_client.ApiClient): Kubernetes API client for cluster operations. + coreapi_port (int, optional): Port for MAS Core API internal service. Defaults to 443. + admin_dashboard_port (int, optional): Port for Admin Dashboard internal service. Defaults to 443. + manage_api_port (int, optional): Port for Manage API internal service. Defaults to 443. 
+ """ self.mas_instance_id = mas_instance_id self.mas_workspace_id = mas_workspace_id self.logger = logging.getLogger(f"{__name__}.{self.__class__.__name__}") @@ -164,6 +197,23 @@ def mas_workspace_application_ids(self): return self._mas_workspace_application_ids def get_user(self, user_id): + """ + Retrieve a user's details from MAS Core API. + + Args: + user_id (str): The unique identifier of the user to retrieve. + + Returns: + dict: User details dictionary if found, None if user doesn't exist (404). + + Raises: + Exception: If the API returns an unexpected status code. + + Example: + >>> user = mas_utils.get_user("user@example.com") + >>> if user: + ... print(f"User: {user['displayName']}") + """ self.logger.debug(f"Getting user {user_id}") url = f"{self.mas_api_url_internal}/v3/users/{user_id}" headers = { @@ -185,42 +235,51 @@ def get_user(self, user_id): raise Exception(f"{response.status_code} {response.text}") def get_or_create_user(self, payload): - ''' - User is identified by payload["id"] field - If user already exists, return their record. No attempt will be made to update the user if other fields of the payload differ from the existing user. - Otherwise, the user will be created. - - Example payload: - { - "id": user_id, - "status": {"active": True}, - "username": username, - "token": password, - "owner": "local", - "emails": [ - { - "value": email, - "type": "Work", - "primary": True - } - ], - "displayName": display_name, - "issuer": "local", - "permissions": { - "systemAdmin": True, - "userAdmin": True, - "apikeyAdmin": True - }, - "entitlement": { - "application": "PREMIUM", - "admin": "ADMIN_PREMIUM", - "alwaysReserveLicense": True - }, - "title": title, - "givenName": given_name, - "familyName": family_name - } - ''' + """ + Get an existing user or create a new one if not found. + + This method is idempotent - if the user already exists (identified by payload["id"]), + their existing record is returned without modification. 
If the user doesn't exist, + they are created with the provided payload. + + Args: + payload (dict): User definition dictionary containing user details. + Must include "id" field as the unique identifier. + + Returns: + dict: The user record (either existing or newly created). + + Raises: + Exception: If user creation fails with an unexpected status code. + + Example: + >>> user_payload = { + ... "id": "user@example.com", + ... "status": {"active": True}, + ... "username": "user@example.com", + ... "owner": "local", + ... "emails": [{ + ... "value": "user@example.com", + ... "type": "Work", + ... "primary": True + ... }], + ... "displayName": "John Doe", + ... "issuer": "local", + ... "permissions": { + ... "systemAdmin": False, + ... "userAdmin": True, + ... "apikeyAdmin": False + ... }, + ... "entitlement": { + ... "application": "PREMIUM", + ... "admin": "ADMIN_BASE", + ... "alwaysReserveLicense": True + ... }, + ... "givenName": "John", + ... "familyName": "Doe" + ... } + >>> user = mas_utils.get_or_create_user(user_payload) + """ existing_user = self.get_user(payload["id"]) if existing_user is not None: @@ -253,6 +312,23 @@ def get_or_create_user(self, payload): raise Exception(f"{response.status_code} {response.text}") def update_user(self, payload): + """ + Update an existing user's details. + + Args: + payload (dict): User definition dictionary with updated fields. + Must include "id" field to identify the user. + + Returns: + dict: Updated user record. + + Raises: + Exception: If the update fails or user doesn't exist. 
+ + Example: + >>> updated_payload = {"id": "user@example.com", "displayName": "Jane Doe"} + >>> user = mas_utils.update_user(updated_payload) + """ user_id = payload["id"] self.logger.debug(f"Updating user {user_id}") url = f"{self.mas_api_url_internal}/v3/users/{user_id}" @@ -273,6 +349,25 @@ def update_user(self, payload): raise Exception(f"{response.status_code} {response.text}") def update_user_display_name(self, user_id, display_name): + """ + Update only the display name of a user. + + This method performs a partial update (PATCH) to modify just the displayName field, + reducing the risk of race conditions from concurrent updates. + + Args: + user_id (str): The unique identifier of the user. + display_name (str): The new display name for the user. + + Returns: + dict: Updated user record. + + Raises: + Exception: If the update fails or user doesn't exist. + + Example: + >>> user = mas_utils.update_user_display_name("user@example.com", "Jane Smith") + """ self.logger.debug(f"Updating user display name {user_id} to {display_name}") url = f"{self.mas_api_url_internal}/v3/users/{user_id}" headers = { @@ -294,10 +389,31 @@ def update_user_display_name(self, user_id, display_name): raise Exception(f"{response.status_code} {response.text}") def link_user_to_local_idp(self, user_id, email_password=False): - ''' - Checks if user already has a local identity, no-op if so. - Assumes user exists, raises if not - ''' + """ + Link a user to the local identity provider (IDP). + + This method is idempotent - if the user already has a local identity, no action is taken. + The method creates a local authentication identity for the user, enabling them to log in + with username/password. + + Args: + user_id (str): The unique identifier of the user to link. + email_password (bool, optional): Whether to enable email/password authentication. + Defaults to False. + + Returns: + None: Always returns None (authentication token is not exposed). 
+ + Raises: + Exception: If the user doesn't exist or the linking operation fails. + + Note: + The API response contains a generated user token which is intentionally not logged + or returned for security reasons. + + Example: + >>> mas_utils.link_user_to_local_idp("user@example.com", email_password=True) + """ # For the sake of idempotency, check if the user already has a local identity user = self.get_user(user_id) @@ -335,9 +451,23 @@ def link_user_to_local_idp(self, user_id, email_password=False): return None def get_user_workspaces(self, user_id): - ''' - Assumes user exists, raises if not. - ''' + """ + Retrieve all workspaces that a user has access to. + + Args: + user_id (str): The unique identifier of the user. + + Returns: + list: List of workspace dictionaries the user has access to. + + Raises: + Exception: If the user doesn't exist (404) or the API call fails. + + Example: + >>> workspaces = mas_utils.get_user_workspaces("user@example.com") + >>> for ws in workspaces: + ... print(f"Workspace: {ws['id']}") + """ self.logger.debug(f"Getting workspaces for user {user_id}") url = f"{self.mas_api_url_internal}/v3/users/{user_id}/workspaces" headers = { @@ -359,9 +489,26 @@ def get_user_workspaces(self, user_id): raise Exception(f"{response.status_code} {response.text}") def add_user_to_workspace(self, user_id, is_workspace_admin=False): - ''' - No-op if user is already a member of the workspace. No attempt will be made to update their existing is_workspace_admin flag if it differs. - ''' + """ + Add a user to the configured workspace. + + This method is idempotent - if the user is already a member of the workspace, + no action is taken. The existing workspace admin flag is not updated if it differs. + + Args: + user_id (str): The unique identifier of the user to add. + is_workspace_admin (bool, optional): Whether to grant workspace admin permissions. + Defaults to False. + + Returns: + None: Returns None on success. 
+ + Raises: + Exception: If the operation fails. + + Example: + >>> mas_utils.add_user_to_workspace("user@example.com", is_workspace_admin=True) + """ workspaces = self.get_user_workspaces(user_id) for workspace in workspaces: if "id" in workspace and workspace["id"] == self.mas_workspace_id: @@ -394,6 +541,24 @@ def add_user_to_workspace(self, user_id, is_workspace_admin=False): raise Exception(f"{response.status_code} {response.text}") def get_user_application_permissions(self, user_id, application_id): + """ + Retrieve a user's permissions for a specific MAS application. + + Args: + user_id (str): The unique identifier of the user. + application_id (str): The MAS application identifier (e.g., "manage", "health"). + + Returns: + dict: User's application permissions if they exist, None if not found (404). + + Raises: + Exception: If the API call fails with an unexpected status code. + + Example: + >>> perms = mas_utils.get_user_application_permissions("user@example.com", "manage") + >>> if perms: + ... print(f"Role: {perms.get('role')}") + """ self.logger.debug(f"Getting user {user_id} permissions for application {application_id}") url = f"{self.mas_api_url_internal}/workspaces/{self.mas_workspace_id}/applications/{application_id}/users/{user_id}" headers = { @@ -415,9 +580,26 @@ def get_user_application_permissions(self, user_id, application_id): raise Exception(f"{response.status_code} {response.text}") def set_user_application_permission(self, user_id, application_id, role): - ''' - No-op if user already has a role established for the application. No attempt will be made to update the role if it differs. - ''' + """ + Set a user's role for a specific MAS application. + + This method is idempotent - if the user already has permissions for the application, + no action is taken. The existing role is not updated if it differs. + + Args: + user_id (str): The unique identifier of the user. 
+ application_id (str): The MAS application identifier (e.g., "manage", "health"). + role (str): The role to assign (e.g., "ADMIN", "USER", "MANAGEUSER"). + + Returns: + None: Returns None on success. + + Raises: + Exception: If the operation fails. + + Example: + >>> mas_utils.set_user_application_permission("user@example.com", "manage", "ADMIN") + """ existing_permissions = self.get_user_application_permissions(user_id, application_id) @@ -449,6 +631,28 @@ def set_user_application_permission(self, user_id, application_id, role): raise Exception(f"{response.status_code} {response.text}") def check_user_sync(self, user_id, application_id, timeout_secs=60 * 10, retry_interval_secs=5): + """ + Wait for a user's sync status to reach SUCCESS for a specific application. + + This method polls the user's sync state for the given application and waits until + it reaches "SUCCESS" status. If the sync state is "ERROR" or missing, it triggers + a resync operation. + + Args: + user_id (str): The unique identifier of the user. + application_id (str): The MAS application identifier to check sync status for. + timeout_secs (int, optional): Maximum time to wait in seconds. Defaults to 600 (10 minutes). + retry_interval_secs (int, optional): Time between retry attempts in seconds. Defaults to 5. + + Returns: + None: Returns when sync status reaches SUCCESS. + + Raises: + Exception: If sync doesn't complete within the timeout period. 
+ + Example: + >>> mas_utils.check_user_sync("user@example.com", "manage", timeout_secs=300) + """ t_end = time.time() + timeout_secs self.logger.info(f"Awaiting user {user_id} sync status \"SUCCESS\" for app {application_id}: {t_end - time.time():.2f} seconds remaining") while time.time() < t_end: @@ -472,6 +676,26 @@ def check_user_sync(self, user_id, application_id, timeout_secs=60 * 10, retry_i raise Exception(f"User {user_id} sync failed to complete for app within {timeout_secs} seconds") def resync_users(self, user_ids): + """ + Trigger a resync operation for one or more users. + + This method forces MAS to resynchronize user data across applications. It performs + a no-op update to each user's display name to trigger the sync mechanism, which is + compatible with all MAS versions. + + Args: + user_ids (list): List of user identifiers to resync. + + Returns: + None + + Note: + The "/v3/users/utils/resync" API is only available in MAS Core >= 9.1. + This implementation uses a no-op profile update for backward compatibility. + + Example: + >>> mas_utils.resync_users(["user1@example.com", "user2@example.com"]) + """ self.logger.info(f"Issuing resync request(s) for user(s) {user_ids}") # The "/v3/users/utils/resync" API is only available in MAS Core >= 9.1 (coreapi >= 25.2.3) @@ -485,10 +709,28 @@ def resync_users(self, user_ids): self.update_user_display_name(user_id, user["displayName"]) def create_or_get_manage_api_key_for_user(self, user_id, temporary=False): - ''' - Get singleton API for user_id if it already exists, create it if not - if temporary is True AND we created the API key, delete it on exit - ''' + """ + Get or create a Manage API key for a user. + + This method retrieves an existing API key for the user or creates a new one if none exists. + Only one API key is allowed per user in Manage. If temporary is True and a new key is created, + it will be automatically deleted when the program exits. 
+ + Args: + user_id (str): The unique identifier of the user. + temporary (bool, optional): If True and a new key is created, delete it on program exit. + Defaults to False. + + Returns: + dict: The Manage API key record containing the apikey value and metadata. + + Raises: + Exception: If API key creation/retrieval fails or if the key is unexpectedly not found. + + Example: + >>> api_key = mas_utils.create_or_get_manage_api_key_for_user("MAXADMIN", temporary=True) + >>> print(f"API Key: {api_key['apikey']}") + """ self.logger.debug(f"Attempting to create Manage API Key for user {user_id}") url = f"{self.manage_api_url_internal}/maximo/api/os/mxapiapikey" querystring = { @@ -547,6 +789,23 @@ def create_or_get_manage_api_key_for_user(self, user_id, temporary=False): return apikey def get_manage_api_key_for_user(self, user_id): + """ + Retrieve the Manage API key for a specific user. + + Args: + user_id (str): The unique identifier of the user. + + Returns: + dict: The API key record if found, None if no API key exists for the user. + + Raises: + Exception: If the API call fails. + + Example: + >>> api_key = mas_utils.get_manage_api_key_for_user("user@example.com") + >>> if api_key: + ... print(f"Key expires: {api_key.get('expiration')}") + """ self.logger.debug(f"Getting Manage API Key for user {user_id}") url = f"{self.manage_api_url_internal}/maximo/api/os/mxapiapikey" querystring = { @@ -578,6 +837,23 @@ def get_manage_api_key_for_user(self, user_id): raise Exception(f"{response.status_code} {response.text}") def delete_manage_api_key(self, manage_api_key): + """ + Delete a Manage API key. + + Args: + manage_api_key (dict): The API key record to delete (must contain 'href' and 'userid'). + + Returns: + None + + Raises: + Exception: If deletion fails (except for 404 which is treated as success). + + Example: + >>> api_key = mas_utils.get_manage_api_key_for_user("user@example.com") + >>> if api_key: + ... 
mas_utils.delete_manage_api_key(api_key) + """ self.logger.info(f"Deleting Manage API Key for user {manage_api_key['userid']}") # extract the apikey's identifier from the href @@ -607,6 +883,23 @@ def delete_manage_api_key(self, manage_api_key): raise Exception(f"{response.status_code} {response.text}") def get_manage_group_id(self, group_name, manage_api_key): + """ + Get the internal ID for a Manage security group by name. + + Args: + group_name (str): The name of the Manage security group (e.g., "MAXADMIN"). + manage_api_key (dict): API key record with 'apikey' field for authentication. + + Returns: + str: The maxgroupid if found, None if the group doesn't exist. + + Raises: + Exception: If the API call fails. + + Example: + >>> api_key = mas_utils.create_or_get_manage_api_key_for_user("MAXADMIN") + >>> group_id = mas_utils.get_manage_group_id("MAXADMIN", api_key) + """ self.logger.debug(f"Getting ID for Manage group with name {group_name}") url = f"{self.manage_api_url_internal}/maximo/api/os/mxapigroup" querystring = { @@ -636,6 +929,24 @@ def get_manage_group_id(self, group_name, manage_api_key): return None def is_user_in_manage_group(self, group_name, user_id, manage_api_key): + """ + Check if a user is a member of a Manage security group. + + Args: + group_name (str): The name of the Manage security group. + user_id (str): The unique identifier of the user. + manage_api_key (dict): API key record with 'apikey' field for authentication. + + Returns: + bool: True if the user is a member of the group, False otherwise. + + Raises: + Exception: If the group doesn't exist or the API call fails. 
+ + Example: + >>> api_key = mas_utils.create_or_get_manage_api_key_for_user("MAXADMIN") + >>> is_member = mas_utils.is_user_in_manage_group("MAXADMIN", "user@example.com", api_key) + """ self.logger.debug(f"Checking if {user_id} is a member of Manage group with name {group_name}") group_id = self.get_manage_group_id(group_name, manage_api_key) @@ -667,9 +978,27 @@ def is_user_in_manage_group(self, group_name, user_id, manage_api_key): raise Exception(f"{response.status_code} {response.text}") def add_user_to_manage_group(self, user_id, group_name, manage_api_key): - ''' - No-op if user_id is already a member of the manage security group - ''' + """ + Add a user to a Manage security group. + + This method is idempotent - if the user is already a member of the group, + no action is taken. + + Args: + user_id (str): The unique identifier of the user. + group_name (str): The name of the Manage security group. + manage_api_key (dict): API key record with 'apikey' field for authentication. + + Returns: + None: Returns None on success. + + Raises: + Exception: If the operation fails. + + Example: + >>> api_key = mas_utils.create_or_get_manage_api_key_for_user("MAXADMIN") + >>> mas_utils.add_user_to_manage_group("user@example.com", "MAXADMIN", api_key) + """ if self.is_user_in_manage_group(group_name, user_id, manage_api_key): self.logger.info(f"User {user_id} is already a member of Manage Security Group {group_name}") @@ -710,6 +1039,20 @@ def add_user_to_manage_group(self, user_id, group_name, manage_api_key): raise Exception(f"{response.status_code} {response.text}") def get_mas_applications_in_workspace(self): + """ + Retrieve all MAS applications configured in the workspace. + + Returns: + list: List of application dictionaries with details like id, status, etc. + + Raises: + Exception: If the API call fails. + + Example: + >>> apps = mas_utils.get_mas_applications_in_workspace() + >>> for app in apps: + ... 
print(f"App: {app['id']}") + """ self.logger.debug(f"Getting MAS Applications in workspace {self.mas_workspace_id}") url = f"{self.mas_api_url_internal}/workspaces/{self.mas_workspace_id}/applications" headers = { @@ -726,6 +1069,22 @@ def get_mas_applications_in_workspace(self): raise Exception(f"{response.status_code} {response.text}") def get_mas_application_availability(self, mas_application_id): + """ + Get the availability status of a specific MAS application. + + Args: + mas_application_id (str): The MAS application identifier (e.g., "manage", "health"). + + Returns: + dict: Application details including 'ready' and 'available' status flags. + + Raises: + Exception: If the API call fails. + + Example: + >>> app_status = mas_utils.get_mas_application_availability("manage") + >>> print(f"Ready: {app_status['ready']}, Available: {app_status['available']}") + """ self.logger.debug(f"Getting availability of MAS Application {mas_application_id} in workspace {self.mas_workspace_id}") url = f"{self.mas_api_url_internal}/workspaces/{self.mas_workspace_id}/applications/{mas_application_id}" headers = { @@ -742,6 +1101,25 @@ def get_mas_application_availability(self, mas_application_id): raise Exception(f"{response.status_code} {response.text}") def await_mas_application_availability(self, mas_application_id, timeout_secs=60 * 10, retry_interval_secs=5): + """ + Wait for a MAS application to become ready and available. + + This method polls the application status until both 'ready' and 'available' flags are True. + + Args: + mas_application_id (str): The MAS application identifier. + timeout_secs (int, optional): Maximum time to wait in seconds. Defaults to 600 (10 minutes). + retry_interval_secs (int, optional): Time between retry attempts in seconds. Defaults to 5. + + Returns: + None: Returns when the application is ready and available. + + Raises: + Exception: If the application doesn't become available within the timeout period. 
+ + Example: + >>> mas_utils.await_mas_application_availability("manage", timeout_secs=300) + """ t_end = time.time() + timeout_secs self.logger.info(f"Waiting for {mas_application_id} to become ready and available: {t_end - time.time():.2f} seconds remaining") while time.time() < t_end: @@ -754,6 +1132,31 @@ def await_mas_application_availability(self, mas_application_id, timeout_secs=60 raise Exception(f"{mas_application_id} did not become ready and available in time, aborting") def parse_initial_users_from_aws_secret_json(self, secret_json): + """ + Parse user definitions from AWS Secrets Manager JSON format. + + This method converts a JSON structure from AWS Secrets Manager into the internal + user definition format used by create_initial_users_for_saas. + + Args: + secret_json (dict): Dictionary where keys are email addresses and values are + CSV strings in format: "user_type,given_name,family_name[,id]" + where user_type is "primary" or "secondary". + + Returns: + dict: Parsed user structure with 'users' key containing 'primary' and 'secondary' lists. + + Raises: + Exception: If CSV format is invalid or user_type is not "primary" or "secondary". + + Example: + >>> secret = { + ... "admin@example.com": "primary,John,Doe", + ... "user@example.com": "secondary,Jane,Smith,jsmith" + ... } + >>> users = mas_utils.parse_initial_users_from_aws_secret_json(secret) + >>> print(len(users['users']['primary'])) # 1 + """ primary = [] secondary = [] for (email, csv) in secret_json.items(): @@ -793,6 +1196,45 @@ def parse_initial_users_from_aws_secret_json(self, secret_json): return initial_users def create_initial_users_for_saas(self, initial_users): + """ + Create and configure initial users for a SaaS MAS environment. + + This method processes a list of primary and secondary users, creating them in MAS Core, + linking them to the local IDP, adding them to the workspace, setting application permissions, + and adding them to Manage security groups. 
It ensures all applications are ready before + proceeding and waits for user sync to complete. + + Args: + initial_users (dict): Dictionary with structure: + { + "users": { + "primary": [list of user dicts], + "secondary": [list of user dicts] + } + } + Each user dict must contain: email, given_name, family_name + Optional: id (defaults to email if not provided) + + Returns: + dict: Summary with 'completed' and 'failed' lists of user records. + + Raises: + Exception: If input validation fails. + + Example: + >>> initial_users = { + ... "users": { + ... "primary": [ + ... {"email": "admin@example.com", "given_name": "John", "family_name": "Doe"} + ... ], + ... "secondary": [ + ... {"email": "user@example.com", "given_name": "Jane", "family_name": "Smith"} + ... ] + ... } + ... } + >>> result = mas_utils.create_initial_users_for_saas(initial_users) + >>> print(f"Completed: {len(result['completed'])}, Failed: {len(result['failed'])}") + """ # Validate input if "users" not in initial_users: @@ -850,6 +1292,54 @@ def create_initial_users_for_saas(self, initial_users): } def create_initial_user_for_saas(self, user, user_type): + """ + Create and fully configure a single initial user for SaaS. + + This method performs the complete user setup workflow: + 1. Creates the user in MAS Core with appropriate permissions and entitlements + 2. Links the user to the local identity provider + 3. Adds the user to the workspace + 4. Sets application-specific roles + 5. Waits for user sync to complete across all applications + 6. Adds user to Manage security groups (if applicable) + + Args: + user (dict): User definition containing: + - email (str, required): User's email address + - given_name (str, required): User's first name + - family_name (str, required): User's last name + - id (str, optional): User ID (defaults to email) + user_type (str): Either "PRIMARY" or "SECONDARY" to determine permissions level. 
+ + Returns: + None + + Raises: + Exception: If required fields are missing or user creation fails. + + Note: + PRIMARY users get: + - userAdmin permission + - PREMIUM application entitlement + - Workspace admin access + - ADMIN role for most apps, MANAGEUSER for Manage + - MAXADMIN security group membership + + SECONDARY users get: + - No admin permissions + - BASE application entitlement + - Regular workspace access + - USER role for most apps, MANAGEUSER for Manage + - No security group memberships + + Example: + >>> user = { + ... "email": "admin@example.com", + ... "given_name": "John", + ... "family_name": "Doe" + ... } + >>> mas_utils.create_initial_user_for_saas(user, "PRIMARY") + """ if "email" not in user: raise Exception("'email' not found in at least one of the user defs") if "given_name" not in user: diff --git a/src/mas/devops/utils.py b/src/mas/devops/utils.py index 6b75f112..ae745e87 100644 --- a/src/mas/devops/utils.py +++ b/src/mas/devops/utils.py @@ -1,12 +1,44 @@ +""" +Utility functions for version comparison and other common operations. + +This module provides semantic version comparison utilities with custom handling +for pre-release versions and wildcard version strings. +""" + import semver def isVersionBefore(_compare_to_version, _current_version): """ - The method does a modified semantic version comparison, - as we want to treat any pre-release as == to the real release - but in strict semantic versioning it is < - ie. '8.6.0-pre.m1dev86' is converted to '8.6.0' + Check if the current version is before (older than) the comparison version. + + This function performs a modified semantic version comparison where pre-release + versions are treated as equal to their base release version. For example, + '8.6.0-pre.m1dev86' is normalized to '8.6.0' before comparison. Wildcard versions + like '8.6.x' are converted to '8.6.0'. + + Args: + _compare_to_version (str): The version to compare against (e.g., "8.6.0"). 
+ _current_version (str): The current version to check (e.g., "8.5.0" or "8.6.0-pre.m1dev86"). + Can be None, in which case False is returned. + + Returns: + bool: True if current_version < compare_to_version, False otherwise. + Returns False if _current_version is None. + + Note: + This differs from strict semantic versioning where pre-release versions + are considered less than their base version. + + Example: + >>> isVersionBefore("8.6.0", "8.5.0") + True + >>> isVersionBefore("8.6.0", "8.7.0") + False + >>> isVersionBefore("8.6.0", "8.6.0-pre.m1dev86") + False + >>> isVersionBefore("8.6.0", "8.6.x") + False """ if _current_version is None: print("Version is not informed. Returning False") @@ -22,10 +54,37 @@ def isVersionEqualOrAfter(_compare_to_version, _current_version): """ - The method does a modified semantic version comparison, - as we want to treat any pre-release as == to the real release - but in strict semantic versioning it is < - ie. '8.6.0-pre.m1dev86' is converted to '8.6.0' + Check if the current version is equal to or after (newer than) the comparison version. + + This function performs a modified semantic version comparison where pre-release + versions are treated as equal to their base release version. For example, + '8.6.0-pre.m1dev86' is normalized to '8.6.0' before comparison. Wildcard versions + like '8.6.x' are converted to '8.6.0'. + + Args: + _compare_to_version (str): The version to compare against (e.g., "8.6.0"). + _current_version (str): The current version to check (e.g., "8.7.0" or "8.6.0-pre.m1dev86"). + Can be None, in which case False is returned. + + Returns: + bool: True if current_version >= compare_to_version, False otherwise. + Returns False if _current_version is None. + + Note: + This differs from strict semantic versioning where pre-release versions + are considered less than their base version. 
+ + Example: + >>> isVersionEqualOrAfter("8.6.0", "8.7.0") + True + >>> isVersionEqualOrAfter("8.6.0", "8.5.0") + False + >>> isVersionEqualOrAfter("8.6.0", "8.6.0") + True + >>> isVersionEqualOrAfter("8.6.0", "8.6.0-pre.m1dev86") + True + >>> isVersionEqualOrAfter("8.6.0", "8.6.x") + True """ if _current_version is None: print("Version is not informed. Returning False") From 172d1d19ca533263ba0646b49b10de4de2c01983 Mon Sep 17 00:00:00 2001 From: David Parker Date: Thu, 25 Dec 2025 17:37:32 +0000 Subject: [PATCH 3/4] Update --- src/mas/devops/data/__init__.py | 70 ++++++++++++- src/mas/devops/mas/apps.py | 111 ++++++++++++++------ src/mas/devops/mas/suite.py | 159 ++++++++++++++++++++++++++++- src/mas/devops/saas/job_cleaner.py | 91 +++++++++++++++++ 4 files changed, 394 insertions(+), 37 deletions(-) diff --git a/src/mas/devops/data/__init__.py b/src/mas/devops/data/__init__.py index ae33168b..36f4ef2f 100644 --- a/src/mas/devops/data/__init__.py +++ b/src/mas/devops/data/__init__.py @@ -7,12 +7,38 @@ # http://www.eclipse.org/legal/epl-v10.html # # ***************************************************************************** +""" +IBM Operator Catalog data management module. + +This module provides functions to access and query IBM Operator Catalog definitions +stored as YAML files. Catalogs contain operator version information and are organized +by version tag and architecture. +""" + import yaml from glob import glob from os import path -def getCatalog(name: str) -> dict: +def getCatalog(name: str) -> dict | None: + """ + Load a specific IBM Operator Catalog definition by name. + + This function reads a catalog YAML file from the catalogs directory and returns + its contents as a dictionary. + + Args: + name (str): The catalog name/tag (e.g., "v9-241205-amd64", "v8-240528-amd64"). + + Returns: + dict: The catalog definition dictionary containing operator versions and metadata. + Returns None if the catalog file doesn't exist. 
+ + Example: + >>> catalog = getCatalog("v9-241205-amd64") + >>> if catalog: + ... print(f"Catalog version: {catalog.get('version')}") + """ moduleFile = path.abspath(__file__) modulePath = path.dirname(moduleFile) catalogFileName = f"{name}.yaml" @@ -26,6 +52,26 @@ def getCatalog(name: str) -> dict: def listCatalogTags(arch="amd64") -> list: + """ + List all available IBM Operator Catalog tags for a specific architecture. + + This function scans the catalogs directory and returns a sorted list of all + catalog tags matching the specified architecture. + + Args: + arch (str, optional): The target architecture (e.g., "amd64", "s390x", "ppc64le"). + Defaults to "amd64". + + Returns: + list: Sorted list of catalog tag strings (e.g., ["v8-240528-amd64", "v9-241205-amd64"]). + Returns empty list if no catalogs are found for the architecture. + + Example: + >>> tags = listCatalogTags("amd64") + >>> print(f"Available catalogs: {len(tags)}") + >>> for tag in tags[-3:]: # Show last 3 + ... print(tag) + """ moduleFile = path.abspath(__file__) modulePath = path.dirname(moduleFile) yamlFiles = glob(path.join(modulePath, "catalogs", f"*-{arch}.yaml")) @@ -35,7 +81,27 @@ def listCatalogTags(arch="amd64") -> list: return result -def getNewestCatalogTag(arch="amd64") -> str: +def getNewestCatalogTag(arch="amd64") -> str | None: + """ + Get the most recent IBM Operator Catalog tag for a specific architecture. + + This function returns the newest (last in sorted order) catalog tag available + for the specified architecture. + + Args: + arch (str, optional): The target architecture (e.g., "amd64", "s390x", "ppc64le"). + Defaults to "amd64". + + Returns: + str: The newest catalog tag (e.g., "v9-241205-amd64"). + Returns None if no catalogs are found for the architecture. + + Example: + >>> newest = getNewestCatalogTag("amd64") + >>> if newest: + ... print(f"Latest catalog: {newest}") + ... 
catalog = getCatalog(newest) + """ catalogs = listCatalogTags(arch) if len(catalogs) == 0: return None diff --git a/src/mas/devops/mas/apps.py b/src/mas/devops/mas/apps.py index 28ca355c..307d17bc 100644 --- a/src/mas/devops/mas/apps.py +++ b/src/mas/devops/mas/apps.py @@ -55,19 +55,27 @@ def getAppResource(dynClient: DynamicClient, instanceId: str, applicationId: str, workspaceId: str = None) -> bool: """ - Get the application or workspace Custom Resource - - :param dynClient: Description - :type dynClient: DynamicClient - :param instanceId: Description - :type instanceId: str - :param applicationId: Description - :type applicationId: str - :return: Description - :rtype: bool - :type workspaceId: str - :return: Description - :rtype: bool + Retrieve a MAS application or workspace custom resource. + + This function fetches either an application-level CR (e.g., ManageApp) or a + workspace-level CR (e.g., ManageWorkspace) depending on whether workspaceId is provided. + + Args: + dynClient (DynamicClient): OpenShift dynamic client for cluster API interactions. + instanceId (str): The MAS instance identifier (e.g., "inst1"). + applicationId (str): The MAS application identifier (e.g., "manage", "iot", "monitor"). + workspaceId (str, optional): The workspace identifier. If provided, retrieves workspace CR. + Defaults to None (retrieves application CR). + + Returns: + ResourceInstance: The custom resource object if found, None otherwise. + Returns None if the resource doesn't exist, CRD is missing, or authorization fails. 
+ + Example: + >>> # Get application CR + >>> app = getAppResource(client, "inst1", "manage") + >>> # Get workspace CR + >>> workspace = getAppResource(client, "inst1", "manage", "masdev") """ apiVersion = APP_API_VERSIONS[applicationId] if applicationId in APP_API_VERSIONS else "apps.mas.ibm.com/v1" @@ -93,7 +101,19 @@ def getAppResource(dynClient: DynamicClient, instanceId: str, applicationId: str def verifyAppInstance(dynClient: DynamicClient, instanceId: str, applicationId: str) -> bool: """ - Validate that the chosen app instance exists + Verify that a MAS application instance exists in the cluster. + + Args: + dynClient (DynamicClient): OpenShift dynamic client for cluster API interactions. + instanceId (str): The MAS instance identifier. + applicationId (str): The MAS application identifier (e.g., "manage", "iot"). + + Returns: + bool: True if the application instance exists, False otherwise. + + Example: + >>> if verifyAppInstance(client, "inst1", "manage"): + ... print("Manage application is installed") """ return getAppResource(dynClient, instanceId, applicationId) is not None @@ -108,22 +128,33 @@ def waitForAppReady( debugLogFunction=logger.debug, infoLogFunction=logger.info) -> bool: """ - Docstring for waitForAppReady - - :param dynClient: Description - :type dynClient: DynamicClient - :param instanceId: Description - :type instanceId: str - :param applicationId: Description - :type applicationId: str - :param workspaceId: Description - :type workspaceId: str - :param retries: Description - :type retries: int - :param delay: Description - :type delay: int - :return: Description - :rtype: bool + Wait for a MAS application or workspace to reach ready state. + + This function polls the application/workspace custom resource until its Ready condition + status becomes True, or until the retry limit is reached. It checks the status.conditions + array for a condition with type="Ready" and status="True". 
+ + Args: + dynClient (DynamicClient): OpenShift dynamic client for cluster API interactions. + instanceId (str): The MAS instance identifier. + applicationId (str): The MAS application identifier (e.g., "manage", "iot"). + workspaceId (str, optional): The workspace identifier. If provided, waits for workspace CR. + Defaults to None (waits for application CR). + retries (int, optional): Maximum number of polling attempts. Defaults to 100. + delay (int, optional): Delay in seconds between polling attempts. Defaults to 600 (10 minutes). + debugLogFunction (callable, optional): Function for debug logging. Defaults to logger.debug. + infoLogFunction (callable, optional): Function for info logging. Defaults to logger.info. + + Returns: + bool: True if the resource reaches ready state within the retry limit, False otherwise. + + Example: + >>> # Wait for Manage application to be ready + >>> if waitForAppReady(client, "inst1", "manage", retries=50, delay=300): + ... print("Manage is ready") + >>> # Wait for Manage workspace to be ready + >>> if waitForAppReady(client, "inst1", "manage", "masdev", retries=50): + ... print("Manage workspace is ready") """ resourceName = f"{APP_KINDS[applicationId]}/{instanceId}" @@ -176,7 +207,25 @@ def waitForAppReady( def getAppsSubscriptionChannel(dynClient: DynamicClient, instanceId: str) -> list: """ - Return list of installed apps with their subscribed channel + Retrieve the OLM subscription channels for all installed MAS applications. + + This function queries the Operator Lifecycle Manager subscriptions for each known + MAS application and returns a list of installed applications with their update channels. + + Args: + dynClient (DynamicClient): OpenShift dynamic client for cluster API interactions. + instanceId (str): The MAS instance identifier. + + Returns: + list: List of dictionaries with 'appId' and 'channel' keys for each installed app. + Returns empty list if no apps are found or if errors occur. 
+ + Example: + >>> apps = getAppsSubscriptionChannel(client, "inst1") + >>> for app in apps: + ... print(f"{app['appId']}: {app['channel']}") + manage: 8.7.x + iot: 8.8.x """ try: installedApps = [] diff --git a/src/mas/devops/mas/suite.py b/src/mas/devops/mas/suite.py index ab3fb727..64ee66a5 100644 --- a/src/mas/devops/mas/suite.py +++ b/src/mas/devops/mas/suite.py @@ -25,6 +25,25 @@ def isAirgapInstall(dynClient: DynamicClient, checkICSP: bool = False) -> bool: + """ + Determine if MAS is installed in an air-gapped (disconnected) environment. + + This function checks for the presence of ImageDigestMirrorSet (IDMS) or + ImageContentSourcePolicy (ICSP) resources that indicate mirror registries + are configured for air-gapped installations. + + Args: + dynClient (DynamicClient): OpenShift dynamic client for cluster API interactions. + checkICSP (bool, optional): If True, check for legacy ICSP resources instead of IDMS. + Defaults to False (checks IDMS). + + Returns: + bool: True if air-gap configuration is detected, False otherwise. + + Example: + >>> if isAirgapInstall(client): + ... print("Air-gapped installation detected") + """ if checkICSP: try: ICSPApi = dynClient.resources.get(api_version="operator.openshift.io/v1alpha1", kind="ImageContentSourcePolicy") @@ -40,6 +59,30 @@ def isAirgapInstall(dynClient: DynamicClient, checkICSP: bool = False) -> bool: def getDefaultStorageClasses(dynClient: DynamicClient) -> dict: + """ + Detect and return default storage classes for the cluster environment. + + This function identifies the storage provider (IBM Cloud, OCS, Azure, AWS, etc.) + by examining available storage classes and returns appropriate RWO (ReadWriteOnce) + and RWX (ReadWriteMany) storage class names. + + Args: + dynClient (DynamicClient): OpenShift dynamic client for cluster API interactions. 
+ + Returns: + SimpleNamespace: Object with attributes: + - provider (str): Provider identifier (e.g., "ibmc", "ocs", "aws") + - providerName (str): Human-readable provider name + - rwo (str): Storage class name for RWO volumes + - rwx (str): Storage class name for RWX volumes + All attributes are None if no recognized provider is found. + + Example: + >>> storage = getDefaultStorageClasses(client) + >>> if storage.provider: + ... print(f"Provider: {storage.providerName}") + ... print(f"RWO: {storage.rwo}, RWX: {storage.rwx}") + """ result = SimpleNamespace( provider=None, providerName=None, @@ -98,6 +141,28 @@ def getDefaultStorageClasses(dynClient: DynamicClient) -> dict: def getCurrentCatalog(dynClient: DynamicClient) -> dict: + """ + Retrieve information about the currently installed IBM Operator Catalog. + + This function queries the ibm-operator-catalog CatalogSource and extracts + version information from its display name and image reference. + + Args: + dynClient (DynamicClient): OpenShift dynamic client for cluster API interactions. + + Returns: + dict: Dictionary with keys: + - displayName (str): Catalog display name + - image (str): Catalog image reference + - catalogId (str): Parsed catalog identifier (e.g., "v9-241205-amd64") + Returns None if the catalog is not found. + + Example: + >>> catalog = getCurrentCatalog(client) + >>> if catalog: + ... print(f"Catalog: {catalog['catalogId']}") + ... print(f"Image: {catalog['image']}") + """ catalogsAPI = dynClient.resources.get(api_version="operators.coreos.com/v1alpha1", kind="CatalogSource") try: catalog = catalogsAPI.get(name="ibm-operator-catalog", namespace="openshift-marketplace") @@ -125,14 +190,44 @@ def getCurrentCatalog(dynClient: DynamicClient) -> dict: def listMasInstances(dynClient: DynamicClient) -> list: """ - Get a list of MAS instances on the cluster + Retrieve all MAS Suite instances from the OpenShift cluster. 
+ + This function queries the cluster for Suite custom resources and returns + a list of all MAS instances found. + + Args: + dynClient (DynamicClient): OpenShift dynamic client for cluster API interactions. + + Returns: + list: A list of dictionaries representing MAS Suite instances. + Returns an empty list if no instances are found or if errors occur. + + Example: + >>> instances = listMasInstances(client) + >>> for instance in instances: + ... print(f"MAS Instance: {instance['metadata']['name']}") """ return listInstances(dynClient, "core.mas.ibm.com/v1", "Suite") def getWorkspaceId(dynClient: DynamicClient, instanceId: str) -> str: """ - Get the MAS workspace ID for namespace "mas-{instanceId}-core" + Retrieve the workspace ID for a MAS instance. + + This function queries the Workspace custom resources in the MAS core namespace + and returns the workspace ID from the first workspace found. + + Args: + dynClient (DynamicClient): OpenShift dynamic client for cluster API interactions. + instanceId (str): The MAS instance identifier. + + Returns: + str: The workspace ID if found, None if no workspaces exist for the instance. + + Example: + >>> workspace_id = getWorkspaceId(client, "inst1") + >>> if workspace_id: + ... print(f"Workspace ID: {workspace_id}") """ workspaceId = None workspacesAPI = dynClient.resources.get(api_version="core.mas.ibm.com/v1", kind="Workspace") @@ -146,7 +241,20 @@ def getWorkspaceId(dynClient: DynamicClient, instanceId: str) -> str: def verifyMasInstance(dynClient: DynamicClient, instanceId: str) -> bool: """ - Validate that the chosen MAS instance exists + Verify that a MAS Suite instance exists in the cluster. + + Args: + dynClient (DynamicClient): OpenShift dynamic client for cluster API interactions. + instanceId (str): The MAS instance identifier to verify. + + Returns: + bool: True if the instance exists and is accessible, False otherwise. + Returns False if the instance is not found, the CRD doesn't exist, + or authorization fails. 
+ + Example: + >>> if verifyMasInstance(client, "inst1"): + ... print("MAS instance found") """ try: suitesAPI = dynClient.resources.get(api_version="core.mas.ibm.com/v1", kind="Suite") @@ -164,7 +272,23 @@ def verifyMasInstance(dynClient: DynamicClient, instanceId: str) -> bool: def getMasChannel(dynClient: DynamicClient, instanceId: str) -> str: """ - Get the MAS channel from the subscription + Retrieve the OLM subscription channel for a MAS instance. + + This function queries the Operator Lifecycle Manager subscription for the + MAS Core operator to determine which update channel it is subscribed to. + + Args: + dynClient (DynamicClient): OpenShift dynamic client for cluster API interactions. + instanceId (str): The MAS instance identifier. + + Returns: + str: The channel name (e.g., "8.11.x", "9.0.x") if the subscription exists, + None if the subscription is not found. + + Example: + >>> channel = getMasChannel(client, "inst1") + >>> if channel: + ... print(f"MAS is on channel: {channel}") """ masSubscription = getSubscription(dynClient, f"mas-{instanceId}-core", "ibm-mas") if masSubscription is None: @@ -174,6 +298,33 @@ def getMasChannel(dynClient: DynamicClient, instanceId: str) -> str: def updateIBMEntitlementKey(dynClient: DynamicClient, namespace: str, icrUsername: str, icrPassword: str, artifactoryUsername: str = None, artifactoryPassword: str = None, secretName: str = "ibm-entitlement") -> ResourceInstance: + """ + Create or update the IBM Entitlement secret for accessing IBM container registries. + + This function generates a Docker config JSON with credentials for IBM Container Registry + (ICR) and optionally Artifactory, then creates or updates a Kubernetes secret. + + Args: + dynClient (DynamicClient): OpenShift dynamic client for cluster API interactions. + namespace (str): The namespace where the secret should be created/updated. + icrUsername (str): Username for IBM Container Registry (typically "cp"). 
+ icrPassword (str): Entitlement key for IBM Container Registry. + artifactoryUsername (str, optional): Username for Artifactory access. Defaults to None. + artifactoryPassword (str, optional): Password/token for Artifactory access. Defaults to None. + secretName (str, optional): Name of the secret to create/update. Defaults to "ibm-entitlement". + + Returns: + ResourceInstance: The created or updated Secret resource. + + Example: + >>> secret = updateIBMEntitlementKey( + ... client, + ... "mas-inst1-core", + ... "cp", + ... "your-entitlement-key" + ... ) + >>> print(f"Secret {secret.metadata.name} updated") + """ if secretName is None: secretName = "ibm-entitlement" if artifactoryUsername is not None: diff --git a/src/mas/devops/saas/job_cleaner.py b/src/mas/devops/saas/job_cleaner.py index 43a539b3..1e56aaa6 100644 --- a/src/mas/devops/saas/job_cleaner.py +++ b/src/mas/devops/saas/job_cleaner.py @@ -19,12 +19,57 @@ class JobCleaner: + """ + Kubernetes Job cleanup utility for managing ArgoCD-style job retention. + + This class provides functionality to clean up old Kubernetes Job resources while + retaining the most recent job in each cleanup group. Jobs are grouped by a label + and only the newest job (by creation timestamp) in each group is preserved. + + This is useful for ArgoCD applications where auto_delete is not enabled but you + still want to prevent job accumulation. + + Attributes: + k8s_client (client.api_client.ApiClient): Kubernetes API client. + batch_v1_api (client.BatchV1Api): Kubernetes Batch V1 API interface. + logger (logging.Logger): Logger instance for this class. + + Example: + >>> from kubernetes import client, config + >>> config.load_kube_config() + >>> k8s_client = client.ApiClient() + >>> cleaner = JobCleaner(k8s_client) + >>> cleaner.cleanup_jobs("argocd.argoproj.io/instance", limit=100, dry_run=False) + """ + def __init__(self, k8s_client: client.api_client.ApiClient): + """ + Initialize the JobCleaner with a Kubernetes API client. 
+ + Args: + k8s_client (client.api_client.ApiClient): Kubernetes API client for cluster operations. + """ self.k8s_client = k8s_client self.batch_v1_api = client.BatchV1Api(self.k8s_client) self.logger = logging.getLogger(f"{__name__}.{self.__class__.__name__}") def _get_all_cleanup_groups(self, label: str, limit: int): + """ + Retrieve all unique cleanup groups across all namespaces. + + This internal method queries all jobs with the specified label and extracts + unique (namespace, group_id) pairs for processing. + + Args: + label (str): The label key used to identify and group jobs. + limit (int): Maximum number of jobs to retrieve per API call (pagination). + + Returns: + set: Set of tuples containing (namespace, cleanup_group_id) pairs. + + Note: + This method pages through all jobs to avoid loading everything into memory at once. + """ # set of tuples (namespace, cleanup_group_id) cleanup_groups = set() _continue = None @@ -44,6 +89,24 @@ def _get_all_cleanup_groups(self, label: str, limit: int): return cleanup_groups def _get_all_jobs(self, namespace: str, group_id: str, label: str, limit: int): + """ + Retrieve all jobs for a specific cleanup group in a namespace. + + This internal method pages through all jobs matching the group ID and chains + the results together for efficient iteration. + + Args: + namespace (str): The Kubernetes namespace to query. + group_id (str): The cleanup group identifier from the label value. + label (str): The label key used to filter jobs. + limit (int): Maximum number of jobs to retrieve per API call (pagination). + + Returns: + itertools.chain: Chained iterator of job items across all pages. + + Note: + Jobs are not loaded entirely into memory; iterators are chained for efficiency. 
+ """ # page through all jobs in this namespace and group, and chain together all the resulting iterators job_items_iters = [] _continue = None @@ -60,6 +123,34 @@ def _get_all_jobs(self, namespace: str, group_id: str, label: str, limit: int): return itertools.chain(*job_items_iters) def cleanup_jobs(self, label: str, limit: int, dry_run: bool): + """ + Clean up old Kubernetes Jobs, retaining only the newest in each group. + + This method identifies all cleanup groups (by label), then for each group, + sorts jobs by creation timestamp and deletes all except the most recent one. + The cleanup process is eventually consistent and handles race conditions gracefully. + + Args: + label (str): The label key used to identify and group jobs (e.g., "argocd.argoproj.io/instance"). + limit (int): Maximum number of jobs to retrieve per API call for pagination. + dry_run (bool): If True, simulate the cleanup without actually deleting jobs. + + Returns: + None + + Note: + - Only the newest job in each group is retained + - Deletion uses "Foreground" propagation policy + - The process is eventually consistent; race conditions are handled gracefully + - Progress is logged for each cleanup group + + Example: + >>> cleaner.cleanup_jobs("argocd.argoproj.io/instance", limit=100, dry_run=True) + Found 5 unique (namespace, cleanup group ID) pairs, processing ... 
+ 0) my-app-sync mas-inst1-core + SKIP my-app-sync-abc123 2024-01-15 10:30:00 + PURGE my-app-sync-xyz789 2024-01-14 09:20:00 SUCCESS + """ dry_run_param = None if dry_run: dry_run_param = "All" From 96417ac23fb93719084035b484cdd9ef38c9d6bf Mon Sep 17 00:00:00 2001 From: David Parker Date: Thu, 25 Dec 2025 19:13:23 +0000 Subject: [PATCH 4/4] [patch] ghpages build (with mkdocs) --- .github/workflows/docs.yml | 53 +++++ .gitignore | 1 + .secrets.baseline | 40 +++- bin/mas-devops-notify-slack | 4 +- docs/api/aiservice.md | 5 + docs/api/db2.md | 5 + docs/api/index.md | 98 +++++++++ docs/api/mas/apps.md | 5 + docs/api/mas/suite.md | 5 + docs/api/ocp.md | 5 + docs/api/olm.md | 5 + docs/api/saas/job_cleaner.md | 5 + docs/api/slack.md | 5 + docs/api/sls.md | 5 + docs/api/tekton.md | 5 + docs/api/users.md | 5 + docs/api/utils.md | 5 + docs/cli/create-initial-users.md | 169 +++++++++++++++ docs/cli/db2-validate-config.md | 74 +++++++ docs/cli/index.md | 98 +++++++++ docs/cli/notify-slack.md | 306 +++++++++++++++++++++++++++ docs/cli/saas-job-cleaner.md | 222 +++++++++++++++++++ docs/contributing.md | 294 +++++++++++++++++++++++++ docs/getting-started/installation.md | 93 ++++++++ docs/getting-started/quickstart.md | 205 ++++++++++++++++++ docs/index.md | 99 +++++++++ docs/license.md | 122 +++++++++++ mkdocs.yml | 116 ++++++++++ setup.py | 6 + src/mas/devops/aiservice.py | 35 --- src/mas/devops/data/__init__.py | 17 -- src/mas/devops/mas/apps.py | 25 --- src/mas/devops/mas/suite.py | 44 ---- src/mas/devops/olm.py | 3 - src/mas/devops/saas/job_cleaner.py | 14 -- src/mas/devops/sls.py | 14 -- src/mas/devops/users.py | 140 ------------ src/mas/devops/utils.py | 22 -- 38 files changed, 2057 insertions(+), 317 deletions(-) create mode 100644 .github/workflows/docs.yml create mode 100644 docs/api/aiservice.md create mode 100644 docs/api/db2.md create mode 100644 docs/api/index.md create mode 100644 docs/api/mas/apps.md create mode 100644 docs/api/mas/suite.md create mode 100644 
docs/api/ocp.md create mode 100644 docs/api/olm.md create mode 100644 docs/api/saas/job_cleaner.md create mode 100644 docs/api/slack.md create mode 100644 docs/api/sls.md create mode 100644 docs/api/tekton.md create mode 100644 docs/api/users.md create mode 100644 docs/api/utils.md create mode 100644 docs/cli/create-initial-users.md create mode 100644 docs/cli/db2-validate-config.md create mode 100644 docs/cli/index.md create mode 100644 docs/cli/notify-slack.md create mode 100644 docs/cli/saas-job-cleaner.md create mode 100644 docs/contributing.md create mode 100644 docs/getting-started/installation.md create mode 100644 docs/getting-started/quickstart.md create mode 100644 docs/index.md create mode 100644 docs/license.md create mode 100644 mkdocs.yml diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml new file mode 100644 index 00000000..269d36d2 --- /dev/null +++ b/.github/workflows/docs.yml @@ -0,0 +1,53 @@ +name: Build Documentation + +on: + push: + branches: + - '**' + tags-ignore: + - '**' + release: + types: [ published ] + +# Ensure only one build at a time for any branch, cancelling any in-progress builds +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + deploy-docs: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: '3.12' + + - name: Cache pip packages + uses: actions/cache@v4 + with: + path: ~/.cache/pip + key: ${{ runner.os }}-pip-${{ hashFiles('**/setup.py', '**/pyproject.toml') }} + restore-keys: | + ${{ runner.os }}-pip- + + - name: Install package and doc build dependencies + run: | + python -m pip install --upgrade pip + pip install -e ".[docs]" + + - name: Build documentation + run: | + mkdocs build + + - name: Deploy + uses: JamesIves/github-pages-deploy-action@4.1.7 + if: ${{ github.event_name == 'release' || contains(github.event.head_commit.message, 
'[doc]') }} + with: + branch: gh-pages + folder: site + +# Made with Bob diff --git a/.gitignore b/.gitignore index f182cf60..b018c32e 100644 --- a/.gitignore +++ b/.gitignore @@ -20,3 +20,4 @@ venv/ kubectl.exe /build /.vscode +/site diff --git a/.secrets.baseline b/.secrets.baseline index 21a8f1c5..9324636a 100644 --- a/.secrets.baseline +++ b/.secrets.baseline @@ -3,7 +3,7 @@ "files": "^.secrets.baseline$", "lines": null }, - "generated_at": "2025-12-15T15:57:18Z", + "generated_at": "2025-12-25T19:13:06Z", "plugins_used": [ { "name": "AWSKeyDetector" @@ -95,6 +95,44 @@ "verified_result": null } ], + "docs/cli/create-initial-users.md": [ + { + "hashed_secret": "33f220dd67f717cc949db63e21c90e130a6137da", + "is_secret": false, + "is_verified": false, + "line_number": 126, + "type": "Secret Keyword", + "verified_result": null + } + ], + "docs/cli/index.md": [ + { + "hashed_secret": "33f220dd67f717cc949db63e21c90e130a6137da", + "is_secret": false, + "is_verified": false, + "line_number": 56, + "type": "Secret Keyword", + "verified_result": null + } + ], + "docs/cli/notify-slack.md": [ + { + "hashed_secret": "1572c8dd915cf3bdecae817b1cb65847b4e94037", + "is_secret": false, + "is_verified": false, + "line_number": 61, + "type": "Secret Keyword", + "verified_result": null + }, + { + "hashed_secret": "2d8b1074eb78b85690ced3d2cc0aed0466f6f652", + "is_secret": false, + "is_verified": false, + "line_number": 207, + "type": "Secret Keyword", + "verified_result": null + } + ], "src/mas/devops/templates/ibm-entitlement-dockerconfig.json.j2": [ { "hashed_secret": "d2e2ab0f407e4ee3cf2ab87d61c31b25a74085e5", diff --git a/bin/mas-devops-notify-slack b/bin/mas-devops-notify-slack index a0043426..8750af3b 100755 --- a/bin/mas-devops-notify-slack +++ b/bin/mas-devops-notify-slack @@ -33,7 +33,7 @@ def _getToolchainLink() -> str: return "" -def notifyProvisionFyre(channels: list[str], rc: int, additionalMsg: str = None) -> bool: +def notifyProvisionFyre(channels: list[str], rc: int, 
additionalMsg: str | None = None) -> bool: """Send Slack notification about Fyre OCP cluster provisioning status.""" name = _getClusterName() toolchainLink = _getToolchainLink() @@ -67,7 +67,7 @@ def notifyProvisionFyre(channels: list[str], rc: int, additionalMsg: str = None) return response.data.get("ok", False) -def notifyProvisionRoks(channels: list[str], rc: int, additionalMsg: str = None) -> bool: +def notifyProvisionRoks(channels: list[str], rc: int, additionalMsg: str | None = None) -> bool: """Send Slack notification about ROKS cluster provisioning status.""" name = _getClusterName() toolchainLink = _getToolchainLink() diff --git a/docs/api/aiservice.md b/docs/api/aiservice.md new file mode 100644 index 00000000..9c36d223 --- /dev/null +++ b/docs/api/aiservice.md @@ -0,0 +1,5 @@ +# AI Service Module + +The `aiservice` module provides functions for AI/ML service management and configuration. + +::: mas.devops.aiservice \ No newline at end of file diff --git a/docs/api/db2.md b/docs/api/db2.md new file mode 100644 index 00000000..d482b6bf --- /dev/null +++ b/docs/api/db2.md @@ -0,0 +1,5 @@ +# DB2 Module + +The `db2` module provides functions for DB2 database configuration and validation. + +::: mas.devops.db2 \ No newline at end of file diff --git a/docs/api/index.md b/docs/api/index.md new file mode 100644 index 00000000..4f90a8c9 --- /dev/null +++ b/docs/api/index.md @@ -0,0 +1,98 @@ +# API Reference + +Welcome to the MAS DevOps API reference documentation. This section provides detailed documentation for all modules, classes, and functions in the `mas-devops` package. 
+ +## Core Modules + +These modules provide fundamental operations for working with OpenShift/Kubernetes and related technologies: + +- **[OCP](ocp.md)**: OpenShift/Kubernetes cluster operations including namespace management, resource creation, and cluster interactions +- **[Tekton](tekton.md)**: Pipeline management, installation, and execution of Tekton pipelines for MAS automation +- **[OLM](olm.md)**: Operator Lifecycle Manager operations for installing and managing operators +- **[Utils](utils.md)**: Common utility functions and helper methods used throughout the library + +## MAS Modules + +Modules specifically designed for managing Maximo Application Suite: + +- **[Suite](mas/suite.md)**: Core MAS suite management including installation, configuration, and lifecycle operations +- **[Apps](mas/apps.md)**: MAS application management for deploying and configuring MAS applications like Manage, Monitor, etc. + +## Service Integration Modules + +Modules for integrating with various services and dependencies: + +- **[DB2](db2.md)**: Database operations including DB2 configuration validation and management +- **[SLS](sls.md)**: Suite License Service integration for license management +- **[AI Service](aiservice.md)**: AI/ML service management and configuration +- **[Slack](slack.md)**: Slack notification and alerting integration + +## SaaS Modules + +Modules for SaaS-specific operations: + +- **[Job Cleaner](saas/job_cleaner.md)**: Utilities for cleaning up completed jobs in SaaS environments + +## User Management + +- **[Users](users.md)**: User creation and management for MAS deployments + +## Module Overview + +### Core Operations + +The core modules provide the foundation for all MAS DevOps operations: + +```python +from mas.devops.ocp import createNamespace, getResource +from mas.devops.tekton import installOpenShiftPipelines +from mas.devops.olm import installOperator +from mas.devops.utils import waitForResource +``` + +### MAS Management + +Work with MAS 
suite and applications: + +```python +from mas.devops.mas.suite import installMAS, configureMAS +from mas.devops.mas.apps import installApp, configureApp +``` + +### Service Integration + +Integrate with external services: + +```python +from mas.devops.db2 import validateDB2Config +from mas.devops.sls import configureSLS +from mas.devops.slack import sendSlackNotification +``` + +## Navigation + +Use the navigation menu on the left to browse through the API documentation for each module. Each module page includes: + +- Module overview and purpose +- Class and function documentation with parameters and return types +- Usage examples +- Related modules and cross-references + +## Conventions + +Throughout the API documentation: + +- **Required parameters** are clearly marked +- **Optional parameters** include default values +- **Return types** are specified for all functions +- **Exceptions** that may be raised are documented +- **Examples** demonstrate common usage patterns + +## Getting Help + +If you need help using the API: + +1. Check the [Quick Start Guide](../getting-started/quickstart.md) for common usage patterns +2. Review the specific module documentation for detailed information +3. Look at the [CLI Tools](../cli/index.md) for command-line usage examples +4. Visit the [GitHub repository](https://github.com/ibm-mas/python-devops) to report issues or ask questions \ No newline at end of file diff --git a/docs/api/mas/apps.md b/docs/api/mas/apps.md new file mode 100644 index 00000000..0f06eb7e --- /dev/null +++ b/docs/api/mas/apps.md @@ -0,0 +1,5 @@ +# MAS Apps Module + +The `mas.apps` module provides functions for MAS application management. + +::: mas.devops.mas.apps \ No newline at end of file diff --git a/docs/api/mas/suite.md b/docs/api/mas/suite.md new file mode 100644 index 00000000..48bbbdd8 --- /dev/null +++ b/docs/api/mas/suite.md @@ -0,0 +1,5 @@ +# MAS Suite Module + +The `mas.suite` module provides functions for MAS core suite management. 
+ +::: mas.devops.mas.suite \ No newline at end of file diff --git a/docs/api/ocp.md b/docs/api/ocp.md new file mode 100644 index 00000000..22f54066 --- /dev/null +++ b/docs/api/ocp.md @@ -0,0 +1,5 @@ +# OCP Module + +The `ocp` module provides functions for interacting with OpenShift/Kubernetes clusters. + +::: mas.devops.ocp \ No newline at end of file diff --git a/docs/api/olm.md b/docs/api/olm.md new file mode 100644 index 00000000..1f7f7c8c --- /dev/null +++ b/docs/api/olm.md @@ -0,0 +1,5 @@ +# OLM Module + +The `olm` module provides functions for managing Operator Lifecycle Manager (OLM) operations. + +::: mas.devops.olm \ No newline at end of file diff --git a/docs/api/saas/job_cleaner.md b/docs/api/saas/job_cleaner.md new file mode 100644 index 00000000..dfb85c9b --- /dev/null +++ b/docs/api/saas/job_cleaner.md @@ -0,0 +1,5 @@ +# SaaS Job Cleaner Module + +The `saas.job_cleaner` module provides utilities for cleaning up completed jobs in SaaS environments. + +::: mas.devops.saas.job_cleaner \ No newline at end of file diff --git a/docs/api/slack.md b/docs/api/slack.md new file mode 100644 index 00000000..c8ccd657 --- /dev/null +++ b/docs/api/slack.md @@ -0,0 +1,5 @@ +# Slack Module + +The `slack` module provides functions for Slack notification and alerting integration. + +::: mas.devops.slack \ No newline at end of file diff --git a/docs/api/sls.md b/docs/api/sls.md new file mode 100644 index 00000000..4e4eb70d --- /dev/null +++ b/docs/api/sls.md @@ -0,0 +1,5 @@ +# SLS Module + +The `sls` module provides functions for Suite License Service integration. + +::: mas.devops.sls \ No newline at end of file diff --git a/docs/api/tekton.md b/docs/api/tekton.md new file mode 100644 index 00000000..1d42d8db --- /dev/null +++ b/docs/api/tekton.md @@ -0,0 +1,5 @@ +# Tekton Module + +The `tekton` module provides functions for managing Tekton pipelines in OpenShift. 
+ +::: mas.devops.tekton \ No newline at end of file diff --git a/docs/api/users.md b/docs/api/users.md new file mode 100644 index 00000000..77e4db23 --- /dev/null +++ b/docs/api/users.md @@ -0,0 +1,5 @@ +# Users Module + +The `users` module provides functions for user creation and management in MAS deployments. + +::: mas.devops.users \ No newline at end of file diff --git a/docs/api/utils.md b/docs/api/utils.md new file mode 100644 index 00000000..c42f5543 --- /dev/null +++ b/docs/api/utils.md @@ -0,0 +1,5 @@ +# Utils Module + +The `utils` module provides common utility functions and helper methods. + +::: mas.devops.utils \ No newline at end of file diff --git a/docs/cli/create-initial-users.md b/docs/cli/create-initial-users.md new file mode 100644 index 00000000..4aa5b5d5 --- /dev/null +++ b/docs/cli/create-initial-users.md @@ -0,0 +1,169 @@ +# Create Initial Users for SaaS + +The `mas-devops-create-initial-users-for-saas` tool creates initial users for MAS SaaS deployments. + +## Usage + +```bash +mas-devops-create-initial-users-for-saas [OPTIONS] +``` + +## Description + +This tool automates the creation of initial users in a MAS SaaS environment. It can read user information from either an AWS Secrets Manager secret or a local YAML file, and creates the users in both the MAS Core API and the Manage application. 
+ +## Options + +### Required Options + +- `--mas-instance-id`: MAS instance identifier +- `--mas-workspace-id`: MAS workspace identifier +- `--log-level`: Logging level (DEBUG, INFO, WARNING, ERROR) + +### User Source Options (choose one) + +- `--initial-users-secret-name`: AWS Secrets Manager secret name containing user data +- `--initial-users-yaml-file`: Path to local YAML file containing user data + +### Port Configuration Options + +- `--manage-api-port`: Port for Manage API (default: 8443) +- `--coreapi-port`: Port for Core API (default: 8444) +- `--admin-dashboard-port`: Port for Admin Dashboard (default: 8445) + +## Examples + +### Using AWS Secrets Manager + +```bash +mas-devops-create-initial-users-for-saas \ + --mas-instance-id tgk01 \ + --mas-workspace-id masdev \ + --log-level INFO \ + --initial-users-secret-name "aws-dev/noble4/tgk01/initial_users" \ + --manage-api-port 8443 \ + --coreapi-port 8444 \ + --admin-dashboard-port 8445 +``` + +### Using Local YAML File + +```bash +mas-devops-create-initial-users-for-saas \ + --mas-instance-id tgk01 \ + --mas-workspace-id masdev \ + --log-level INFO \ + --initial-users-yaml-file /path/to/users.yaml \ + --manage-api-port 8443 \ + --coreapi-port 8444 \ + --admin-dashboard-port 8445 +``` + +## User Data Format + +### AWS Secrets Manager Format + +The secret should contain a JSON object with email addresses as keys and comma-separated values: + +```json +{ + "john.smith1@example.com": "primary,john1,smith1", + "john.smith2@example.com": "primary,john2,smith2", + "john.smith3@example.com": "secondary,john3,smith3" +} +``` + +Format: `"email": "role,firstName,lastName"` + +### YAML File Format + +```yaml +users: + - email: john.smith1@example.com + role: primary + firstName: john1 + lastName: smith1 + - email: john.smith2@example.com + role: primary + firstName: john2 + lastName: smith2 + - email: john.smith3@example.com + role: secondary + firstName: john3 + lastName: smith3 +``` + +## Prerequisites + +### Port 
Forwarding Setup + +Before running the tool, set up port forwarding for the required services: + +```bash +# Forward MAS services +oc port-forward service/admin-dashboard 8445:443 -n mas-tgk01-core +oc port-forward service/coreapi 8444:443 -n mas-tgk01-core +oc port-forward service/tgk01-masdev 8443:443 -n mas-tgk01-manage +``` + +### /etc/hosts Configuration + +Add the following entries to `/etc/hosts`: + +``` +127.0.0.1 tgk01-masdev.mas-tgk01-manage.svc.cluster.local +127.0.0.1 coreapi.mas-tgk01-core.svc.cluster.local +127.0.0.1 admin-dashboard.mas-tgk01-core.svc.cluster.local +``` + +### AWS Configuration (if using Secrets Manager) + +Configure AWS credentials: + +```bash +export SM_AWS_REGION="us-east-1" +export SM_AWS_ACCESS_KEY_ID="your-access-key" +export SM_AWS_SECRET_ACCESS_KEY="your-secret-key" + +aws configure set default.region ${SM_AWS_REGION} +aws configure set aws_access_key_id ${SM_AWS_ACCESS_KEY_ID} +aws configure set aws_secret_access_key ${SM_AWS_SECRET_ACCESS_KEY} +``` + +## What It Does + +The tool performs the following operations: + +1. **Reads User Data**: Retrieves user information from AWS Secrets Manager or local file +2. **Creates Core Users**: Creates users in the MAS Core API +3. **Creates Manage Users**: Creates users in the Manage application +4. **Assigns Roles**: Assigns appropriate roles (primary/secondary) to users +5. **Validates Creation**: Verifies that users were created successfully + +## Exit Codes + +- `0`: All users created successfully +- `1`: Error occurred during user creation + +## Troubleshooting + +### Connection Issues + +If you encounter connection issues: + +1. Verify port forwarding is active +2. Check `/etc/hosts` entries +3. Ensure you're logged into the OpenShift cluster + +### Authentication Issues + +If authentication fails: + +1. Verify AWS credentials (if using Secrets Manager) +2. Check that you have appropriate permissions +3. 
Ensure the secret/file exists and is readable + +## Related + +- [Users Module API](../api/users.md) +- [Quick Start Guide](../getting-started/quickstart.md) \ No newline at end of file diff --git a/docs/cli/db2-validate-config.md b/docs/cli/db2-validate-config.md new file mode 100644 index 00000000..7a89718d --- /dev/null +++ b/docs/cli/db2-validate-config.md @@ -0,0 +1,74 @@ +# DB2 Validate Config + +The `mas-devops-db2-validate-config` tool validates DB2 database configurations for MAS applications. + +## Usage + +```bash +mas-devops-db2-validate-config [OPTIONS] +``` + +## Description + +This tool validates that a DB2 database instance is properly configured for use with Maximo Application Suite applications. It checks various database parameters, settings, and configurations to ensure they meet MAS requirements. + +## Options + +- `--namespace`: Kubernetes namespace where the DB2 instance is deployed +- `--instance-name`: Name of the MAS instance +- `--app`: MAS application name (e.g., `manage`, `iot`, `monitor`) + +## Examples + +### Validate DB2 for Manage Application + +```bash +mas-devops-db2-validate-config \ + --namespace mas-myinstance-core \ + --instance-name myinstance \ + --app manage +``` + +### Validate DB2 for IoT Application + +```bash +mas-devops-db2-validate-config \ + --namespace mas-prod-core \ + --instance-name prod \ + --app iot +``` + +## What It Checks + +The tool validates: + +1. **Database Configuration Parameters**: Checks critical DB2 configuration settings +2. **Database Manager Configuration**: Validates DBM configuration parameters +3. **Registry Variables**: Verifies DB2 registry settings +4. **Resource Limits**: Ensures adequate resources are allocated +5. 
**Connection Settings**: Validates connection parameters + +## Exit Codes + +- `0`: Validation successful, configuration is valid +- `1`: Validation failed, configuration issues detected + +## Output + +The tool provides detailed output about: + +- Configuration parameters checked +- Any issues or warnings found +- Recommendations for fixing issues + +## Prerequisites + +- Access to the OpenShift/Kubernetes cluster +- Valid kubeconfig configured +- Appropriate permissions to access the namespace +- DB2 instance must be running + +## Related + +- [DB2 Module API](../api/db2.md) +- [Quick Start Guide](../getting-started/quickstart.md) \ No newline at end of file diff --git a/docs/cli/index.md b/docs/cli/index.md new file mode 100644 index 00000000..eebf4be9 --- /dev/null +++ b/docs/cli/index.md @@ -0,0 +1,98 @@ +# CLI Tools + +The `mas-devops` package includes several command-line tools for common MAS DevOps operations. + +## Available Tools + +### [DB2 Validate Config](db2-validate-config.md) +Validate DB2 database configurations for MAS applications. + +```bash +mas-devops-db2-validate-config --help +``` + +### [Create Initial Users](create-initial-users.md) +Create initial users for MAS SaaS deployments. + +```bash +mas-devops-create-initial-users-for-saas --help +``` + +### [SaaS Job Cleaner](saas-job-cleaner.md) +Clean up completed jobs in SaaS environments. + +```bash +mas-devops-saas-job-cleaner --help +``` + +### [Notify Slack](notify-slack.md) +Send notifications to Slack channels. 
+ +```bash +mas-devops-notify-slack --help +``` + +## Common Usage Patterns + +### Port Forwarding for Local Development + +When working with MAS locally, you often need to set up port forwarding: + +```bash +# Forward MAS core services +oc port-forward service/admin-dashboard 8445:443 -n mas-instance-core +oc port-forward service/coreapi 8444:443 -n mas-instance-core +oc port-forward service/instance-app 8443:443 -n mas-instance-app +``` + +### Environment Setup + +Set up your environment variables: + +```bash +# AWS credentials for secrets management +export SM_AWS_REGION="us-east-1" +export SM_AWS_ACCESS_KEY_ID="your-access-key" +export SM_AWS_SECRET_ACCESS_KEY="your-secret-key" + +# Configure AWS CLI +aws configure set default.region ${SM_AWS_REGION} +aws configure set aws_access_key_id ${SM_AWS_ACCESS_KEY_ID} +aws configure set aws_secret_access_key ${SM_AWS_SECRET_ACCESS_KEY} +``` + +### OpenShift Login + +Log in to your OpenShift cluster: + +```bash +oc login --token=sha256~your-token --server=https://api.cluster.example.com:6443 +``` + +## Tool Installation + +All CLI tools are automatically installed when you install the `mas-devops` package: + +```bash +pip install mas-devops +``` + +After installation, the tools will be available in your PATH. 
+ +## Getting Help + +Each tool provides detailed help information: + +```bash +# Get help for any tool +mas-devops-db2-validate-config --help +mas-devops-create-initial-users-for-saas --help +mas-devops-saas-job-cleaner --help +mas-devops-notify-slack --help +``` + +## Next Steps + +- Learn about each tool in detail by clicking on the links above +- Check the [API Reference](../api/index.md) for programmatic usage +- Review the [Quick Start Guide](../getting-started/quickstart.md) for examples \ No newline at end of file diff --git a/docs/cli/notify-slack.md b/docs/cli/notify-slack.md new file mode 100644 index 00000000..3b60633b --- /dev/null +++ b/docs/cli/notify-slack.md @@ -0,0 +1,306 @@ +# Notify Slack + +The `mas-devops-notify-slack` tool sends notifications to Slack channels for cluster provisioning events. + +## Usage + +```bash +mas-devops-notify-slack --action ACTION --rc RETURN_CODE [--msg MESSAGE] +``` + +## Description + +This tool sends notifications to Slack channels about OpenShift cluster provisioning status. It supports notifications for both Fyre and ROKS (IBM Cloud) cluster deployments. The tool uses the Slack API via a bot token to post formatted messages. 
+ +## Environment Variables + +### Required + +- `SLACK_TOKEN`: Slack bot token for authentication +- `SLACK_CHANNEL`: Comma-separated list of Slack channels to notify (e.g., `#deployments,#alerts`) +- `CLUSTER_NAME`: Name of the cluster being provisioned + +### Action-Specific Variables + +#### For Successful Fyre Provisioning (`--action ocp-provision-fyre` with `--rc 0`) + +- `OCP_CONSOLE_URL`: OpenShift console URL +- `OCP_USERNAME`: OpenShift admin username +- `OCP_PASSWORD`: OpenShift admin password + +#### For Successful ROKS Provisioning (`--action ocp-provision-roks` with `--rc 0`) + +- `OCP_CONSOLE_URL`: OpenShift console URL + +#### Optional (for both actions) + +- `TOOLCHAIN_PIPELINERUN_URL`: URL to the pipeline run +- `TOOLCHAIN_TRIGGER_NAME`: Name of the pipeline trigger + +## Options + +### Required Options + +- `--action`: Action type (choices: `ocp-provision-fyre`, `ocp-provision-roks`) +- `--rc`: Return code (0 for success, non-zero for failure) + +### Optional Options + +- `--msg`: Additional message to include in the notification + +## Examples + +### Notify Fyre Cluster Provisioning Success + +```bash +export SLACK_TOKEN="xoxb-your-slack-token" +export SLACK_CHANNEL="#deployments" +export CLUSTER_NAME="dev-cluster-01" +export OCP_CONSOLE_URL="https://console-openshift-console.apps.dev-cluster-01.example.com" +export OCP_USERNAME="kubeadmin" +export OCP_PASSWORD="xxxxx-xxxxx-xxxxx-xxxxx" + +mas-devops-notify-slack \ + --action ocp-provision-fyre \ + --rc 0 \ + --msg "Additional deployment notes" +``` + +### Notify Fyre Cluster Provisioning Failure + +```bash +export SLACK_TOKEN="xoxb-your-slack-token" +export SLACK_CHANNEL="#alerts" +export CLUSTER_NAME="dev-cluster-01" + +mas-devops-notify-slack \ + --action ocp-provision-fyre \ + --rc 1 +``` + +### Notify ROKS Cluster Provisioning Success + +```bash +export SLACK_TOKEN="xoxb-your-slack-token" +export SLACK_CHANNEL="#deployments,#cloud-ops" +export CLUSTER_NAME="prod-roks-cluster" +export 
OCP_CONSOLE_URL="https://console-openshift-console.apps.prod-roks-cluster.us-south.containers.appdomain.cloud" + +mas-devops-notify-slack \ + --action ocp-provision-roks \ + --rc 0 +``` + +### Notify ROKS Cluster Provisioning Failure + +```bash +export SLACK_TOKEN="xoxb-your-slack-token" +export SLACK_CHANNEL="#alerts" +export CLUSTER_NAME="prod-roks-cluster" + +mas-devops-notify-slack \ + --action ocp-provision-roks \ + --rc 1 +``` + +## Slack Bot Setup + +### Creating a Slack Bot + +1. Go to https://api.slack.com/apps +2. Click "Create New App" → "From scratch" +3. Name your app (e.g., "MAS DevOps Bot") and select your workspace +4. Navigate to "OAuth & Permissions" +5. Add the following Bot Token Scopes: + - `chat:write` - Send messages + - `chat:write.public` - Send messages to channels without joining +6. Install the app to your workspace +7. Copy the "Bot User OAuth Token" (starts with `xoxb-`) + +### Storing Slack Token Securely + +Store the token as an environment variable or secret: + +```bash +# Environment variable +export SLACK_TOKEN="xoxb-your-slack-token" + +# Or use a secret management system +export SLACK_TOKEN=$(aws secretsmanager get-secret-value --secret-id slack-token --query SecretString --output text) +``` + +## Message Format + +The tool sends formatted Slack messages with: + +- **Success messages** include: + - Cluster name + - Console URL + - Credentials (for Fyre) + - Links to management dashboards + - Optional additional message + +- **Failure messages** include: + - Cluster name + - Links to management dashboards + - Pipeline run link (if available) + +## Integration Examples + +### GitHub Actions + +```yaml +- name: Notify Slack on Fyre Provisioning + if: always() + env: + SLACK_TOKEN: ${{ secrets.SLACK_TOKEN }} + SLACK_CHANNEL: "#deployments" + CLUSTER_NAME: ${{ steps.provision.outputs.cluster_name }} + OCP_CONSOLE_URL: ${{ steps.provision.outputs.console_url }} + OCP_USERNAME: ${{ steps.provision.outputs.username }} + OCP_PASSWORD: 
${{ steps.provision.outputs.password }} + run: | + mas-devops-notify-slack \ + --action ocp-provision-fyre \ + --rc ${{ steps.provision.outcome == 'success' && '0' || '1' }} +``` + +### Tekton Pipeline + +```yaml +- name: notify-slack + image: your-registry/mas-devops:latest + env: + - name: SLACK_TOKEN + valueFrom: + secretKeyRef: + name: slack-credentials + key: token + - name: SLACK_CHANNEL + value: "#deployments" + - name: CLUSTER_NAME + value: "$(params.cluster-name)" + - name: OCP_CONSOLE_URL + value: "$(tasks.provision.results.console-url)" + script: | + mas-devops-notify-slack \ + --action ocp-provision-roks \ + --rc 0 \ + --msg "Provisioned via Tekton pipeline" +``` + +### Shell Script + +```bash +#!/bin/bash +# provision-and-notify.sh + +export SLACK_TOKEN="xoxb-your-slack-token" +export SLACK_CHANNEL="#deployments" +export CLUSTER_NAME="dev-cluster-01" + +# Provision cluster +if provision_fyre_cluster.sh; then + # Set success environment variables + export OCP_CONSOLE_URL="https://console.example.com" + export OCP_USERNAME="kubeadmin" + export OCP_PASSWORD="xxxxx-xxxxx" + + # Notify success + mas-devops-notify-slack \ + --action ocp-provision-fyre \ + --rc 0 \ + --msg "Cluster provisioned successfully" +else + # Notify failure + mas-devops-notify-slack \ + --action ocp-provision-fyre \ + --rc 1 + exit 1 +fi +``` + +## Best Practices + +1. **Secure Token Storage**: Always store `SLACK_TOKEN` as a secret, never in code +2. **Multiple Channels**: Use comma-separated channels for different audiences +3. **Include Context**: Use `--msg` to add deployment-specific information +4. **Error Handling**: Always check return codes and notify on failures +5. **Environment Cleanup**: Clear sensitive environment variables after use + +## Troubleshooting + +### No Notification Sent + +If no notification is sent, the tool exits silently when: + +1. `SLACK_TOKEN` environment variable is not set +2. 
`SLACK_CHANNEL` environment variable is not set + +This is by design to allow the tool to be used in environments where Slack notifications are optional. + +### Missing Required Environment Variables + +If you get an error about missing environment variables: + +1. Verify `CLUSTER_NAME` is set +2. For success notifications (`--rc 0`): + - Fyre: Check `OCP_CONSOLE_URL`, `OCP_USERNAME`, `OCP_PASSWORD` + - ROKS: Check `OCP_CONSOLE_URL` + +### Authentication Errors + +If you get Slack API authentication errors: + +1. Verify your `SLACK_TOKEN` is valid and starts with `xoxb-` +2. Check that the bot has the required permissions +3. Ensure the bot is installed in your workspace + +### Channel Not Found + +If messages aren't appearing in the expected channel: + +1. Verify channel names include the `#` prefix +2. Check that the bot has access to the channel +3. For private channels, invite the bot to the channel first + +## Exit Codes + +- `0`: Notification sent successfully +- `1`: Error sending notification + +## Related + +- [Slack Module API](../api/slack.md) +- [Quick Start Guide](../getting-started/quickstart.md) + +## What It Does + +The tool performs the following operations: + +1. **Checks Environment**: Verifies required environment variables are set +2. **Parses Channels**: Splits comma-separated channel list +3. **Builds Message**: Creates formatted Slack message blocks based on action and return code +4. **Posts to Slack**: Sends message to all specified channels using Slack API +5. 
**Returns Status**: Exits with appropriate code based on success/failure + +## Supported Actions + +### `ocp-provision-fyre` + +Sends notifications about Fyre (IBM DevIT) OpenShift cluster provisioning: + +- **Success** (`--rc 0`): Includes console URL, credentials, and Fyre dashboard link +- **Failure** (`--rc 1`): Includes Fyre dashboard link for troubleshooting + +### `ocp-provision-roks` + +Sends notifications about ROKS (IBM Cloud) OpenShift cluster provisioning: + +- **Success** (`--rc 0`): Includes console URL and IBM Cloud dashboard link +- **Failure** (`--rc 1`): Includes IBM Cloud dashboard link for troubleshooting + +## Exit Codes + +- `0`: Notification sent successfully (or silently skipped if SLACK_TOKEN/SLACK_CHANNEL not set) +- `1`: Error occurred (missing required environment variables for the action) \ No newline at end of file diff --git a/docs/cli/saas-job-cleaner.md b/docs/cli/saas-job-cleaner.md new file mode 100644 index 00000000..1de36ec3 --- /dev/null +++ b/docs/cli/saas-job-cleaner.md @@ -0,0 +1,222 @@ +# SaaS Job Cleaner + +The `mas-devops-saas-job-cleaner` tool cleans up completed jobs in SaaS environments. + +## Usage + +```bash +mas-devops-saas-job-cleaner [OPTIONS] +``` + +## Description + +This tool automatically cleans up completed Kubernetes jobs in MAS SaaS environments. It helps maintain a clean cluster by removing old job resources that are no longer needed, preventing resource accumulation and improving cluster performance. 
+ +## Options + +- `--namespace`: Kubernetes namespace to clean jobs from +- `--max-age-hours`: Maximum age in hours for completed jobs (default: 24) +- `--dry-run`: Preview what would be deleted without actually deleting +- `--log-level`: Logging level (DEBUG, INFO, WARNING, ERROR) + +## Examples + +### Clean Jobs Older Than 24 Hours + +```bash +mas-devops-saas-job-cleaner \ + --namespace mas-myinstance-core \ + --max-age-hours 24 +``` + +### Clean Jobs Older Than 48 Hours + +```bash +mas-devops-saas-job-cleaner \ + --namespace mas-prod-core \ + --max-age-hours 48 \ + --log-level INFO +``` + +### Dry Run (Preview Only) + +```bash +mas-devops-saas-job-cleaner \ + --namespace mas-myinstance-core \ + --max-age-hours 24 \ + --dry-run \ + --log-level DEBUG +``` + +### Clean Multiple Namespaces + +```bash +# Clean jobs in multiple namespaces +for ns in mas-inst1-core mas-inst2-core mas-inst3-core; do + mas-devops-saas-job-cleaner \ + --namespace $ns \ + --max-age-hours 24 \ + --log-level INFO +done +``` + +## What It Does + +The tool performs the following operations: + +1. **Scans Namespace**: Lists all jobs in the specified namespace +2. **Filters Completed Jobs**: Identifies jobs that have completed (succeeded or failed) +3. **Checks Age**: Determines which jobs are older than the specified age +4. **Deletes Jobs**: Removes jobs that meet the criteria +5. 
**Reports Results**: Provides summary of deleted jobs + +## Job Selection Criteria + +Jobs are selected for deletion if they meet ALL of the following criteria: + +- Job status is "Completed" (either succeeded or failed) +- Job completion time is older than `--max-age-hours` +- Job is in the specified namespace + +## Prerequisites + +- Access to the OpenShift/Kubernetes cluster +- Valid kubeconfig configured +- Appropriate permissions to list and delete jobs in the namespace +- Must be logged into the cluster + +## Safety Features + +### Dry Run Mode + +Use `--dry-run` to preview what would be deleted: + +```bash +mas-devops-saas-job-cleaner \ + --namespace mas-myinstance-core \ + --max-age-hours 24 \ + --dry-run +``` + +This will show you which jobs would be deleted without actually deleting them. + +### Age Protection + +The tool only deletes jobs older than the specified age, protecting recent jobs from accidental deletion. + +## Automation + +### Cron Job Setup + +You can automate job cleanup using a Kubernetes CronJob: + +```yaml +apiVersion: batch/v1 +kind: CronJob +metadata: + name: job-cleaner + namespace: mas-myinstance-core +spec: + schedule: "0 2 * * *" # Run daily at 2 AM + jobTemplate: + spec: + template: + spec: + containers: + - name: job-cleaner + image: your-registry/mas-devops:latest + command: + - mas-devops-saas-job-cleaner + - --namespace + - mas-myinstance-core + - --max-age-hours + - "24" + - --log-level + - INFO + restartPolicy: OnFailure +``` + +### Script Automation + +Create a script to clean multiple namespaces: + +```bash +#!/bin/bash +# cleanup-jobs.sh + +NAMESPACES=( + "mas-inst1-core" + "mas-inst2-core" + "mas-inst3-core" +) + +MAX_AGE=24 +LOG_LEVEL="INFO" + +for ns in "${NAMESPACES[@]}"; do + echo "Cleaning jobs in namespace: $ns" + mas-devops-saas-job-cleaner \ + --namespace "$ns" \ + --max-age-hours "$MAX_AGE" \ + --log-level "$LOG_LEVEL" +done +``` + +## Output + +The tool provides detailed output including: + +- Number of jobs 
scanned +- Number of jobs eligible for deletion +- List of deleted jobs +- Any errors encountered + +Example output: + +``` +INFO: Scanning namespace mas-myinstance-core for completed jobs +INFO: Found 15 completed jobs +INFO: 8 jobs are older than 24 hours +INFO: Deleting job: data-import-job-20231201 +INFO: Deleting job: backup-job-20231202 +... +INFO: Successfully deleted 8 jobs +``` + +## Exit Codes + +- `0`: Cleanup completed successfully +- `1`: Error occurred during cleanup + +## Best Practices + +1. **Start with Dry Run**: Always test with `--dry-run` first +2. **Conservative Age**: Start with a longer age (e.g., 48 hours) and adjust as needed +3. **Regular Schedule**: Run regularly (e.g., daily) to prevent accumulation +4. **Monitor Logs**: Review logs to ensure expected behavior +5. **Namespace Specific**: Run separately for each namespace to maintain control + +## Troubleshooting + +### Permission Denied + +If you get permission errors: + +```bash +# Verify you have the correct permissions +oc auth can-i delete jobs -n mas-myinstance-core +``` + +### No Jobs Deleted + +If no jobs are deleted: + +1. Check that jobs exist: `oc get jobs -n mas-myinstance-core` +2. Verify job completion status +3. Check job age against `--max-age-hours` +4. Use `--dry-run` to see what would be deleted + +## Related + +- [SaaS Job Cleaner Module API](../api/saas/job_cleaner.md) +- [Quick Start Guide](../getting-started/quickstart.md) \ No newline at end of file diff --git a/docs/contributing.md b/docs/contributing.md new file mode 100644 index 00000000..74897e81 --- /dev/null +++ b/docs/contributing.md @@ -0,0 +1,294 @@ +# Contributing to MAS DevOps + +Thank you for your interest in contributing to the MAS DevOps project! This guide will help you get started. 
+ +## Getting Started + +### Prerequisites + +- Python 3.12 or higher +- Git +- Access to an OpenShift/Kubernetes cluster (for testing) +- Familiarity with Python development + +### Setting Up Development Environment + +1. **Fork and Clone the Repository** + +```bash +git clone https://github.com/YOUR_USERNAME/python-devops.git +cd python-devops +``` + +2. **Create a Virtual Environment** + +```bash +python -m venv .venv +source .venv/bin/activate # On Windows: .venv\Scripts\activate +``` + +3. **Install Development Dependencies** + +```bash +pip install -e ".[dev]" +``` + +4. **Install Pre-commit Hooks** + +```bash +pre-commit install +``` + +## Development Workflow + +### 1. Create a Branch + +Create a new branch for your feature or bug fix: + +```bash +git checkout -b feature/your-feature-name +# or +git checkout -b fix/your-bug-fix +``` + +### 2. Make Changes + +- Write clean, readable code following PEP 8 style guidelines +- Add docstrings to all functions, classes, and modules +- Include type hints where appropriate +- Write tests for new functionality + +### 3. Run Tests + +```bash +# Run all tests +pytest + +# Run with coverage +pytest --cov=mas.devops + +# Run specific test file +pytest test/src/test_ocp.py +``` + +### 4. Check Code Quality + +```bash +# Run flake8 linter +flake8 src/ + +# Run pre-commit checks +pre-commit run --all-files +``` + +### 5. Commit Changes + +Write clear, descriptive commit messages: + +```bash +git add . +git commit -m "Add feature: description of your changes" +``` + +### 6. Push and Create Pull Request + +```bash +git push origin feature/your-feature-name +``` + +Then create a pull request on GitHub. 
+ +## Code Style Guidelines + +### Python Style + +- Follow [PEP 8](https://peps.python.org/pep-0008/) style guide +- Use 4 spaces for indentation (no tabs) +- Maximum line length: 120 characters +- Use meaningful variable and function names + +### Docstring Format + +Use Google-style docstrings: + +```python +def example_function(param1: str, param2: int) -> bool: + """Brief description of the function. + + Longer description if needed, explaining what the function does, + any important details, or usage notes. + + Args: + param1: Description of param1 + param2: Description of param2 + + Returns: + Description of return value + + Raises: + ValueError: Description of when this error is raised + + Examples: + >>> result = example_function("test", 42) + >>> print(result) + True + """ + # Implementation + return True +``` + +### Type Hints + +Use type hints for function parameters and return values: + +```python +from typing import List, Dict, Optional + +def process_data(items: List[str], config: Dict[str, any]) -> Optional[str]: + """Process data with configuration.""" + # Implementation + pass +``` + +## Testing Guidelines + +### Writing Tests + +- Place tests in the `test/` directory +- Mirror the source structure in test directory +- Use descriptive test names: `test_function_name_expected_behavior` +- Use pytest fixtures for common setup +- Mock external dependencies + +Example test: + +```python +import pytest +from mas.devops.ocp import createNamespace + +def test_create_namespace_success(mock_client): + """Test successful namespace creation.""" + namespace = "test-namespace" + result = createNamespace(mock_client, namespace) + assert result is True +``` + +### Running Specific Tests + +```bash +# Run tests for a specific module +pytest test/src/test_ocp.py + +# Run a specific test function +pytest test/src/test_ocp.py::test_create_namespace_success + +# Run with verbose output +pytest -v + +# Run with coverage report +pytest --cov=mas.devops --cov-report=html 
+``` + +## Documentation + +### Updating Documentation + +When adding new features or making changes: + +1. Update relevant docstrings +2. Add or update documentation in `docs/` +3. Update README.md if needed +4. Add examples to demonstrate usage + +### Building Documentation Locally + +```bash +# Install documentation dependencies +pip install -e ".[docs]" + +# Build documentation +mkdocs build + +# Serve documentation locally +mkdocs serve +``` + +Then visit http://localhost:8000 to view the documentation. + +## Pull Request Process + +1. **Ensure All Tests Pass**: Run the full test suite +2. **Update Documentation**: Add or update relevant documentation +3. **Follow Code Style**: Ensure code passes flake8 checks +4. **Write Clear Description**: Explain what your PR does and why +5. **Link Related Issues**: Reference any related issues +6. **Request Review**: Tag appropriate reviewers + +### PR Checklist + +- [ ] Tests added/updated and passing +- [ ] Documentation updated +- [ ] Code follows style guidelines +- [ ] Commit messages are clear +- [ ] No merge conflicts +- [ ] Pre-commit hooks pass + +## Reporting Issues + +### Bug Reports + +When reporting bugs, include: + +- Clear description of the issue +- Steps to reproduce +- Expected behavior +- Actual behavior +- Environment details (Python version, OS, etc.) +- Error messages or logs + +### Feature Requests + +When requesting features, include: + +- Clear description of the feature +- Use case and motivation +- Proposed implementation (if any) +- Examples of how it would be used + +## Code Review Process + +All contributions go through code review: + +1. Automated checks run on PR creation +2. Maintainers review code and provide feedback +3. Address feedback and update PR +4. 
Once approved, PR is merged + +## Community Guidelines + +- Be respectful and inclusive +- Provide constructive feedback +- Help others learn and grow +- Follow the project's code of conduct + +## Getting Help + +If you need help: + +- Check existing documentation +- Search existing issues +- Ask questions in pull requests +- Contact maintainers + +## License + +By contributing, you agree that your contributions will be licensed under the Eclipse Public License v1.0. + +## Additional Resources + +- [Project README](../README.md) +- [API Documentation](api/index.md) +- [Quick Start Guide](getting-started/quickstart.md) +- [GitHub Repository](https://github.com/ibm-mas/python-devops) + +Thank you for contributing to MAS DevOps! 🎉 \ No newline at end of file diff --git a/docs/getting-started/installation.md b/docs/getting-started/installation.md new file mode 100644 index 00000000..76fa840f --- /dev/null +++ b/docs/getting-started/installation.md @@ -0,0 +1,93 @@ +# Installation + +## Requirements + +- Python 3.12 or higher +- pip package manager + +## Install from PyPI + +The easiest way to install `mas-devops` is from PyPI: + +```bash +pip install mas-devops +``` + +## Install from Source + +To install the latest development version from source: + +```bash +git clone https://github.com/ibm-mas/python-devops.git +cd python-devops +pip install -e . 
+``` + +## Install with Development Dependencies + +If you want to contribute to the project, install with development dependencies: + +```bash +pip install -e ".[dev]" +``` + +This will install additional tools for testing and development: + +- `build`: Build tool for creating distributions +- `flake8`: Code linting +- `pytest`: Testing framework +- `pytest-mock`: Mocking support for tests +- `requests-mock`: HTTP request mocking + +## Install with Documentation Dependencies + +To build the documentation locally: + +```bash +pip install -e ".[docs]" +``` + +This installs: + +- `mkdocs`: Documentation site generator +- `mkdocs-material`: Material theme for MkDocs +- `mkdocstrings[python]`: Automatic documentation from docstrings +- `pymdown-extensions`: Markdown extensions + +## Verify Installation + +After installation, verify that the package is installed correctly: + +```python +import mas.devops +print(mas.devops.__version__) +``` + +You should also be able to run the CLI tools: + +```bash +mas-devops-db2-validate-config --help +mas-devops-create-initial-users-for-saas --help +mas-devops-saas-job-cleaner --help +mas-devops-notify-slack --help +``` + +## Dependencies + +The package requires the following runtime dependencies: + +- `pyyaml`: YAML parsing and generation +- `openshift`: OpenShift/Kubernetes client +- `kubernetes`: Kubernetes Python client +- `kubeconfig`: Kubeconfig file handling +- `jinja2`: Template engine +- `jinja2-base64-filters`: Base64 filters for Jinja2 +- `semver`: Semantic versioning +- `boto3`: AWS SDK for Python +- `slack_sdk`: Slack API client + +All dependencies will be automatically installed when you install the package. + +## Next Steps + +Once installed, proceed to the [Quick Start Guide](quickstart.md) to learn how to use the library. 
\ No newline at end of file diff --git a/docs/getting-started/quickstart.md b/docs/getting-started/quickstart.md new file mode 100644 index 00000000..5676d9b2 --- /dev/null +++ b/docs/getting-started/quickstart.md @@ -0,0 +1,205 @@ +# Quick Start Guide + +This guide will help you get started with the `mas-devops` library quickly. + +## Prerequisites + +Before you begin, ensure you have: + +1. Python 3.12 or higher installed +2. Access to an OpenShift/Kubernetes cluster +3. Valid kubeconfig file configured +4. The `mas-devops` package installed (see [Installation](installation.md)) + +## Basic Usage + +### 1. Import Required Modules + +```python +from openshift import dynamic +from kubernetes import config +from kubernetes.client import api_client + +from mas.devops.ocp import createNamespace +from mas.devops.tekton import installOpenShiftPipelines, updateTektonDefinitions +``` + +### 2. Create an OpenShift Client + +```python +# Load kubeconfig and create a dynamic client +dynClient = dynamic.DynamicClient( + api_client.ApiClient(configuration=config.load_kube_config()) +) +``` + +### 3. 
Perform Operations + +#### Create a Namespace + +```python +namespace = "my-mas-namespace" +createNamespace(dynClient, namespace) +print(f"Namespace '{namespace}' created successfully") +``` + +#### Install OpenShift Pipelines + +```python +installOpenShiftPipelines(dynClient) +print("OpenShift Pipelines installed successfully") +``` + +#### Update Tekton Definitions + +```python +pipelinesNamespace = "mas-myinstance-pipelines" +tektonYamlPath = "/path/to/ibm-mas-tekton.yaml" + +updateTektonDefinitions(pipelinesNamespace, tektonYamlPath) +print("Tekton definitions updated successfully") +``` + +## Complete Example: MAS Upgrade Pipeline + +Here's a complete example that sets up and launches a MAS upgrade pipeline: + +```python +from openshift import dynamic +from kubernetes import config +from kubernetes.client import api_client + +from mas.devops.ocp import createNamespace +from mas.devops.tekton import ( + installOpenShiftPipelines, + updateTektonDefinitions, + launchUpgradePipeline +) + +# Configuration +instanceId = "mymas" +pipelinesNamespace = f"mas-{instanceId}-pipelines" +tektonYamlPath = "/mascli/templates/ibm-mas-tekton.yaml" + +# Create OpenShift client +dynClient = dynamic.DynamicClient( + api_client.ApiClient(configuration=config.load_kube_config()) +) + +# Install OpenShift Pipelines Operator +print("Installing OpenShift Pipelines...") +installOpenShiftPipelines(dynClient) + +# Create pipelines namespace +print(f"Creating namespace '{pipelinesNamespace}'...") +createNamespace(dynClient, pipelinesNamespace) + +# Update Tekton definitions +print("Updating Tekton definitions...") +updateTektonDefinitions(pipelinesNamespace, tektonYamlPath) + +# Launch upgrade pipeline +print("Launching upgrade pipeline...") +pipelineURL = launchUpgradePipeline(dynClient, instanceId) +print(f"Pipeline launched successfully!") +print(f"View pipeline run at: {pipelineURL}") +``` + +## Using CLI Tools + +The package includes several command-line tools for common operations. 
+ +### Validate DB2 Configuration + +```bash +mas-devops-db2-validate-config \ + --namespace mas-myinstance-core \ + --instance-name myinstance \ + --app manage +``` + +### Create Initial Users for SaaS + +```bash +mas-devops-create-initial-users-for-saas \ + --mas-instance-id myinstance \ + --mas-workspace-id workspace1 \ + --log-level INFO \ + --initial-users-yaml-file /path/to/users.yaml \ + --manage-api-port 8443 \ + --coreapi-port 8444 \ + --admin-dashboard-port 8445 +``` + +### Clean Up SaaS Jobs + +```bash +mas-devops-saas-job-cleaner \ + --namespace mas-myinstance-core \ + --max-age-hours 24 +``` + +### Send Slack Notification + +```bash +mas-devops-notify-slack \ + --webhook-url "https://hooks.slack.com/services/YOUR/WEBHOOK/URL" \ + --message "Deployment completed successfully" \ + --channel "#deployments" +``` + +## Working with DB2 + +Validate DB2 configuration for a MAS application: + +```python +from mas.devops.db2 import validateDB2Config + +namespace = "mas-myinstance-core" +instanceName = "myinstance" +app = "manage" + +result = validateDB2Config(namespace, instanceName, app) +if result: + print("DB2 configuration is valid") +else: + print("DB2 configuration validation failed") +``` + +## Working with Slack Notifications + +Send notifications to Slack: + +```python +from mas.devops.slack import sendSlackNotification + +webhookUrl = "https://hooks.slack.com/services/YOUR/WEBHOOK/URL" +message = "MAS deployment completed successfully" +channel = "#deployments" + +sendSlackNotification(webhookUrl, message, channel) +``` + +## Error Handling + +Always wrap your operations in try-except blocks: + +```python +from mas.devops.ocp import createNamespace + +try: + createNamespace(dynClient, "my-namespace") + print("Namespace created successfully") +except Exception as e: + print(f"Error creating namespace: {e}") +``` + +## Next Steps + +- Explore the [API Reference](../api/index.md) for detailed documentation +- Check out the [CLI Tools](../cli/index.md) 
documentation +- Learn about specific modules: + - [OCP Operations](../api/ocp.md) + - [Tekton Pipelines](../api/tekton.md) + - [MAS Suite Management](../api/mas/suite.md) + - [DB2 Operations](../api/db2.md) \ No newline at end of file diff --git a/docs/index.md b/docs/index.md new file mode 100644 index 00000000..5aaa1746 --- /dev/null +++ b/docs/index.md @@ -0,0 +1,99 @@ +# MAS DevOps + +Welcome to the **MAS DevOps** documentation! This Python library provides tools and utilities for managing Maximo Application Suite (MAS) deployments and operations. + +[![Code style: PEP8](https://img.shields.io/badge/code%20style-PEP--8-blue.svg)](https://peps.python.org/pep-0008/) +[![Flake8: checked](https://img.shields.io/badge/flake8-checked-blueviolet)](https://flake8.pycqa.org/en/latest/) +![GitHub Actions Workflow Status](https://img.shields.io/github/actions/workflow/status/ibm-mas/python-devops/python-release.yml) +[![PyPI - Version](https://img.shields.io/pypi/v/mas.devops)](https://pypi.org/project/mas-devops) +![PyPI - Python Version](https://img.shields.io/pypi/pyversions/mas.devops) +![PyPI - Downloads](https://img.shields.io/pypi/dm/mas.devops) + +## Overview + +The `mas-devops` package provides a comprehensive set of Python modules and CLI tools for: + +- **OpenShift/Kubernetes Operations**: Manage namespaces, resources, and deployments +- **Tekton Pipelines**: Install and manage OpenShift Pipelines for MAS automation +- **Operator Lifecycle Manager (OLM)**: Handle operator installations and subscriptions +- **MAS Suite Management**: Configure and manage MAS instances and applications +- **Database Operations**: Validate and configure DB2 instances +- **SaaS Operations**: Clean up jobs and manage SaaS-specific tasks +- **User Management**: Create and manage initial users for MAS +- **Notifications**: Send alerts and notifications via Slack + +## Quick Example + +```python +from openshift import dynamic +from kubernetes import config +from kubernetes.client 
import api_client + +from mas.devops.ocp import createNamespace +from mas.devops.tekton import installOpenShiftPipelines, updateTektonDefinitions, launchUpgradePipeline + +instanceId = "mymas" +pipelinesNamespace = f"mas-{instanceId}-pipelines" + +# Create an OpenShift client +dynClient = dynamic.DynamicClient( + api_client.ApiClient(configuration=config.load_kube_config()) +) + +# Install OpenShift Pipelines Operator +installOpenShiftPipelines(dynClient) + +# Create the pipelines namespace and install the MAS tekton definitions +createNamespace(dynClient, pipelinesNamespace) +updateTektonDefinitions(pipelinesNamespace, "/mascli/templates/ibm-mas-tekton.yaml") + +# Launch the upgrade pipeline and print the URL to view the pipeline run +pipelineURL = launchUpgradePipeline(dynClient, instanceId) +print(pipelineURL) +``` + +## Features + +### Core Modules + +- **OCP**: OpenShift/Kubernetes cluster operations +- **Tekton**: Pipeline management and execution +- **OLM**: Operator lifecycle management +- **Utils**: Common utilities and helper functions + +### MAS Modules + +- **Suite**: MAS core suite management +- **Apps**: MAS application configuration and deployment + +### Service Integrations + +- **DB2**: Database validation and configuration +- **SLS**: Suite License Service integration +- **AI Service**: AI/ML service management +- **Slack**: Notification and alerting + +### CLI Tools + +The package includes several command-line tools: + +- `mas-devops-db2-validate-config`: Validate DB2 configurations +- `mas-devops-create-initial-users-for-saas`: Create initial users for SaaS deployments +- `mas-devops-saas-job-cleaner`: Clean up completed jobs in SaaS environments +- `mas-devops-notify-slack`: Send notifications to Slack channels + +## Getting Started + +Check out the [Installation Guide](getting-started/installation.md) to get started, or jump straight to the [Quick Start](getting-started/quickstart.md) guide. 
+ +## API Reference + +Browse the complete [API Reference](api/index.md) for detailed documentation of all modules, classes, and functions. + +## Contributing + +We welcome contributions! Please see our [Contributing Guide](contributing.md) for details on how to get involved. + +## License + +This project is licensed under the Eclipse Public License v1.0. See the [License](license.md) page for details. \ No newline at end of file diff --git a/docs/license.md b/docs/license.md new file mode 100644 index 00000000..ab674e98 --- /dev/null +++ b/docs/license.md @@ -0,0 +1,122 @@ +# License + +## Eclipse Public License - v1.0 + +Copyright (c) 2024, 2025 IBM Corporation and other Contributors. + +All rights reserved. This program and the accompanying materials are made available under the terms of the Eclipse Public License v1.0 which accompanies this distribution, and is available at: + +**http://www.eclipse.org/legal/epl-v10.html** + +## Summary + +The Eclipse Public License (EPL) is a permissive open source license that allows you to: + +- **Use** the software for any purpose +- **Modify** the software +- **Distribute** the software +- **Sublicense** the software under certain conditions + +### Key Points + +- You can use this software in commercial applications +- You must include the license and copyright notice +- Modified versions must be clearly marked as such +- You must make source code available for modifications you distribute +- The license includes patent grants from contributors + +## Full License Text + +THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS ECLIPSE PUBLIC LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION OF THE PROGRAM CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT. + +### 1. 
DEFINITIONS + +"Contribution" means: + +a) in the case of the initial Contributor, the initial code and documentation distributed under this Agreement, and + +b) in the case of each subsequent Contributor: + - i) changes to the Program, and + - ii) additions to the Program; + +where such changes and/or additions to the Program originate from and are distributed by that particular Contributor. A Contribution 'originates' from a Contributor if it was added to the Program by such Contributor itself or anyone acting on such Contributor's behalf. + +"Contributor" means any person or entity that distributes the Program. + +"Licensed Patents" mean patent claims licensable by a Contributor which are necessarily infringed by the use or sale of its Contribution alone or when combined with the Program. + +"Program" means the Contributions distributed in accordance with this Agreement. + +"Recipient" means anyone who receives the Program under this Agreement, including all Contributors. + +### 2. GRANT OF RIGHTS + +a) Subject to the terms of this Agreement, each Contributor hereby grants Recipient a non-exclusive, worldwide, royalty-free copyright license to reproduce, prepare derivative works of, publicly display, publicly perform, distribute and sublicense the Contribution of such Contributor, if any, and such derivative works, in source code and object code form. + +b) Subject to the terms of this Agreement, each Contributor hereby grants Recipient a non-exclusive, worldwide, royalty-free patent license under Licensed Patents to make, use, sell, offer to sell, import and otherwise transfer the Contribution of such Contributor, if any, in source code and object code form. + +### 3. 
REQUIREMENTS + +A Contributor may choose to distribute the Program in object code form under its own license agreement, provided that: + +a) it complies with the terms and conditions of this Agreement; and + +b) its license agreement: + - i) effectively disclaims on behalf of all Contributors all warranties and conditions, express and implied, including warranties or conditions of title and non-infringement, and implied warranties or conditions of merchantability and fitness for a particular purpose; + - ii) effectively excludes on behalf of all Contributors all liability for damages, including direct, indirect, special, incidental and consequential damages, such as lost profits; + - iii) states that any provisions which differ from this Agreement are offered by that Contributor alone and not by any other party; and + - iv) states that source code for the Program is available from such Contributor, and informs licensees how to obtain it in a reasonable manner on or through a medium customarily used for software exchange. + +### 4. COMMERCIAL DISTRIBUTION + +Commercial distributors of software may accept certain responsibilities with respect to end users, business partners and the like. While this license is intended to facilitate the commercial use of the Program, the Contributor who includes the Program in a commercial product offering should do so in a manner which does not create potential liability for other Contributors. + +### 5. NO WARRANTY + +EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. + +### 6. 
DISCLAIMER OF LIABILITY + +EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. + +### 7. GENERAL + +If any provision of this Agreement is invalid or unenforceable under applicable law, it shall not affect the validity or enforceability of the remainder of the terms of this Agreement, and without further action by the parties hereto, such provision shall be reformed to the minimum extent necessary to make such provision valid and enforceable. + +## Third-Party Dependencies + +This project uses several third-party libraries, each with their own licenses: + +- **pyyaml** - MIT License +- **openshift** - Apache Software License +- **kubernetes** - Apache Software License +- **kubeconfig** - BSD License +- **jinja2** - BSD License +- **jinja2-base64-filters** - MIT License +- **semver** - BSD License +- **boto3** - Apache Software License +- **slack_sdk** - MIT License + +Development dependencies: + +- **build** - MIT License +- **flake8** - MIT License +- **pytest** - MIT License +- **pytest-mock** - MIT License +- **requests-mock** - Apache Software License +- **setuptools** - MIT License + +Documentation dependencies: + +- **mkdocs** - BSD License +- **mkdocs-material** - MIT License +- **mkdocstrings** - ISC License +- **pymdown-extensions** - MIT License + +## Questions? 
+ +If you have questions about the license, please: + +- Review the full license text at http://www.eclipse.org/legal/epl-v10.html +- Contact the project maintainers +- Visit the [GitHub repository](https://github.com/ibm-mas/python-devops) \ No newline at end of file diff --git a/mkdocs.yml b/mkdocs.yml new file mode 100644 index 00000000..adec8bab --- /dev/null +++ b/mkdocs.yml @@ -0,0 +1,116 @@ +site_name: MAS DevOps Documentation +site_description: Python library for Maximo Application Suite Dev/Ops +site_author: IBM Maximo Team +site_url: https://ibm-mas.github.io/python-devops/ + +repo_name: ibm-mas/python-devops +repo_url: https://github.com/ibm-mas/python-devops +edit_uri: edit/main/docs/ + +theme: + name: material + palette: + - media: "(prefers-color-scheme)" + toggle: + icon: material/link + name: Switch to light mode + - media: "(prefers-color-scheme: light)" + scheme: default + primary: indigo + accent: indigo + toggle: + icon: material/toggle-switch + name: Switch to dark mode + - media: "(prefers-color-scheme: dark)" + scheme: slate + primary: black + accent: indigo + toggle: + icon: material/toggle-switch-off + name: Switch to system preference + features: + - navigation.tabs + - navigation.sections + - navigation.top + - search.suggest + - search.highlight + - content.tabs.link + - content.code.annotation + - content.code.copy + language: en + +plugins: + - search + - mkdocstrings: + handlers: + python: + options: + docstring_style: google + show_source: true + show_root_heading: true + show_root_full_path: false + show_symbol_type_heading: true + show_symbol_type_toc: true + members_order: source + group_by_category: true + show_category_heading: true + show_if_no_docstring: false + inherited_members: false + filters: + - "!^_" + +markdown_extensions: + - pymdownx.highlight: + anchor_linenums: true + - pymdownx.inlinehilite + - pymdownx.snippets + - admonition + - pymdownx.arithmatex: + generic: true + - footnotes + - pymdownx.details + - 
pymdownx.superfences + - pymdownx.mark + - attr_list + - pymdownx.emoji: + emoji_index: !!python/name:material.extensions.emoji.twemoji + emoji_generator: !!python/name:material.extensions.emoji.to_svg + +nav: + - Home: index.md + - Getting Started: + - Installation: getting-started/installation.md + - Quick Start: getting-started/quickstart.md + - API Reference: + - Overview: api/index.md + - Core Modules: + - OCP: api/ocp.md + - Tekton: api/tekton.md + - OLM: api/olm.md + - Utils: api/utils.md + - MAS Modules: + - Suite: api/mas/suite.md + - Apps: api/mas/apps.md + - Services: + - DB2: api/db2.md + - SLS: api/sls.md + - AI Service: api/aiservice.md + - Slack: api/slack.md + - SaaS: + - Job Cleaner: api/saas/job_cleaner.md + - Users: api/users.md + - CLI Tools: + - Overview: cli/index.md + - DB2 Validate Config: cli/db2-validate-config.md + - Create Initial Users: cli/create-initial-users.md + - SaaS Job Cleaner: cli/saas-job-cleaner.md + - Notify Slack: cli/notify-slack.md + - Contributing: contributing.md + - License: license.md + +extra: + social: + - icon: fontawesome/brands/github + link: https://github.com/ibm-mas/python-devops + +# Made with Bob diff --git a/setup.py b/setup.py index 5a0281d0..e914b8e5 100644 --- a/setup.py +++ b/setup.py @@ -73,6 +73,12 @@ def get_version(rel_path): 'pytest-mock', # MIT License 'requests-mock', # Apache Software License 'setuptools', # MIT License + ], + 'docs': [ + 'mkdocs', # BSD License + 'mkdocs-material', # MIT License + 'mkdocstrings[python]', # ISC License + 'pymdown-extensions', # MIT License ] }, classifiers=[ diff --git a/src/mas/devops/aiservice.py b/src/mas/devops/aiservice.py index 26a37a09..4580c52a 100644 --- a/src/mas/devops/aiservice.py +++ b/src/mas/devops/aiservice.py @@ -31,13 +31,6 @@ def listAiServiceInstances(dynClient: DynamicClient) -> list: Returns: list: A list of dictionaries representing AI Service instances. Returns an empty list if no instances are found or if errors occur. 
- - Example: - >>> from openshift.dynamic import DynamicClient - >>> client = DynamicClient(...) - >>> instances = listAiServiceInstances(client) - >>> for instance in instances: - ... print(f"Instance: {instance['metadata']['name']}") """ return listInstances(dynClient, "aiservice.ibm.com/v1", "AIServiceApp") @@ -58,13 +51,6 @@ def verifyAiServiceInstance(dynClient: DynamicClient, instanceId: str) -> bool: bool: True if the instance exists and is accessible, False otherwise. Returns False if the instance is not found, the CRD doesn't exist, or authorization fails. - - Example: - >>> from openshift.dynamic import DynamicClient - >>> client = DynamicClient(...) - >>> exists = verifyAiServiceInstance(client, "aiservice-inst1") - >>> if exists: - ... print("AI Service instance found") """ try: aiserviceAPI = dynClient.resources.get(api_version="aiservice.ibm.com/v1", kind="AIServiceApp") @@ -95,13 +81,6 @@ def listAiServiceTenantInstances(dynClient: DynamicClient) -> list: Returns: list: A list of dictionaries representing AI Service Tenant instances. Returns an empty list if no tenant instances are found or if errors occur. - - Example: - >>> from openshift.dynamic import DynamicClient - >>> client = DynamicClient(...) - >>> tenants = listAiServiceTenantInstances(client) - >>> for tenant in tenants: - ... print(f"Tenant: {tenant['metadata']['name']}") """ return listInstances(dynClient, "aiservice.ibm.com/v1", "AIServiceTenant") @@ -123,13 +102,6 @@ def verifyAiServiceTenantInstance(dynClient: DynamicClient, instanceId: str, ten bool: True if the tenant exists and is accessible, False otherwise. Returns False if the tenant is not found, the CRD doesn't exist, or authorization fails. - - Example: - >>> from openshift.dynamic import DynamicClient - >>> client = DynamicClient(...) - >>> exists = verifyAiServiceTenantInstance(client, "aiservice-inst1", "tenant1") - >>> if exists: - ... 
print("AI Service Tenant found") """ try: aiserviceTenantAPI = dynClient.resources.get(api_version="aiservice.ibm.com/v1", kind="AIServiceTenant") @@ -161,13 +133,6 @@ def getAiserviceChannel(dynClient: DynamicClient, instanceId: str) -> str | None Returns: str: The channel name (e.g., "v1.0", "stable") if the subscription exists, None if the subscription is not found. - - Example: - >>> from openshift.dynamic import DynamicClient - >>> client = DynamicClient(...) - >>> channel = getAiserviceChannel(client, "aiservice-inst1") - >>> if channel: - ... print(f"AI Service is on channel: {channel}") """ aiserviceSubscription = getSubscription(dynClient, f"aiservice-{instanceId}", "ibm-aiservice") if aiserviceSubscription is None: diff --git a/src/mas/devops/data/__init__.py b/src/mas/devops/data/__init__.py index 36f4ef2f..c4f4ce43 100644 --- a/src/mas/devops/data/__init__.py +++ b/src/mas/devops/data/__init__.py @@ -33,11 +33,6 @@ def getCatalog(name: str) -> dict | None: Returns: dict: The catalog definition dictionary containing operator versions and metadata. Returns None if the catalog file doesn't exist. - - Example: - >>> catalog = getCatalog("v9-241205-amd64") - >>> if catalog: - ... print(f"Catalog version: {catalog.get('version')}") """ moduleFile = path.abspath(__file__) modulePath = path.dirname(moduleFile) @@ -65,12 +60,6 @@ def listCatalogTags(arch="amd64") -> list: Returns: list: Sorted list of catalog tag strings (e.g., ["v8-240528-amd64", "v9-241205-amd64"]). Returns empty list if no catalogs are found for the architecture. - - Example: - >>> tags = listCatalogTags("amd64") - >>> print(f"Available catalogs: {len(tags)}") - >>> for tag in tags[-3:]: # Show last 3 - ... print(tag) """ moduleFile = path.abspath(__file__) modulePath = path.dirname(moduleFile) @@ -95,12 +84,6 @@ def getNewestCatalogTag(arch="amd64") -> str | None: Returns: str: The newest catalog tag (e.g., "v9-241205-amd64"). Returns None if no catalogs are found for the architecture. 
- - Example: - >>> newest = getNewestCatalogTag("amd64") - >>> if newest: - ... print(f"Latest catalog: {newest}") - ... catalog = getCatalog(newest) """ catalogs = listCatalogTags(arch) if len(catalogs) == 0: diff --git a/src/mas/devops/mas/apps.py b/src/mas/devops/mas/apps.py index 307d17bc..8483ea7d 100644 --- a/src/mas/devops/mas/apps.py +++ b/src/mas/devops/mas/apps.py @@ -70,12 +70,6 @@ def getAppResource(dynClient: DynamicClient, instanceId: str, applicationId: str Returns: ResourceInstance: The custom resource object if found, None otherwise. Returns None if the resource doesn't exist, CRD is missing, or authorization fails. - - Example: - >>> # Get application CR - >>> app = getAppResource(client, "inst1", "manage") - >>> # Get workspace CR - >>> workspace = getAppResource(client, "inst1", "manage", "masdev") """ apiVersion = APP_API_VERSIONS[applicationId] if applicationId in APP_API_VERSIONS else "apps.mas.ibm.com/v1" @@ -110,10 +104,6 @@ def verifyAppInstance(dynClient: DynamicClient, instanceId: str, applicationId: Returns: bool: True if the application instance exists, False otherwise. - - Example: - >>> if verifyAppInstance(client, "inst1", "manage"): - ... print("Manage application is installed") """ return getAppResource(dynClient, instanceId, applicationId) is not None @@ -147,14 +137,6 @@ def waitForAppReady( Returns: bool: True if the resource reaches ready state within the retry limit, False otherwise. - - Example: - >>> # Wait for Manage application to be ready - >>> if waitForAppReady(client, "inst1", "manage", retries=50, delay=300): - ... print("Manage is ready") - >>> # Wait for Manage workspace to be ready - >>> if waitForAppReady(client, "inst1", "manage", "masdev", retries=50): - ... 
print("Manage workspace is ready") """ resourceName = f"{APP_KINDS[applicationId]}/{instanceId}" @@ -219,13 +201,6 @@ def getAppsSubscriptionChannel(dynClient: DynamicClient, instanceId: str) -> lis Returns: list: List of dictionaries with 'appId' and 'channel' keys for each installed app. Returns empty list if no apps are found or if errors occur. - - Example: - >>> apps = getAppsSubscriptionChannel(client, "inst1") - >>> for app in apps: - ... print(f"{app['appId']}: {app['channel']}") - manage: 8.7.x - iot: 8.8.x """ try: installedApps = [] diff --git a/src/mas/devops/mas/suite.py b/src/mas/devops/mas/suite.py index 64ee66a5..df91c537 100644 --- a/src/mas/devops/mas/suite.py +++ b/src/mas/devops/mas/suite.py @@ -39,10 +39,6 @@ def isAirgapInstall(dynClient: DynamicClient, checkICSP: bool = False) -> bool: Returns: bool: True if air-gap configuration is detected, False otherwise. - - Example: - >>> if isAirgapInstall(client): - ... print("Air-gapped installation detected") """ if checkICSP: try: @@ -76,12 +72,6 @@ def getDefaultStorageClasses(dynClient: DynamicClient) -> dict: - rwo (str): Storage class name for RWO volumes - rwx (str): Storage class name for RWX volumes All attributes are None if no recognized provider is found. - - Example: - >>> storage = getDefaultStorageClasses(client) - >>> if storage.provider: - ... print(f"Provider: {storage.providerName}") - ... print(f"RWO: {storage.rwo}, RWX: {storage.rwx}") """ result = SimpleNamespace( provider=None, @@ -156,12 +146,6 @@ def getCurrentCatalog(dynClient: DynamicClient) -> dict: - image (str): Catalog image reference - catalogId (str): Parsed catalog identifier (e.g., "v9-241205-amd64") Returns None if the catalog is not found. - - Example: - >>> catalog = getCurrentCatalog(client) - >>> if catalog: - ... print(f"Catalog: {catalog['catalogId']}") - ... 
print(f"Image: {catalog['image']}") """ catalogsAPI = dynClient.resources.get(api_version="operators.coreos.com/v1alpha1", kind="CatalogSource") try: @@ -201,11 +185,6 @@ def listMasInstances(dynClient: DynamicClient) -> list: Returns: list: A list of dictionaries representing MAS Suite instances. Returns an empty list if no instances are found or if errors occur. - - Example: - >>> instances = listMasInstances(client) - >>> for instance in instances: - ... print(f"MAS Instance: {instance['metadata']['name']}") """ return listInstances(dynClient, "core.mas.ibm.com/v1", "Suite") @@ -223,11 +202,6 @@ def getWorkspaceId(dynClient: DynamicClient, instanceId: str) -> str: Returns: str: The workspace ID if found, None if no workspaces exist for the instance. - - Example: - >>> workspace_id = getWorkspaceId(client, "inst1") - >>> if workspace_id: - ... print(f"Workspace ID: {workspace_id}") """ workspaceId = None workspacesAPI = dynClient.resources.get(api_version="core.mas.ibm.com/v1", kind="Workspace") @@ -251,10 +225,6 @@ def verifyMasInstance(dynClient: DynamicClient, instanceId: str) -> bool: bool: True if the instance exists and is accessible, False otherwise. Returns False if the instance is not found, the CRD doesn't exist, or authorization fails. - - Example: - >>> if verifyMasInstance(client, "inst1"): - ... print("MAS instance found") """ try: suitesAPI = dynClient.resources.get(api_version="core.mas.ibm.com/v1", kind="Suite") @@ -284,11 +254,6 @@ def getMasChannel(dynClient: DynamicClient, instanceId: str) -> str: Returns: str: The channel name (e.g., "8.11.x", "9.0.x") if the subscription exists, None if the subscription is not found. - - Example: - >>> channel = getMasChannel(client, "inst1") - >>> if channel: - ... 
print(f"MAS is on channel: {channel}") """ masSubscription = getSubscription(dynClient, f"mas-{instanceId}-core", "ibm-mas") if masSubscription is None: @@ -315,15 +280,6 @@ def updateIBMEntitlementKey(dynClient: DynamicClient, namespace: str, icrUsernam Returns: ResourceInstance: The created or updated Secret resource. - - Example: - >>> secret = updateIBMEntitlementKey( - ... client, - ... "mas-inst1-core", - ... "cp", - ... "your-entitlement-key" - ... ) - >>> print(f"Secret {secret.metadata.name} updated") """ if secretName is None: secretName = "ibm-entitlement" diff --git a/src/mas/devops/olm.py b/src/mas/devops/olm.py index 5b0967b2..d9351031 100644 --- a/src/mas/devops/olm.py +++ b/src/mas/devops/olm.py @@ -140,9 +140,6 @@ def applySubscription(dynClient: DynamicClient, namespace: str, packageName: str Raises: OLMException: If the package is not available in any catalog NotFoundError: If resources cannot be created - - Example: - applySubscription(dynClient, "my-namespace", "ibm-sls") # use defaults """ if catalogSourceNamespace is None: catalogSourceNamespace = "openshift-marketplace" diff --git a/src/mas/devops/saas/job_cleaner.py b/src/mas/devops/saas/job_cleaner.py index 1e56aaa6..ff6c3833 100644 --- a/src/mas/devops/saas/job_cleaner.py +++ b/src/mas/devops/saas/job_cleaner.py @@ -33,13 +33,6 @@ class JobCleaner: k8s_client (client.api_client.ApiClient): Kubernetes API client. batch_v1_api (client.BatchV1Api): Kubernetes Batch V1 API interface. logger (logging.Logger): Logger instance for this class. 
- - Example: - >>> from kubernetes import client, config - >>> config.load_kube_config() - >>> k8s_client = client.ApiClient() - >>> cleaner = JobCleaner(k8s_client) - >>> cleaner.cleanup_jobs("argocd.argoproj.io/instance", limit=100, dry_run=False) """ def __init__(self, k8s_client: client.api_client.ApiClient): @@ -143,13 +136,6 @@ def cleanup_jobs(self, label: str, limit: int, dry_run: bool): - Deletion uses "Foreground" propagation policy - The process is eventually consistent; race conditions are handled gracefully - Progress is logged for each cleanup group - - Example: - >>> cleaner.cleanup_jobs("argocd.argoproj.io/instance", limit=100, dry_run=True) - Found 5 unique (namespace, cleanup group ID) pairs, processing ... - 0) my-app-sync mas-inst1-core - SKIP my-app-sync-abc123 2024-01-15 10:30:00 - PURGE my-app-sync-xyz789 2024-01-14 09:20:00 SUCCESS """ dry_run_param = None if dry_run: diff --git a/src/mas/devops/sls.py b/src/mas/devops/sls.py index 119d436d..4927ed9a 100644 --- a/src/mas/devops/sls.py +++ b/src/mas/devops/sls.py @@ -33,12 +33,6 @@ def listSLSInstances(dynClient: DynamicClient) -> list: Raises: No exceptions are raised; all errors are caught and logged internally. - - Example: - >>> from openshift.dynamic import DynamicClient - >>> client = DynamicClient(...) - >>> instances = listSLSInstances(client) - >>> print(f"Found {len(instances)} SLS instances") """ try: slsAPI = dynClient.resources.get(api_version="sls.ibm.com/v1", kind="LicenseService") @@ -72,14 +66,6 @@ def findSLSByNamespace(namespace: str, instances: list = None, dynClient: Dynami Returns: bool: True if an SLS instance is found in the specified namespace, False otherwise. Also returns False if neither instances nor dynClient is provided. 
- - Example: - >>> # Using pre-fetched instances - >>> instances = listSLSInstances(client) - >>> exists = findSLSByNamespace("ibm-sls", instances=instances) - >>> - >>> # Using dynamic client - >>> exists = findSLSByNamespace("ibm-sls", dynClient=client) """ if not instances and not dynClient: return False diff --git a/src/mas/devops/users.py b/src/mas/devops/users.py index a6019099..a4c6f997 100644 --- a/src/mas/devops/users.py +++ b/src/mas/devops/users.py @@ -39,13 +39,6 @@ class MASUserUtils(): mas_workspace_id (str): The workspace identifier within the MAS instance. mas_core_namespace (str): Kubernetes namespace for MAS core components. manage_namespace (str): Kubernetes namespace for Manage application. - - Example: - >>> from kubernetes import client, config - >>> config.load_kube_config() - >>> k8s_client = client.ApiClient() - >>> mas_utils = MASUserUtils("inst1", "masdev", k8s_client) - >>> user = mas_utils.get_user("user@example.com") """ MAXADMIN = "MAXADMIN" @@ -208,11 +201,6 @@ def get_user(self, user_id): Raises: Exception: If the API returns an unexpected status code. - - Example: - >>> user = mas_utils.get_user("user@example.com") - >>> if user: - ... print(f"User: {user['displayName']}") """ self.logger.debug(f"Getting user {user_id}") url = f"{self.mas_api_url_internal}/v3/users/{user_id}" @@ -251,34 +239,6 @@ def get_or_create_user(self, payload): Raises: Exception: If user creation fails with an unexpected status code. - - Example: - >>> user_payload = { - ... "id": "user@example.com", - ... "status": {"active": True}, - ... "username": "user@example.com", - ... "owner": "local", - ... "emails": [{ - ... "value": "user@example.com", - ... "type": "Work", - ... "primary": True - ... }], - ... "displayName": "John Doe", - ... "issuer": "local", - ... "permissions": { - ... "systemAdmin": False, - ... "userAdmin": True, - ... "apikeyAdmin": False - ... }, - ... "entitlement": { - ... "application": "PREMIUM", - ... "admin": "ADMIN_BASE", - ... 
"alwaysReserveLicense": True - ... }, - ... "givenName": "John", - ... "familyName": "Doe" - ... } - >>> user = mas_utils.get_or_create_user(user_payload) """ existing_user = self.get_user(payload["id"]) @@ -324,10 +284,6 @@ def update_user(self, payload): Raises: Exception: If the update fails or user doesn't exist. - - Example: - >>> updated_payload = {"id": "user@example.com", "displayName": "Jane Doe"} - >>> user = mas_utils.update_user(updated_payload) """ user_id = payload["id"] self.logger.debug(f"Updating user {user_id}") @@ -364,9 +320,6 @@ def update_user_display_name(self, user_id, display_name): Raises: Exception: If the update fails or user doesn't exist. - - Example: - >>> user = mas_utils.update_user_display_name("user@example.com", "Jane Smith") """ self.logger.debug(f"Updating user display name {user_id} to {display_name}") url = f"{self.mas_api_url_internal}/v3/users/{user_id}" @@ -410,9 +363,6 @@ def link_user_to_local_idp(self, user_id, email_password=False): Note: The API response contains a generated user token which is intentionally not logged or returned for security reasons. - - Example: - >>> mas_utils.link_user_to_local_idp("user@example.com", email_password=True) """ # For the sake of idempotency, check if the user already has a local identity @@ -462,11 +412,6 @@ def get_user_workspaces(self, user_id): Raises: Exception: If the user doesn't exist (404) or the API call fails. - - Example: - >>> workspaces = mas_utils.get_user_workspaces("user@example.com") - >>> for ws in workspaces: - ... print(f"Workspace: {ws['id']}") """ self.logger.debug(f"Getting workspaces for user {user_id}") url = f"{self.mas_api_url_internal}/v3/users/{user_id}/workspaces" @@ -505,9 +450,6 @@ def add_user_to_workspace(self, user_id, is_workspace_admin=False): Raises: Exception: If the operation fails. 
- - Example: - >>> mas_utils.add_user_to_workspace("user@example.com", is_workspace_admin=True) """ workspaces = self.get_user_workspaces(user_id) for workspace in workspaces: @@ -553,11 +495,6 @@ def get_user_application_permissions(self, user_id, application_id): Raises: Exception: If the API call fails with an unexpected status code. - - Example: - >>> perms = mas_utils.get_user_application_permissions("user@example.com", "manage") - >>> if perms: - ... print(f"Role: {perms.get('role')}") """ self.logger.debug(f"Getting user {user_id} permissions for application {application_id}") url = f"{self.mas_api_url_internal}/workspaces/{self.mas_workspace_id}/applications/{application_id}/users/{user_id}" @@ -596,9 +533,6 @@ def set_user_application_permission(self, user_id, application_id, role): Raises: Exception: If the operation fails. - - Example: - >>> mas_utils.set_user_application_permission("user@example.com", "manage", "ADMIN") """ existing_permissions = self.get_user_application_permissions(user_id, application_id) @@ -649,9 +583,6 @@ def check_user_sync(self, user_id, application_id, timeout_secs=60 * 10, retry_i Raises: Exception: If sync doesn't complete within the timeout period. - - Example: - >>> mas_utils.check_user_sync("user@example.com", "manage", timeout_secs=300) """ t_end = time.time() + timeout_secs self.logger.info(f"Awaiting user {user_id} sync status \"SUCCESS\" for app {application_id}: {t_end - time.time():.2f} seconds remaining") @@ -692,9 +623,6 @@ def resync_users(self, user_ids): Note: The "/v3/users/utils/resync" API is only available in MAS Core >= 9.1. This implementation uses a no-op profile update for backward compatibility. 
- - Example: - >>> mas_utils.resync_users(["user1@example.com", "user2@example.com"]) """ self.logger.info(f"Issuing resync request(s) for user(s) {user_ids}") @@ -726,10 +654,6 @@ def create_or_get_manage_api_key_for_user(self, user_id, temporary=False): Raises: Exception: If API key creation/retrieval fails or if the key is unexpectedly not found. - - Example: - >>> api_key = mas_utils.create_or_get_manage_api_key_for_user("MAXADMIN", temporary=True) - >>> print(f"API Key: {api_key['apikey']}") """ self.logger.debug(f"Attempting to create Manage API Key for user {user_id}") url = f"{self.manage_api_url_internal}/maximo/api/os/mxapiapikey" @@ -800,11 +724,6 @@ def get_manage_api_key_for_user(self, user_id): Raises: Exception: If the API call fails. - - Example: - >>> api_key = mas_utils.get_manage_api_key_for_user("user@example.com") - >>> if api_key: - ... print(f"Key expires: {api_key.get('expiration')}") """ self.logger.debug(f"Getting Manage API Key for user {user_id}") url = f"{self.manage_api_url_internal}/maximo/api/os/mxapiapikey" @@ -848,11 +767,6 @@ def delete_manage_api_key(self, manage_api_key): Raises: Exception: If deletion fails (except for 404 which is treated as success). - - Example: - >>> api_key = mas_utils.get_manage_api_key_for_user("user@example.com") - >>> if api_key: - ... mas_utils.delete_manage_api_key(api_key) """ self.logger.info(f"Deleting Manage API Key for user {manage_api_key['userid']}") @@ -895,10 +809,6 @@ def get_manage_group_id(self, group_name, manage_api_key): Raises: Exception: If the API call fails. 
- - Example: - >>> api_key = mas_utils.create_or_get_manage_api_key_for_user("MAXADMIN") - >>> group_id = mas_utils.get_manage_group_id("MAXADMIN", api_key) """ self.logger.debug(f"Getting ID for Manage group with name {group_name}") url = f"{self.manage_api_url_internal}/maximo/api/os/mxapigroup" @@ -942,10 +852,6 @@ def is_user_in_manage_group(self, group_name, user_id, manage_api_key): Raises: Exception: If the group doesn't exist or the API call fails. - - Example: - >>> api_key = mas_utils.create_or_get_manage_api_key_for_user("MAXADMIN") - >>> is_member = mas_utils.is_user_in_manage_group("MAXADMIN", "user@example.com", api_key) """ self.logger.debug(f"Checking if {user_id} is a member of Manage group with name {group_name}") @@ -994,10 +900,6 @@ def add_user_to_manage_group(self, user_id, group_name, manage_api_key): Raises: Exception: If the operation fails. - - Example: - >>> api_key = mas_utils.create_or_get_manage_api_key_for_user("MAXADMIN") - >>> mas_utils.add_user_to_manage_group("user@example.com", "MAXADMIN", api_key) """ if self.is_user_in_manage_group(group_name, user_id, manage_api_key): @@ -1047,11 +949,6 @@ def get_mas_applications_in_workspace(self): Raises: Exception: If the API call fails. - - Example: - >>> apps = mas_utils.get_mas_applications_in_workspace() - >>> for app in apps: - ... print(f"App: {app['id']}") """ self.logger.debug(f"Getting MAS Applications in workspace {self.mas_workspace_id}") url = f"{self.mas_api_url_internal}/workspaces/{self.mas_workspace_id}/applications" @@ -1080,10 +977,6 @@ def get_mas_application_availability(self, mas_application_id): Raises: Exception: If the API call fails. 
- - Example: - >>> app_status = mas_utils.get_mas_application_availability("manage") - >>> print(f"Ready: {app_status['ready']}, Available: {app_status['available']}") """ self.logger.debug(f"Getting availability of MAS Application {mas_application_id} in workspace {self.mas_workspace_id}") url = f"{self.mas_api_url_internal}/workspaces/{self.mas_workspace_id}/applications/{mas_application_id}" @@ -1116,9 +1009,6 @@ def await_mas_application_availability(self, mas_application_id, timeout_secs=60 Raises: Exception: If the application doesn't become available within the timeout period. - - Example: - >>> mas_utils.await_mas_application_availability("manage", timeout_secs=300) """ t_end = time.time() + timeout_secs self.logger.info(f"Waiting for {mas_application_id} to become ready and available: {t_end - time.time():.2f} seconds remaining") @@ -1148,14 +1038,6 @@ def parse_initial_users_from_aws_secret_json(self, secret_json): Raises: Exception: If CSV format is invalid or user_type is not "primary" or "secondary". - - Example: - >>> secret = { - ... "admin@example.com": "primary,John,Doe", - ... "user@example.com": "secondary,Jane,Smith,jsmith" - ... } - >>> users = mas_utils.parse_initial_users_from_aws_secret_json(secret) - >>> print(len(users['users']['primary'])) # 1 """ primary = [] secondary = [] @@ -1220,20 +1102,6 @@ def create_initial_users_for_saas(self, initial_users): Raises: Exception: If input validation fails. - - Example: - >>> initial_users = { - ... "users": { - ... "primary": [ - ... {"email": "admin@example.com", "given_name": "John", "family_name": "Doe"} - ... ], - ... "secondary": [ - ... {"email": "user@example.com", "given_name": "Jane", "family_name": "Smith"} - ... ] - ... } - ... 
} - >>> result = mas_utils.create_initial_users_for_saas(initial_users) - >>> print(f"Completed: {len(result['completed'])}, Failed: {len(result['failed'])}") """ # Validate input @@ -1331,14 +1199,6 @@ def create_initial_user_for_saas(self, user, user_type): - Regular workspace access - USER role for most apps, MANAGEUSER for Manage - No security group memberships - - Example: - >>> user = { - ... "email": "admin@example.com", - ... "given_name": "John", - ... "family_name": "Doe" - ... } - >>> mas_utils.create_initial_user_for_saas(user, "PRIMARY") """ if "email" not in user: raise Exception("'email' not found in at least one of the user defs") diff --git a/src/mas/devops/utils.py b/src/mas/devops/utils.py index ae745e87..6ac821e4 100644 --- a/src/mas/devops/utils.py +++ b/src/mas/devops/utils.py @@ -29,16 +29,6 @@ def isVersionBefore(_compare_to_version, _current_version): Note: This differs from strict semantic versioning where pre-release versions are considered less than their base version. - - Example: - >>> isVersionBefore("8.6.0", "8.5.0") - False - >>> isVersionBefore("8.6.0", "8.7.0") - True - >>> isVersionBefore("8.6.0", "8.6.0-pre.m1dev86") - False - >>> isVersionBefore("8.6.0", "8.6.x") - False """ if _current_version is None: print("Version is not informed. Returning False") @@ -73,18 +63,6 @@ def isVersionEqualOrAfter(_compare_to_version, _current_version): Note: This differs from strict semantic versioning where pre-release versions are considered less than their base version. - - Example: - >>> isVersionEqualOrAfter("8.6.0", "8.7.0") - True - >>> isVersionEqualOrAfter("8.6.0", "8.5.0") - False - >>> isVersionEqualOrAfter("8.6.0", "8.6.0") - True - >>> isVersionEqualOrAfter("8.6.0", "8.6.0-pre.m1dev86") - True - >>> isVersionEqualOrAfter("8.6.0", "8.6.x") - True """ if _current_version is None: print("Version is not informed. Returning False")