diff --git a/mpsd-software-environment.py b/mpsd-software-environment.py
index 6ef8e4f8c784243508c8b5c8f632a5586ce02ce4..d350d0a3047eb64d6021a678f2e4ec07dd3c78f0 100755
--- a/mpsd-software-environment.py
+++ b/mpsd-software-environment.py
@@ -31,19 +31,75 @@ for given system architecture and MPSD software stack version.\n
 The toolchains are built using the bash script spack_setup.sh, and the results are
 logged.
 """
+call_date_iso = (
+    datetime.datetime.now().replace(microsecond=0).isoformat().replace(":", "-")
+)
 config_vars = {
-    "cmd_log_file": "install.log",
-    "build_log_file": (
-        "logs/mpsd_spack_ver_toolchains_"
-        f"{datetime.datetime.now().replace(microsecond=0).isoformat()}.log"
-    ),
+    # kept inside the mpsd_release folder
+    "cmd_log_file": "script_execution_summary.log",
+    # Metadata tags
     "metadata_tag_open": "!<meta>",
     "metadata_tag_close": "</meta>!",
-    # TODO: modify toolchains,mpsd_spack_ver when the variable is available
     "spack_environments_repo": "https://gitlab.gwdg.de/mpsd-cs/spack-environments.git",
 }
 
 
+def create_log_file_names(
+    mpsd_release: str,
+    microarch: str,
+    action: str,
+    date: str = call_date_iso,
+    toolchain: str = None,
+) -> Union[str, None]:
+    """Create log file names.
+
+    This function creates the log file names for either the installer or
+    the build log files.
+
+    If a toolchain is given, then the build log file name is created.
+    If no toolchain is given, then the installer log file name is created.
+    The installer log file hosts the logs of the installer script, while
+    the build log file hosts the logs of the build process as generated by the
+    spack_setup.sh script.
+
+    Parameters
+    ----------
+    mpsd_release : str
+        MPSD software stack version
+    microarch : str
+        system architecture
+    date : str
+        date of the call in ISO format
+    action : str
+        action performed (install, remove, reinstall, prepare, status)
+        only install and remove are valid for build log file.
+    toolchain : str
+        toolchain name (only for build log file)
+
+    Returns
+    -------
+    str or None
+        log file name
+        installer_log_file_name or build_log_file_name depending on the
+        parameters given.
+        If the action is not one that changes the files on disk (info-only actions)
+        then None is returned.
+    """
+    if toolchain:
+        # if toolchain is given, then we build the build_log_file_name
+        if action in ["install", "remove"]:
+            log_file_name = (
+                f"{mpsd_release}_{microarch}_{date}_BUILD_{toolchain}_{action}.log"
+            )
+        else:
+            return None
+    else:
+        # if toolchain is not given, then we build the installer_log_file_name
+        log_file_name = f"{mpsd_release}_{microarch}_{date}_APEX_{action}.log"
+
+    return log_file_name
+
+
 def log_metadata(key: str, value: str) -> None:
     """Log metadata to the log file.
 
@@ -91,7 +147,29 @@ def read_metadata_from_logfile(logfile: Union[str, Path]) -> dict:
     }
 
 
-def set_up_logging(loglevel="warning", filename=None):
+def get_installer_log_file_path(mpsd_release: str, cmd: str, root_dir: str) -> str:
+    """Get installer log file path."""
+    # Get machine configs
+    os.environ.get("MPSD_OS", "UNKNOWN_OS")
+    microarch = get_native_microarchitecture()
+    # parse logging first
+    # decide the log_file_name
+    installer_log_name = create_log_file_names(
+        mpsd_release=mpsd_release, microarch=microarch, action=cmd
+    )
+    log_folder = root_dir / mpsd_release / "logs"
+    # if the log_folder doesn't exist, don't log this message if
+    # the command is an info-only command
+    if cmd not in ["status", "available"]:
+        if not os.path.exists(log_folder):
+            os.makedirs(log_folder)
+        installer_log_file = log_folder / installer_log_name
+    else:
+        installer_log_file = None
+    return installer_log_file
+
+
+def set_up_logging(loglevel="warning", file_path=None):
     """Set up logging.
 
     This function sets up the logging configuration for the script.
@@ -106,7 +184,7 @@ def set_up_logging(loglevel="warning", filename=None):
     - warning (default): only print statements if something is unexpected
     - info (show more detailed progress)
     - debug (show very detailed output)
-    filename : str
+    file_path : str
     - filename to save logging messages into
 
     If loglevel is 'debug', save line numbers in log messages.
@@ -188,11 +266,16 @@ def set_up_logging(loglevel="warning", filename=None):
     shell_handler.setFormatter(shell_formatter)
     # use the log_level_numeric to decide how much logging is sent to shell
     shell_handler.setLevel(log_level_numeric)
-    logger.addHandler(shell_handler)
+
+    # Here we set the handlers of the RootLogger to be just the one we want.
+    # The reason is that the logging module will add a <StreamHandler <stderr>
+    # (NOTSET)> handler if logging.info/logging.debug/... is used before we
+    # come across this line. And we do not want that additional handler.
+    logger.handlers = [shell_handler]
 
     # if filename provided, write log messages to that file, too.
-    if filename:
-        file_handler = logging.FileHandler(filename)
+    if file_path:
+        file_handler = logging.FileHandler(file_path)
 
         # if we have a file, we write all information in there.
         # We could change the level, for example restrict to only DEBUG and above with
         # file_handler.setLevel(logging.DEBUG)
@@ -216,9 +299,9 @@ def set_up_logging(loglevel="warning", filename=None):
     print_log.addHandler(ch)
 
     # if filename provided, write output of print_log to that file, too
-    if filename:
+    if file_path:
         # create, format and add file handler
-        fh = logging.FileHandler(filename)
+        fh = logging.FileHandler(file_path)
         fh.setFormatter(formatter)
         print_log.addHandler(fh)
 
@@ -226,8 +309,8 @@ def set_up_logging(loglevel="warning", filename=None):
     # short message
     #
     logging.debug(
-        f"Logging has been setup, loglevel={loglevel.upper()}"
-        + f"{filename=} {rich_available=}"
+        f"Logging has been setup, loglevel={loglevel.upper()} "
+        + f"{file_path=} {rich_available=}"
     )
 
 
@@ -327,20 +410,21 @@ def run(*args, counter=[0], **kwargs):
     logging.debug(
         f"{token} Starting subprocess.run('{command}') with options {options}"
     )
-    logging.debug(f"{token} getcwd={os.getcwd()}")
-    logging.debug(f"{token} exact call: subprocess.run({arg})")
+    logging.debug(f"""{token} getcwd={os.getcwd()}""")
+    logging.debug(f"""{token} subprocess.run("{arg}")""")
 
     time_start = time.time()
     process = subprocess.run(*args, **kwargs)
    execution_time = time.time() - time_start
 
+    logging.debug(f"{token} {process=}")
     logging.debug(f"{token} Completed in {execution_time:.4f}s.")
     logging.debug(f"{token}")  # near-empty line to make reading logs easier
     return process
 
 
-def setup_log_cmd(
-    mpsd_release: str, script_dir: str, msg: str = None, **kwargs
+def record_script_execution_summary(
+    mpsd_release: str, root_dir: str, msg: str = None, **kwargs
 ) -> None:
     """
     Log the command used to build the toolchains.
@@ -354,7 +438,7 @@ def setup_log_cmd(
     ----------
     - mpsd_release : str
         The name of the release to install toolchains for.
-    - script_dir : str
+    - root_dir : str
         The path to the directory where the scripts are located.
     - msg : str, optional
         An optional message to log in the command log file.
@@ -369,7 +453,7 @@ def setup_log_cmd(
     -------
     - None
     """
-    release_base_dir = script_dir / mpsd_release
+    release_base_dir = root_dir / mpsd_release
 
     # Write to the log file with the following format
     # --------------------------------------------------
@@ -391,7 +475,7 @@ def setup_log_cmd(
     # call statement:
     cmd_line = " ".join(sys.argv)
     # script branch and commit hash
-    with os_chdir(script_dir):
+    with os_chdir(root_dir):
         script_branch = (
             run(
                 ["git", "rev-parse", "--abbrev-ref", "HEAD"],
@@ -426,53 +510,52 @@ def setup_log_cmd(
     )
 
 
-def create_dir_structure(mpsd_release: str, script_dir: Path) -> None:
-    """
-    Create the directory structure and clone spack environments repo.
-
-    The create_dir_structure function creates the directory structure for
-    the specified release and clones the Spack environments repository if it
-    doesn't exist.
+def clone_repo(target_path: Path, repo_url: str, branch=None) -> None:
+    """Clone repo locally. Optionally checkout a branch.
 
     Parameters
     ----------
-    - mpsd_release: A string representing the MPSD release version.
-    - script_dir: A Path object representing the path to the scripts directory.
-
-    Returns
-    -------
-    - None
+    target_path : Path
+        Where to check the repository out to
+    repo_url: str
+        where to clone the git repository from
+    branch: str (defaults to None)
+        if provided, checkout this branch after cloning
     """
-    # Create the directory structure for the release
-    release_base_dir = script_dir / mpsd_release
-    release_base_dir.mkdir(parents=True, exist_ok=True)
+    if not target_path.exists():
+        target_path.mkdir()
 
-    with os_chdir(release_base_dir):
-        # Clone the spack-environments repo if it doesn't exist
-        if not os.path.exists("spack-environments"):
-            run(
-                [
-                    "git",
-                    "clone",
-                    config_vars["spack_environments_repo"],
-                ],
-                check=True,
-            )
-        with os_chdir("spack-environments"):
+    with os_chdir(target_path):
+        run(
+            ["git", "clone", repo_url, "."],
+            check=True,
+        )
+    if branch:
+        with os_chdir(target_path):
             # Git fetch and checkout the release branch and git pull
             # to be sure that the resulting repo is up to date
             run(["git", "fetch", "--all"], check=True)
-            checkout_result = run(["git", "checkout", mpsd_release], check=True)
+            checkout_result = run(["git", "checkout", branch])
             if checkout_result.returncode != 0:
-                raise Exception(
-                    "Release branch does not exist in spack-environment repo \n."
-                    "Check for typos."
+                msg = f"Couldn't find {branch=}\n"
+
+                branches_result = run(
+                    ["git", "branch", "-a"], check=True, capture_output=True
                 )
-            run(["git", "pull"], check=True)
+                branches_list = branches_result.stdout.decode().split("\n")
+                # strip off 'remotes/origin' (needs Python 3.9):
+                branches_list = [
+                    b.strip().removeprefix("remotes/origin/") for b in branches_list
+                ]
+                msg += f"Available branches are {branches_list}"
+                logging.error(msg)
+                raise Exception(msg, branches_result)
+            else:
+                run(["git", "pull"], check=True)
 
 
-def get_release_info(mpsd_release: str, script_dir: Path) -> Tuple[str, str, List[str]]:
+def get_release_info(mpsd_release: str, root_dir: Path) -> Tuple[str, str, List[str]]:
     """
     Get information about the specified release.
 
@@ -483,7 +566,7 @@ def get_release_info(mpsd_release: str, script_dir: Path) -> Tuple[str, str, Lis
     ----------
     mpsd_release : str
         The name of the release to get information for.
-    script_dir : pathlib.Path
+    root_dir : pathlib.Path
         The base directory where releases are stored.
 
     Returns
@@ -498,13 +581,15 @@ def get_release_info(mpsd_release: str, script_dir: Path) -> Tuple[str, str, Lis
     Raises
     ------
     FileNotFoundError
-        If the release directory does not exist. Run `create_dir_structure()` first.
+        If the release directory does not exist.
     """
     # Get the info for release
-    release_base_dir = script_dir / mpsd_release
+    release_base_dir = root_dir / mpsd_release
     if not os.path.exists(release_base_dir):
+        logging.debug(f"get_release_info({mpsd_release=}, {root_dir=})")
         raise FileNotFoundError(
-            "Release directory does not exist. Run create_dir_structure() first."
+            f"{release_base_dir} does not exist.\n"
+            f"Hint: `prepare {mpsd_release}` may fix this."
        )
     with os_chdir(release_base_dir):
         with os_chdir("spack-environments"):
@@ -527,7 +612,7 @@ def get_release_info(mpsd_release: str, script_dir: Path) -> Tuple[str, str, Lis
     return spe_branch, spe_commit_hash, available_toolchains
 
 
-def prepare_environment(mpsd_release: str, script_dir: Path) -> List[str]:
+def prepare_environment(mpsd_release: str, root_dir: Path) -> List[str]:
     """
     Create the directory structure for the given MPSD release.
 
@@ -541,7 +626,7 @@ def prepare_environment(mpsd_release: str, script_dir: Path) -> List[str]:
     ----------
     mpsd_release : str
         The name of the MPSD release to prepare the environment for.
-    script_dir : pathlib.Path
+    root_dir : pathlib.Path
         The base directory to create the release folder and clone the
         spack-environments repository into.
 
@@ -551,12 +636,33 @@ def prepare_environment(mpsd_release: str, script_dir: Path) -> List[str]:
         A list of available toolchains for the given MPSD release.
     """
     logging.info(f"Preparing {mpsd_release=}")
-    create_dir_structure(mpsd_release, script_dir)
+
+    # Creates the directory structure for the specified release and clones the
+    # Spack environments repository if it doesn't exist:
+
+    # Create the directory structure for the release
+    release_base_dir = root_dir / mpsd_release
+    release_base_dir.mkdir(parents=True, exist_ok=True)
+    repo_path = release_base_dir / "spack-environments"
+    if repo_path.exists():
+        logging.debug(f"directory {repo_path} exists already, not touching")
+        logging.debug(
+            "XXX TODO: should we run a git pull here to get the latest version? XXX"
+        )
+    else:
+        repo_url = config_vars["spack_environments_repo"]
+        logging.info(f"cloning repository {repo_path} from {repo_url}")
+        clone_repo(repo_path, repo_url, branch=mpsd_release)
+
+    logging.getLogger("print").info(
+        f"Release {mpsd_release} is prepared in {release_base_dir}"
+    )
+
     spe_branch, spe_commit_hash, available_toolchains = get_release_info(
-        mpsd_release, script_dir
+        mpsd_release, root_dir
     )
-    setup_log_cmd(
-        mpsd_release, script_dir, spe_branch=spe_branch, spe_commit_hash=spe_commit_hash
+    record_script_execution_summary(
+        mpsd_release, root_dir, spe_branch=spe_branch, spe_commit_hash=spe_commit_hash
     )
     return available_toolchains
 
@@ -564,13 +670,13 @@
 def get_native_microarchitecture():
     """Return native microarchitecture.
 
-    On MPSD machines, there should be an environment variable "MPSD_MICROARCH".
+    On MPSD machines, there should be an environment variable "microarch".
     We try to read that. If it fails, we use the 'archspec cpu' command. If that
     fails, we ask the user to install it.
 
     Returns
     -------
-    MPSD_MICROARCH : str
+    microarch : str
 
     Example
     -------
@@ -619,7 +725,7 @@ def get_native_microarchitecture():
 def install_environment(
     mpsd_release: str,
     toolchains: List[str],
-    script_dir: Path,
+    root_dir: Path,
     force_reinstall: bool = False,
     enable_build_cache: bool = False,
 ) -> None:
@@ -635,7 +741,7 @@ def install_environment(
     toolchains : list of str
         A list of strings representing the toolchains to install (e.g.,
         "foss2021a-mpi", "global_generic", "ALL").
-    script_dir : pathlib.Path
+    root_dir : pathlib.Path
         A Path object representing the path to the directory where the release
         and toolchains will be installed.
     force_reinstall : bool, optional
@@ -656,14 +762,13 @@ def install_environment(
     """
     logging.info(
         f"Installing release {mpsd_release} with toolchains {toolchains} "
-        f"to {script_dir}"
+        f"to {root_dir}"
     )
 
     # Set required variables
-    release_base_dir = script_dir / mpsd_release
-    os.environ.get("MPSD_OS", "UNKNOWN_OS")
-    mpsd_microarch = os.environ.get("MPSD_MICROARCH", "UNKNOWN_MICROARCH")
-    toolchain_dir = release_base_dir / mpsd_microarch
+    release_base_dir = root_dir / mpsd_release
+    microarch = get_native_microarchitecture()
+    toolchain_dir = release_base_dir / microarch
     toolchain_dir.mkdir(parents=True, exist_ok=True)
     spack_setup_script = release_base_dir / "spack-environments" / "spack_setup.sh"
     install_flags = []
@@ -671,7 +776,7 @@ def install_environment(
         install_flags.append("-b")
 
     # run the prepare_environment function
-    available_toolchains = prepare_environment(mpsd_release, script_dir)
+    available_toolchains = prepare_environment(mpsd_release, root_dir)
     # Ensure that the requested toolchains are available in the release
     if toolchains == "ALL":
         toolchains = available_toolchains
@@ -700,33 +805,35 @@ def install_environment(
         if not os.path.exists("logs"):
             os.mkdir("logs")
     for toolchain in toolchains:
-        # Set the install log file name to config_vars["install_log_file"]
-        # and replace _toolchains_ with the toolchain name and
-        # _mpsd_spack_ver_ with mpsd_release
+        # Set the install log file name from create_log_file_names
+        build_log_file_name = create_log_file_names(
+            mpsd_release, microarch, "install", toolchain=toolchain
+        )
+        build_log_folder = release_base_dir / "logs"
+        build_log_path = build_log_folder / build_log_file_name
+        # if logs folder doesn't exist, create it
+        if not os.path.exists(build_log_folder):
+            os.makedirs(build_log_folder)
         logging.info(f"Installing toolchain {toolchain} to {toolchain_dir}")
-        install_log_file = (
-            config_vars["build_log_file"]
-            .replace("mpsd_spack_ver_", f"{mpsd_release}_")
-            .replace("_toolchains_", f"_{toolchain}_")
-        )
+
 
         # log the command
-        setup_log_cmd(
+        record_script_execution_summary(
             mpsd_release,
-            script_dir,
-            msg=f"installing {toolchain} and logging at {install_log_file}",
+            root_dir,
+            msg=f"installing {toolchain} and logging at {build_log_path}",
         )
-        setup_log_cmd(
+        record_script_execution_summary(
             mpsd_release,
-            script_dir,
+            root_dir,
             msg=(
-                f"CMD: bash {spack_setup_script} {' '.join(install_flags)}"
-                "{toolchain}"
+                f"CMD: bash {spack_setup_script} {' '.join(install_flags)} "
+                f"{toolchain}"
            ),
        )
        run(
            f"bash {spack_setup_script} {' '.join(install_flags)} {toolchain} 2>&1 "
-            f"| tee -a {install_log_file} ",
+            f"| tee -a {build_log_path} ",
            shell=True,
            check=True,
        )
@@ -827,24 +934,25 @@ def main():
 
     # Carry out the action
     args = parser.parse_args()
-    # parse logging first
-    set_up_logging(args.loglevel)
 
-    # target dir is the place where this script exists. the
-    # release `dev` in script_dir/dev-23a
-    script_dir = Path(os.path.dirname(os.path.realpath(__file__)))
+    root_dir = Path(os.path.dirname(os.path.realpath(__file__)))
+
+    set_up_logging(
+        args.loglevel,
+        get_installer_log_file_path(args.release, args.action, root_dir),
+    )
 
     # Check the command and run related function
     if args.action == "remove":
-        remove_environment(args.release, args.toolchains, script_dir)
+        remove_environment(args.release, args.toolchains, root_dir)
     elif args.action == "start-new":
-        start_new_environment(args.from_release, args.to_release, script_dir)
+        start_new_environment(args.from_release, args.to_release, root_dir)
     elif args.action == "install":
         install_environment(
-            args.release, args.toolchains, script_dir, False, args.enable_build_cache
+            args.release, args.toolchains, root_dir, False, args.enable_build_cache
        )
     elif args.action == "prepare":
-        prepare_environment(args.release, script_dir)
+        prepare_environment(args.release, root_dir)
 
 
 if __name__ == "__main__":
diff --git a/tests.py b/tests.py
index c7103b4f04ca2ef93f716c2d7e95379da33118c2..685ce134653ab52da9b67c82d52e6d786862b8ef 100644
--- a/tests.py
+++ b/tests.py
@@ -6,13 +6,15 @@ import shutil
 import subprocess
 from pathlib import Path
 import logging
+import datetime
+
 import pytest
 
 mod = importlib.import_module("mpsd-software-environment")
 
 # set loglevel to debug - useful for understanding problems.
 # (if the tests pass, pytest doesn't show any output)
-mod.set_up_logging(loglevel="debug", filename="tests.log")
+mod.set_up_logging(loglevel="debug", file_path="tests.log")
 logging.debug(f"We have set up logging from {__file__}")
 
 
@@ -110,23 +112,23 @@ def test_prepare_environment(tmp_path):
     prepare_env is run when cmd is not specified, we can test
     cmd='prepare' and cmd=None to check both cases
     """
-    script_dir = tmp_path / "mpsd_opt" / "linux_debian_11"
+    root_dir = tmp_path / "mpsd_opt" / "linux_debian_11"
     spack_environments = "spack-environments"
     mpsd_release_to_test = "dev-23a"
-    release_base_dir = script_dir / mpsd_release_to_test
+    release_base_dir = root_dir / mpsd_release_to_test
     # check that the test directory does not exist
-    assert not script_dir.exists()
+    assert not root_dir.exists()
 
     # prepare_environment expects to be executed in git repository
     # (mpsd-software-environments). It queries the commit on which we are to
     # log that information. For this to work, we need to execute the command
     # within a directory tree that has a git repository at the same or high
     # level. Let's create one:
-    create_mock_git_repository(script_dir)
+    create_mock_git_repository(root_dir)
 
     # now call the function we want to test
     result = mod.prepare_environment(
-        mpsd_release=mpsd_release_to_test, script_dir=script_dir
+        mpsd_release=mpsd_release_to_test, root_dir=root_dir
     )
 
     # check if the directory now is created
@@ -153,35 +155,35 @@ def test_prepare_environment(tmp_path):
     # Expect an Exception when wrong mpsd_release is provided
     with pytest.raises(Exception):
         result = mod.prepare_environment(
-            mpsd_release="wrong-mpsd-release", script_dir=(script_dir)
+            mpsd_release="wrong-mpsd-release", root_dir=(root_dir)
         )
 
 
-def test_setup_log_cmd(tmp_path):
+def test_record_script_execution_summary(tmp_path):
     """Check that log is updated.
 
     Check that logs/install-software-environment.log is updated when the module is run
     """
-    log_file = "install.log"
+    cmd_log_file = mod.config_vars["cmd_log_file"]
 
-    script_dir = tmp_path / "test_prepare_env"
+    root_dir = tmp_path / "test_prepare_env"
     mpsd_release_to_test = "dev-23a"
-    release_base_dir = script_dir / mpsd_release_to_test
-    if os.path.exists(release_base_dir / log_file):
-        initial_bytes = os.path.getsize(log_file)
+    release_base_dir = root_dir / mpsd_release_to_test
+    if os.path.exists(release_base_dir / cmd_log_file):
+        initial_bytes = os.path.getsize(cmd_log_file)
     else:
         initial_bytes = 0
 
     # run the prepare_env functionality
-    create_mock_git_repository(target_directory=script_dir, create_directory=True)
-    mod.prepare_environment(mpsd_release=mpsd_release_to_test, script_dir=(script_dir))
+    create_mock_git_repository(target_directory=root_dir, create_directory=True)
+    mod.prepare_environment(mpsd_release=mpsd_release_to_test, root_dir=(root_dir))
 
     # check that logs/install-software-environment.log is updated
-    assert os.path.exists(release_base_dir / log_file)
-    assert os.path.getsize(release_base_dir / log_file) > initial_bytes
+    assert os.path.exists(release_base_dir / cmd_log_file)
+    assert os.path.getsize(release_base_dir / cmd_log_file) > initial_bytes
 
     # Check that the log file has "Spack environments branch: dev-23a " in the last line
-    with open(release_base_dir / log_file, "r") as f:
+    with open(release_base_dir / cmd_log_file, "r") as f:
         last_line = f.readlines()[-1]
         assert "Spack environments branch: dev-23a " in last_line
 
@@ -193,7 +195,7 @@ def test_install_environment_wrong_toolchain(tmp_path):
         mod.install_environment(
             mpsd_release="dev-23a",
             toolchains=["wrong-toolchain"],
-            script_dir=(tmp_path),
+            root_dir=(tmp_path),
         )
 
 
@@ -205,7 +207,7 @@
         mod.install_environment(
             mpsd_release="wrong-mpsd-release",
             toolchains=["foss2021a-mpi"],
-            script_dir=(tmp_path),
+            root_dir=(tmp_path),
         )
 
 
@@ -219,16 +221,17 @@ def test_install_environment_zlib():
     # pytest -s
     # for this installation avoid tmp_path as
     # the length of the path becomes too long and spack complains
-    script_dir = Path("/tmp/test_global_generic")
-    if script_dir.exists():
-        shutil.rmtree(script_dir)
-    script_dir.mkdir(exist_ok=True, parents=True)
+    root_dir = Path("/tmp/test_global_generic")
+    if root_dir.exists():
+        shutil.rmtree(root_dir)
+    root_dir.mkdir(exist_ok=True, parents=True)
     mpsd_release_to_test = "dev-23a"
     toolchain_to_test = "global_generic"
-    mpsd_microarch = os.getenv("MPSD_MICROARCH", "UNKNOWN_MICROARCH")
-    release_base_dir = script_dir / mpsd_release_to_test
-    create_mock_git_repository(target_directory=script_dir, create_directory=False)
-    mod.prepare_environment(mpsd_release=mpsd_release_to_test, script_dir=(script_dir))
+    cmd_log_file = mod.config_vars["cmd_log_file"]
+    microarch = mod.get_native_microarchitecture()
+    release_base_dir = root_dir / mpsd_release_to_test
+    create_mock_git_repository(target_directory=root_dir, create_directory=False)
+    mod.prepare_environment(mpsd_release=mpsd_release_to_test, root_dir=(root_dir))
     # Patch the spack environments to create a fake global_generic
     # create a test toolchain
     toolchain_src_dir = release_base_dir / "spack-environments" / "toolchains"
@@ -266,60 +269,85 @@ def test_install_environment_zlib():
     )
     with open(setup_file, "w") as f:
         f.write(lines)
+    # install global_generic toolchain
+    mod.set_up_logging(
+        "WARNING",
+        mod.get_installer_log_file_path(mpsd_release_to_test, "install", root_dir),
+    )
     mod.install_environment(
         mpsd_release=mpsd_release_to_test,
         toolchains=[toolchain_to_test],
-        script_dir=script_dir,
+        root_dir=root_dir,
         enable_build_cache=False,
     )
     # test that the build log is created correctly
     # check that a file with glob build_globale_generic_dev-23a*.log exists at
-    # release_base_dir/mpsd_microarch
+    # release_base_dir/microarch
     # print("Debug here ")
     # time.sleep(10)
+
     build_log = list(
-        (release_base_dir / mpsd_microarch / "logs").glob(
-            f"{mpsd_release_to_test}_{toolchain_to_test}_*.log"
+        (release_base_dir / "logs").glob(
+            f"{mpsd_release_to_test}_{microarch}_*_install.log"
         )
     )
-    assert len(build_log) > 0
+    assert len(build_log) == 2
     # take the most recent build log
-    build_log = sorted(build_log)[0]
+    build_log = sorted(build_log)[1]
     # check that the build log contains statement ##### Installation finished
     with open(build_log, "r") as f:
         lines = f.read()
         assert "##### Installation finished" in lines
-    build_log_file_name = os.path.basename(build_log)
+    os.path.basename(build_log)
     # assert that install log files exists
-    assert os.path.exists(release_base_dir / "install.log")
+    assert os.path.exists(release_base_dir / cmd_log_file)
     # assert that the build log is written to the install log file
     os.path.basename(build_log)
-    with open(release_base_dir / "install.log", "r") as f:
+    with open(release_base_dir / cmd_log_file, "r") as f:
         lines = f.read()
         assert (
-            f"installing {toolchain_to_test} and logging at logs/{build_log_file_name}"
-            in lines
+            f"installing {toolchain_to_test} and logging at {str(build_log)}" in lines
         )
     # assert that the module files are created correctly
-    assert os.path.exists(release_base_dir / mpsd_microarch)
-    assert os.path.exists(release_base_dir / mpsd_microarch / "lmod")
+    assert os.path.exists(release_base_dir / microarch)
+    assert os.path.exists(release_base_dir / microarch / "lmod")
     # assert that lmod/module-index.yaml contains zlib
-    with open(
-        release_base_dir / mpsd_microarch / "lmod" / "module-index.yaml", "r"
-    ) as f:
+    with open(release_base_dir / microarch / "lmod" / "module-index.yaml", "r") as f:
         lines = f.read()
         assert "zlib" in lines
+    # install again to ensure that
+    # commands that skip creation of folders when
+    # they are already present work as expected
+    # reload the module to ensure that date changes
+    importlib.reload(mod)
+    mod.set_up_logging(
+        "WARNING",
+        mod.get_installer_log_file_path(mpsd_release_to_test, "install", root_dir),
+    )
+    mod.install_environment(
+        mpsd_release=mpsd_release_to_test,
+        toolchains=[toolchain_to_test],
+        root_dir=root_dir,
+        enable_build_cache=False,
+    )
+    build_log = list(
+        (release_base_dir / "logs").glob(
+            f"{mpsd_release_to_test}_{microarch}_*_install.log"
+        )
+    )
+    assert len(build_log) == 4
+
 
 
 def test_metadata_logging(tmp_path):
     """Test that metadata is logged and read correctly."""
     # Test that the metadata is logged correctly
     filename = tmp_path / "test-metadata.log"
     print(f"Writing to {filename}")
-    mod.set_up_logging(loglevel="debug", filename=filename)
+    mod.set_up_logging(loglevel="debug", file_path=filename)
 
     # our test data
     keys = ["important_key", "important_key2"]
@@ -353,6 +381,47 @@
     assert len(read_dict) == len(keys)
 
 
+def test_create_log_file_names():
+    """Test that the log file names are created correctly."""
+    create_log_file_names = mod.create_log_file_names
+    mpsd_release = "dev-23a"
+    microarch = "sandybridge"
+    date = datetime.datetime.now().replace(microsecond=0).isoformat()
+    action = "install"
+    toolchain = "foss2021a"
+    # test build_log_file_name generation
+    build_log_file_name = create_log_file_names(
+        microarch=microarch,
+        mpsd_release=mpsd_release,
+        date=date,
+        action=action,
+        toolchain=toolchain,
+    )
+    assert (
+        build_log_file_name
+        == f"{mpsd_release}_{microarch}_{date}_BUILD_{toolchain}_{action}.log"
+    )
+    installer_log_file_name = create_log_file_names(
+        microarch=microarch,
+        mpsd_release=mpsd_release,
+        date=date,
+        action=action,
+    )
+    assert (
+        installer_log_file_name
+        == f"{mpsd_release}_{microarch}_{date}_APEX_{action}.log"
+    )
+    # test no build log file for incorrect action
+    build_log_file_name = create_log_file_names(
+        microarch=microarch,
+        mpsd_release=mpsd_release,
+        date=date,
+        action="status",
+        toolchain=toolchain,
+    )
+    assert build_log_file_name is None
+
+
 def test_interface(tmp_path):
     """Test other things (not implemented yet)."""
     pass