From 729199a1f1def403ff31dac763ec48f511b7738a Mon Sep 17 00:00:00 2001
From: Kaivalya
Date: Mon, 13 Jan 2025 21:40:20 +0530
Subject: [PATCH 1/2] Script and readme added

---
 scripts/tuning_scripts/README.md      | 167 ++++++++++++++++
 scripts/tuning_scripts/env.yaml       | 235 ++++++++++++++++++++++
 scripts/tuning_scripts/hypertuning.py | 271 ++++++++++++++++++++++++++
 scripts/tuning_scripts/mlp_model.py   |  23 +++
 4 files changed, 696 insertions(+)
 create mode 100644 scripts/tuning_scripts/README.md
 create mode 100644 scripts/tuning_scripts/env.yaml
 create mode 100644 scripts/tuning_scripts/hypertuning.py
 create mode 100644 scripts/tuning_scripts/mlp_model.py

diff --git a/scripts/tuning_scripts/README.md b/scripts/tuning_scripts/README.md
new file mode 100644
index 0000000..c62486a
--- /dev/null
+++ b/scripts/tuning_scripts/README.md
@@ -0,0 +1,167 @@
+# Hyperparameter Tuning Script Documentation
+
+This README provides an overview of the hyperparameter tuning script.
+
+## Table of Contents
+
+1. [Prerequisites](#prerequisites)
+2. [Script Overview](#script-overview)
+3. [Setup and Usage](#setup-and-usage)
+4. [Configuration Details](#configuration-details)
+5. [Output and Results](#output-and-results)
+6. [Edits and Customizations](#edits-and-customizations)
+7. [FAQs](#faqs)
+
+---
+
+## Prerequisites
+
+- Python 3.8 or later
+- Dependencies can be installed using the `env.yaml` file:
+
+```bash
+conda env create -f env.yaml
+conda activate rllib_env_2.2.0
+```
+
+---
+
+## Script Overview
+
+The script performs hyperparameter tuning for a Multi-Layer Perceptron (MLP) classifier. Key functionalities include:
+
+- Loading datasets from CSV files.
+- Defining and training an MLP model.
+- Conducting hyperparameter tuning with Ray Tune, using the ASHAScheduler for early stopping.
+- Logging and saving the best model and hyperparameters.
+
+---
+
+## Setup and Usage
+
+### Clone the Repository
+The tuning script, `mlp_model.py`, and `env.yaml` must stay in the same directory (`scripts/tuning_scripts/`).
+
+```bash
+git clone https://github.com/IITH-Compilers/IR2Vec-Classification.git
+cd IR2Vec-Classification
+```
+
+### Dataset Preparation
+Prepare the training, validation, and test datasets (tab-separated files with the class label in the first column) and set their paths in `hypertuning.py`, as shown in [Edits and Customizations](#edits-and-customizations).
+
+### Run the Script
+
+Execute the script:
+
+```bash
+python hypertuning.py
+```
+
+---
+
+## Configuration Details
+
+### Hyperparameter Search Space
+
+The script explores the following hyperparameter configurations:
+
+- **Number of Layers (`num_layers`)**: Random integer from 3 to 7, drawn with `tune.randint(3, 8)` (upper bound exclusive).
+- **Units Per Layer (`units_per_layer`)**: Random choice from `[64, 128, 256, 512]` for each layer.
+- **Dropout (`dropout`)**: Uniformly sampled between `0.0` and `0.3`.
+- **Normalize Input (`normalize_input`)**: Boolean (`True` or `False`).
+- **Activation Function (`activation`)**: Choice of `ReLU`, `LeakyReLU`, `Tanh`, or `SiLU`.
+- **Optimizer (`optimizer`)**: Currently fixed to `Adam`.
+- **Learning Rate (`lr`)**: Log-uniform sampling between `1e-4` and `1e-2`.
+- **Batch Size (`batch_size`)**: Choice of `[32, 64, 128, 256, 512, 1024]`.
+- **Epochs (`epochs`)**: Set to `5000`.
+
+### Scheduler
+
+The ASHAScheduler is used for early stopping based on validation accuracy.
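+
+For reference, a minimal sketch of how the search space and the scheduler are wired together (mirroring `hypertuning.py`; trimmed for brevity, the script itself uses `num_samples=1000` and also sets `keep_checkpoints_num`, `max_concurrent_trials`, and `local_dir`):
+
+```python
+from ray import tune
+from ray.tune.schedulers import ASHAScheduler
+
+scheduler = ASHAScheduler(max_t=5000, grace_period=10, reduction_factor=3)
+
+analysis = tune.run(
+    train_model,                      # training function defined in hypertuning.py
+    config=config,                    # the search space described above
+    metric="val_accuracy",
+    mode="max",
+    scheduler=scheduler,              # stops under-performing trials early
+    num_samples=10,
+    resources_per_trial={"cpu": 10, "gpu": 0.125},
+)
+```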
+
+### Resource Allocation
+
+- **CPU**: 10 cores per trial
+- **GPU**: 0.125 of a GPU per trial (up to eight trials can share one GPU)
+
+---
+
+## Output and Results
+
+- **Checkpoints**: Saved once per epoch for every trial under:
+  ```
+  /Pramana/IR2Vec/tuned_models_ir2vec/tmp/ray_results
+  ```
+- **Best Model and Hyperparameters**:
+  The best model and hyperparameters are logged and saved in JSON format.
+
+Example output:
+
+```json
+{
+    "best_config": {
+        "num_layers": 5,
+        "units_per_layer": [256, 128, 128, 64, 64],
+        "dropout": 0.1,
+        "normalize_input": true,
+        "activation": "ReLU",
+        "optimizer": "Adam",
+        "lr": 0.001,
+        "batch_size": 128,
+        "epochs": 5000
+    },
+    "best_results": {
+        "val_accuracy": 0.85,
+        "train_accuracy": 0.88
+    }
+}
+```
+
+---
+
+## Edits and Customizations
+
+### Paths to Datasets
+Update the paths to your dataset files:
+
+```python
+train_dataset_path = "/path/to/training.csv"
+val_dataset_path = "/path/to/val.csv"
+test_dataset_path = "/path/to/testing.csv"
+```
+
+### Input and Output Dimensions
+Update `input_dim` and `num_classes` to match your dataset (they are set in both `train_model` and `main`):
+
+```python
+input_dim = 56  # Number of features
+num_classes = 98  # Number of classes
+```
+
+### Logging and Temporary Directory
+Modify the logging and temporary directory settings if needed:
+
+```python
+ray.init(_temp_dir="/custom/path/to/tmp")
+```
+
+---
+
+## FAQs
+
+1. **How do I use a specific GPU for training?**
+   Set the CUDA visibility environment variable to the index of the GPU you want to use:
+   ```bash
+   CUDA_VISIBLE_DEVICES=0 python hypertuning.py
+   ```
+
+2. **What should I do if I encounter CUDA/Torch errors?**
+   Verify that the installed `torch` build matches your CUDA version (see the pins in `env.yaml`) and adjust the `torch` version accordingly.
+
+3. **How do I manage logs and outputs for different runs?**
+   Change the log and CSV file names before running the script so each run's outputs stay distinguishable.
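+
+4. **How do I load the best checkpoint after tuning?**
+   Each checkpoint stores the full model object via `torch.save(model, ...)`, so it can be restored with `torch.load` as long as `mlp_model.MLP` is importable. A minimal sketch (the path below is a placeholder; the real checkpoints live in the trial directories under the `ray_results` folder):
+   ```python
+   import torch
+
+   from mlp_model import MLP  # needed so torch.load can unpickle the saved model
+
+   checkpoint_file = "/path/to/trial_dir/checkpoint_000042/model_checkpoint.model"  # placeholder
+   model = torch.load(checkpoint_file, map_location="cpu")
+   model.eval()
+   ```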
diff --git a/scripts/tuning_scripts/env.yaml b/scripts/tuning_scripts/env.yaml new file mode 100644 index 0000000..4dc6a8b --- /dev/null +++ b/scripts/tuning_scripts/env.yaml @@ -0,0 +1,235 @@ +name: rllib_env_2.2.0 +channels: + - pytorch + - anaconda + - conda-forge + - defaults +dependencies: + - _libgcc_mutex=0.1 + - _pytorch_select=0.2 + - aiohttp=3.6.2 + - argon2-cffi=20.1.0 + - async-timeout=3.0.1 + - async_generator=1.10 + - attrs=20.2.0 + - backcall=0.2.0 + - blas=1.0 + - bleach=3.2.1 + - blinker=1.4 + - brotlipy=0.7.0 + - c-ares=1.16.1 + - ca-certificates=2020.11.8 + - cachetools=4.1.1 + - certifi=2020.11.8 + - cffi=1.14.2 + - chardet=3.0.4 + - click=7.1.2 + - cryptography=3.1.1 + - cudatoolkit=11.0.221 + - cycler=0.10.0 + - dbus=1.13.16 + - decorator=4.4.2 + - defusedxml=0.6.0 + - entrypoints=0.3 + - expat=2.2.9 + - fontconfig=2.13.0 + - freetype=2.10.2 + - glib=2.65.0 + - google-auth-oauthlib=0.4.1 + - gst-plugins-base=1.14.0 + - gstreamer=1.14.0 + - icu=58.2 + - idna=2.10 + - importlib_metadata=1.7.0 + - intel-openmp=2020.2 + - ipykernel=5.3.4 + - ipython=7.19.0 + - ipython_genutils=0.2.0 + - ipywidgets=7.5.1 + - jedi=0.17.2 + - jinja2=2.11.2 + - jpeg=9b + - jsonschema=3.2.0 + - jupyter_client=6.1.7 + - jupyter_core=4.6.3 + - jupyterlab_pygments=0.1.2 + - kiwisolver=1.2.0 + - lcms2=2.11 + - ld_impl_linux-64=2.33.1 + - libedit=3.1.20191231 + - libffi=3.3 + - libgcc-ng=9.1.0 + - libpng=1.6.37 + - libprotobuf=3.13.0 + - libsodium=1.0.18 + - libstdcxx-ng=9.1.0 + - libtiff=4.1.0 + - libuuid=1.0.3 + - libuv=1.40.0 + - libxcb=1.14 + - libxml2=2.9.10 + - lz4-c=1.9.2 + - markupsafe=1.1.1 + - matplotlib=3.3.1 + - matplotlib-base=3.3.1 + - mistune=0.8.4 + - mkl=2020.2 + - mkl-service=2.3.0 + - mkl_fft=1.1.0 + - mkl_random=1.1.1 + - multidict=4.7.6 + - nbclient=0.5.1 + - nbconvert=6.0.7 + - nbformat=5.0.8 + - ncurses=6.2 + - nest-asyncio=1.4.3 + - networkx=2.5 + - ninja=1.10.1 + - notebook=6.1.5 + - oauthlib=3.1.0 + - olefile=0.46 + - openssl=1.1.1h + - packaging=20.4 + - pandas=1.1.1 + - pandoc=2.11.2 + - parso=0.7.1 + - pcre=8.44 + - pexpect=4.8.0 + - pickleshare=0.7.5 + - pillow=7.2.0 + - pip=20.2.3 + - prometheus_client=0.9.0 + - prompt-toolkit=3.0.8 + - ptyprocess=0.6.0 + - pyasn1=0.4.8 + - pyasn1-modules=0.2.8 + - pycparser=2.20 + - pydot=1.3.0 + - pygments=2.7.2 + - pyjwt=1.7.1 + - pyopenssl=19.1.0 + - pyparsing=2.4.7 + - pyqt=5.9.2 + - pyrsistent=0.17.3 + - pysocks=1.7.1 + - python=3.7.9 + - python-dateutil=2.8.1 + - python_abi=3.7 + - pytz=2020.1 + - pyzmq=20.0.0 + - qt=5.9.7 + - readline=8.0 + - requests=2.24.0 + - requests-oauthlib=1.3.0 + - rsa=4.6 + - send2trash=1.5.0 + - setuptools=49.6.0 + - sip=4.19.24 + - six=1.15.0 + - sqlite=3.33.0 + - tensorboard-plugin-wit=1.6.0 + - terminado=0.9.1 + - testpath=0.4.4 + - tk=8.6.10 + - torchaudio=0.7.0 + - torchvision=0.8.1 + - tornado=6.0.4 + - tqdm=4.51.0 + - traitlets=5.0.5 + - urllib3=1.25.10 + - wcwidth=0.2.5 + - webencodings=0.5.1 + - werkzeug=1.0.1 + - wheel=0.35.1 + - widgetsnbextension=3.5.1 + - xz=5.2.5 + - yarl=1.5.1 + - zeromq=4.3.3 + - zipp=3.1.0 + - zlib=1.2.11 + - zstd=1.4.5 + - pip: + - absl-py==1.4.0 + - aiohttp-cors==0.7.0 + - aioredis==1.3.1 + - aiosignal==1.3.1 + - astunparse==1.6.3 + - blessings==1.7 + - cached-property==1.5.2 + - cloudpickle==1.6.0 + - colorama==0.4.4 + - compilerinterface==0.0.2 + - dataclasses==0.6 + - distlib==0.3.6 + - dm-tree==0.1.6 + - farama-notifications==0.0.4 + - filelock==3.0.12 + - flatbuffers==23.5.9 + - frozenlist==1.3.3 + - future==0.18.2 + - gast==0.4.0 + - gensim==4.2.0 + - 
google-api-core==1.30.0
+    - google-auth==1.32.0
+    - google-pasta==0.2.0
+    - googleapis-common-protos==1.53.0
+    - gpustat==0.6.0
+    - grpcio==1.53.0
+    - gym==0.18.3
+    - gymnasium==0.28.1
+    - h5py==3.1.0
+    - hiredis==2.0.0
+    - importlib-metadata==6.1.0
+    - jax-jumpy==1.0.0
+    - joblib==0.17.0
+    - json5==0.9.5
+    - jupyterlab==2.2.9
+    - jupyterlab-server==1.2.0
+    - keras==2.11.0
+    - libclang==16.0.0
+    - lz4==3.1.3
+    - markdown==3.1.1
+    - msgpack==1.0.2
+    - numpy==1.21.6
+    - nvidia-cublas-cu11==11.10.3.66
+    - nvidia-cuda-nvrtc-cu11==11.7.99
+    - nvidia-cuda-runtime-cu11==11.7.99
+    - nvidia-cudnn-cu11==8.5.0.96
+    - nvidia-ml-py3==7.352.0
+    - online-triplet-loss==0.0.4
+    - opencensus==0.7.13
+    - opencensus-context==0.1.2
+    - opencv-python-headless==4.3.0.36
+    - opt-einsum==3.3.0
+    - pandocfilters==1.4.3
+    - platformdirs==3.2.0
+    - protobuf==3.17.3
+    - psutil==5.8.0
+    - py-spy==0.3.7
+    - pydantic==1.8.2
+    - pyglet==1.5.15
+    - pyyaml==5.3.1
+    - ray==2.2.0
+    - redis==3.5.3
+    - scikit-learn==0.23.2
+    - scipy==1.5.4
+    - sklearn==0.0
+    - smart-open==6.3.0
+    - stellargraph==1.2.1
+    - tabulate==0.8.9
+    - tensorboard==2.11.2
+    - tensorboard-data-server==0.6.1
+    - tensorboardx==2.3
+    - tensorflow==2.11.0
+    - tensorflow-estimator==2.11.0
+    - tensorflow-io-gcs-filesystem==0.32.0
+    - tensorflow-probability==0.19.0
+    - termcolor==2.3.0
+    - threadpoolctl==2.1.0
+    - torch==1.12.0
+    - torch-geometric==2.3.0
+    - torchsummary==1.5.1
+    - typing-extensions==4.5.0
+    - virtualenv==20.21.0
+    - wrapt==1.15.0
+    - python-decouple==3.8
\ No newline at end of file
diff --git a/scripts/tuning_scripts/hypertuning.py b/scripts/tuning_scripts/hypertuning.py
new file mode 100644
index 0000000..66fe0fd
--- /dev/null
+++ b/scripts/tuning_scripts/hypertuning.py
@@ -0,0 +1,271 @@
+import ray.train
+import torch
+import torch.nn as nn
+import torch.optim as optim
+from torch.utils.data import DataLoader, TensorDataset, Dataset
+import ray
+from ray import tune
+from ray.tune.schedulers import ASHAScheduler
+from ray.tune.search.optuna import OptunaSearch
+import pandas as pd
+import logging
+import json
+import os
+import numpy as np
+import random
+import tempfile
+from ray import train, tune
+import sys
+sys.path.append("/home/intern24009/IR2Vec-Classification/tune-ir2vec/")
+from mlp_model import MLP
+from datetime import datetime
+
+
+logging.basicConfig(
+    level=logging.INFO,
+    format="%(asctime)s - %(levelname)s - %(message)s",
+    datefmt="%Y-%m-%d %H:%M:%S"
+)
+logger = logging.getLogger(__name__)
+
+class CSVDataset(Dataset):
+    def __init__(self, file_path):
+        print(f"Loading dataset from: {file_path}")
+
+        try:
+            self.data = pd.read_csv(file_path, delimiter='\t', header=None)
+        except Exception as e:
+            print(f"Error reading CSV: {e}")
+            return
+
+        try:
+            self.labels = torch.tensor(self.data.iloc[:, 0].values, dtype=torch.long)
+            self.features = torch.tensor(self.data.iloc[:, 1:].values, dtype=torch.float32)
+        except Exception as e:
+            print(f"Error processing data: {e}")
+            return
+
+        if not pd.api.types.is_numeric_dtype(self.data.iloc[:, 0]):
+            print("Error: Non-numeric labels detected in the first column.")
+            return
+
+        # Adjust labels to be 0-based (subtract 1 for 1-based labels)
+        self.labels = self.labels - 1  # Make labels 0-based
+
+        print("Dataset loaded successfully.")
+
+    def __len__(self):
+        return len(self.data)
+
+    def __getitem__(self, idx):
+        return self.features[idx], self.labels[idx]
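+
+# Expected input format for CSVDataset: tab-separated rows with no header,
+#   <label>\t<feature_1>\t<feature_2>\t...\t<feature_N>
+# where <label> is a 1-based integer class id (shifted to 0-based above).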
+
+# Training function
+def train_model(config, checkpoint_dir=None):
+    # logger.info(f"Trial Config: num_layers={config['num_layers']}, units_per_layer={config['units_per_layer']}")
+
+    logger.info("Starting training process...")
+    input_dim = 56  # Update according to the input dimension of your dataset
+    num_classes = 98  # Update according to the number of classes
+
+    # Update these paths to point at your own CSV splits
+    train_dataset_path = "/home/intern24009/tune-ir2vec/histogram-10.x/training.csv"
+    test_dataset_path = "/home/intern24009/tune-ir2vec/histogram-10.x/testing.csv"
+    val_dataset_path = "/home/intern24009/tune-ir2vec/histogram-10.x/val.csv"
+
+    train_dataset = CSVDataset(train_dataset_path)
+    val_dataset = CSVDataset(val_dataset_path)
+    test_dataset = CSVDataset(test_dataset_path)
+
+    train_loader = DataLoader(train_dataset, batch_size=config["batch_size"], shuffle=True)
+    val_loader = DataLoader(val_dataset, batch_size=config["batch_size"], shuffle=False)
+    test_loader = DataLoader(test_dataset, batch_size=config["batch_size"], shuffle=False)
+
+    logger.info("Datasets and DataLoaders prepared.")
+
+    # Initialize model
+    model = MLP(
+        input_dim=input_dim,
+        num_classes=num_classes,
+        num_layers=config["num_layers"],
+        units_per_layer=config["units_per_layer"],
+        dropout=config["dropout"],
+        normalize_input=config["normalize_input"],
+        activation=config["activation"]
+    )
+
+    device = "cuda" if torch.cuda.is_available() else "cpu"
+    print(f"Using device: {device}")
+    logger.info(f"Using device: {device}")
+
+    model.to(device)
+
+    # Define loss and optimizer
+    criterion = nn.CrossEntropyLoss()
+    optimizer = getattr(optim, config["optimizer"])(
+        model.parameters(), lr=config["lr"]
+    )
+
+    best_val_accuracy = 0.0
+
+    # Training loop
+    logger.info("Starting training loop...")
+    for epoch in range(config["epochs"]):
+        model.train()
+        running_loss = 0.0
+        correct_train = 0
+        total_train = 0
+
+        # Train the model
+        for batch in train_loader:
+            inputs, labels = batch
+            inputs, labels = inputs.to(device), labels.to(device)
+
+            optimizer.zero_grad()
+            outputs = model(inputs)
+            loss = criterion(outputs, labels)
+            loss.backward()
+            optimizer.step()
+
+            running_loss += loss.item()
+
+            # Calculate train accuracy
+            _, predicted = torch.max(outputs, 1)
+            total_train += labels.size(0)
+            correct_train += (predicted == labels).sum().item()
+
+        train_loss = running_loss / len(train_loader)
+        train_accuracy = correct_train / total_train
+
+        # Evaluate on validation data
+        model.eval()
+        running_val_loss = 0.0
+        correct_val = 0
+        total_val = 0
+
+        with torch.no_grad():
+            for batch in val_loader:
+                inputs, labels = batch
+                inputs, labels = inputs.to(device), labels.to(device)
+
+                outputs = model(inputs)
+                loss = criterion(outputs, labels)
+                running_val_loss += loss.item()
+
+                # Calculate validation accuracy
+                _, predicted = torch.max(outputs, 1)
+                total_val += labels.size(0)
+                correct_val += (predicted == labels).sum().item()
+
+        val_loss = running_val_loss / len(val_loader)
+        val_accuracy = correct_val / total_val
+
+        logger.info(f"Epoch [{epoch+1}/{config['epochs']}]: Train Loss: {train_loss:.4f}, Train Accuracy: {train_accuracy:.4f}, "
+                    f"Val Loss: {val_loss:.4f}, Val Accuracy: {val_accuracy:.4f}")
+
+        # Save a checkpoint and report metrics to Tune once per epoch
+        with tune.checkpoint_dir(step=epoch) as checkpoint_dir:
+            model_path = os.path.join(checkpoint_dir, "model_checkpoint.model")
+            torch.save(model, model_path)
+            print(f"Model checkpoint saved at {model_path}")
+
+        tune.report(train_loss=train_loss, val_loss=val_loss, train_accuracy=train_accuracy, val_accuracy=val_accuracy)
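+
+# custom_serializer is passed to json.dump() in main(): tensors are converted to
+# plain lists and any other non-JSON-serializable value falls back to str().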
+
+def custom_serializer(obj):
+    if isinstance(obj, torch.Tensor):
+        return obj.tolist()
+    return str(obj)
+
+# Main function to run Ray Tune
+def main():
+    input_dim = 56  # Example input dimension
+    num_classes = 98  # Example number of classes
+    epochs = 5000
+
+    # Hyperparameter search space
+    config = {
+        "input_dim": input_dim,
+        "num_classes": num_classes,
+        "num_layers": tune.randint(3, 8),
+        "units_per_layer": tune.sample_from(lambda spec: [random.choice([64, 128, 256, 512]) for _ in range(spec.config["num_layers"])]),
+        "dropout": tune.uniform(0.0, 0.3),
+        "normalize_input": tune.choice([True, False]),
+        "activation": tune.choice([nn.ReLU(), nn.LeakyReLU(), nn.Tanh(), nn.SiLU()]),
+        "optimizer": tune.choice(["Adam"]),
+        "lr": tune.loguniform(1e-4, 1e-2),
+        "batch_size": tune.choice([32, 64, 128, 256, 512, 1024]),
+        "epochs": epochs,
+    }
+
+    # Define the ASHA early-stopping scheduler
+    scheduler = ASHAScheduler(
+        max_t=epochs,
+        grace_period=10,
+        reduction_factor=3
+    )
+
+    ray.init(_temp_dir="/Pramana/IR2Vec/tuned_models_ir2vec/tmp/ray")
+    analysis = tune.run(
+        train_model,
+        config=config,
+        metric="val_accuracy",
+        mode="max",
+        keep_checkpoints_num=4,
+        scheduler=scheduler,
+        num_samples=1000,
+        max_concurrent_trials=4,
+        resources_per_trial={"cpu": 10, "gpu": 0.125},
+        local_dir="/Pramana/IR2Vec/tuned_models_ir2vec/tmp/ray_results"
+    )
+
+    best_trial = analysis.get_best_trial(metric="val_accuracy", mode="max", scope="all")
+    best_checkpoint = analysis.get_best_checkpoint(best_trial, metric="val_accuracy", mode="max")
+    print(f"Best checkpoint saved at: {best_checkpoint}")
+
+    timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
+
+    best_config = analysis.best_config
+    logger.info("Best hyperparameters found were:")
+    logger.info(best_config)
+
+    best_results = best_trial.last_result
+    logger.info(f"Best results: {best_results}")
+
+    results = {
+        "best_config": best_config,
+        "best_results": best_results,
+        "input_csv_paths": {
+            "train": "/home/intern24009/tune-ir2vec/histogram-10.x/training.csv",
+            "val": "/home/intern24009/tune-ir2vec/histogram-10.x/val.csv",
+            "test": "/home/intern24009/tune-ir2vec/histogram-10.x/testing.csv",
+        },
+    }
+
+    # Collect the config and final metrics of every trial into one table
+    trials_data = []
+    for trial in analysis.trials:
+        trial_data = trial.config
+        trial_data.update(trial.last_result)
+        trials_data.append(trial_data)
+
+    trials_df = pd.DataFrame(trials_data)
+
+    trials_table_path = os.path.join("results", f"{timestamp}_ir2vec_O3_poj_histogram_hyperparameter_tuning_results_sample_1000_epoch_5000.csv")
+    os.makedirs("results", exist_ok=True)
+
+    trials_df.to_csv(trials_table_path, index=False)
+
+    results["all_trials"] = trials_data
+
+    output_dir = "results"
+    os.makedirs(output_dir, exist_ok=True)
+
+    # Save the results to a JSON file
+    result_file_path = os.path.join(output_dir, f"{timestamp}_ir2vec_O3_poj_histogram_tune_results_sample_1000_epoch_5000.json")
+    with open(result_file_path, "w") as f:
+        json.dump(results, f, indent=4, default=custom_serializer)
+
+    logger.info(f"Results saved to {result_file_path}")
+    logger.info(f"Trials table saved to {trials_table_path}")
+
+if __name__ == "__main__":
+    main()
\ No newline at end of file
diff --git a/scripts/tuning_scripts/mlp_model.py b/scripts/tuning_scripts/mlp_model.py
new file mode 100644
index 0000000..21d83b2
--- /dev/null
+++ b/scripts/tuning_scripts/mlp_model.py
@@ -0,0 +1,23 @@
+import logging
+
+import torch.nn as nn
+
+logger = logging.getLogger(__name__)
+
+
+class MLP(nn.Module):
+    def __init__(self, input_dim, num_classes, num_layers, units_per_layer, dropout, normalize_input, activation):
+        super(MLP, self).__init__()
+
+        logger.info("Initializing MLP model...")
+
+        # units_per_layer is a list with one hidden width per layer (see the search space in hypertuning.py)
+        layers = []
+        for i in range(num_layers):
+            in_features = input_dim if i == 0 else units_per_layer[i - 1]
+            out_features = units_per_layer[i]
+            layers.append(nn.Linear(in_features, out_features))
+            layers.append(nn.BatchNorm1d(out_features))  # Always use BatchNorm
+            layers.append(activation)
+            if dropout > 0:
+                layers.append(nn.Dropout(dropout))
+        layers.append(nn.Linear(units_per_layer[-1], num_classes))
+        self.net = nn.Sequential(*layers)
+        self.normalize_input = normalize_input
+        logger.info("MLP model initialized.")
+
+    def forward(self, x):
+        if self.normalize_input:
+            x = nn.functional.normalize(x, p=2, dim=1)  # L2 Normalization
+        return self.net(x)
\ No newline at end of file

From f6ffe2877cd26770a40b65fc641e0148fd59f731 Mon Sep 17 00:00:00 2001
From: Kaivalya
Date: Mon, 13 Jan 2025 21:41:15 +0530
Subject: [PATCH 2/2] Script and readme added

---
 scripts/tuning_scripts/hypertuning.py | 2 +-
 scripts/tuning_scripts/mlp_model.py   | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/scripts/tuning_scripts/hypertuning.py b/scripts/tuning_scripts/hypertuning.py
index 66fe0fd..4a3b1b5 100644
--- a/scripts/tuning_scripts/hypertuning.py
+++ b/scripts/tuning_scripts/hypertuning.py
@@ -268,4 +268,4 @@ def main():
     logger.info(f"Trials table saved to {trials_table_path}")
 
 if __name__ == "__main__":
-    main()
\ No newline at end of file
+    main()

diff --git a/scripts/tuning_scripts/mlp_model.py b/scripts/tuning_scripts/mlp_model.py
index 21d83b2..35fbd35 100644
--- a/scripts/tuning_scripts/mlp_model.py
+++ b/scripts/tuning_scripts/mlp_model.py
@@ -20,4 +20,4 @@ def __init__(self, input_dim, num_classes, num_layers, units_per_layer, dropout,
     def forward(self, x):
         if self.normalize_input:
             x = nn.functional.normalize(x, p=2, dim=1)  # L2 Normalization
-        return self.net(x)
\ No newline at end of file
+        return self.net(x)