refactor tests directory to root directory
add tests for model trainer on GPU and its workflow
Showing 16 changed files with 163 additions and 21 deletions.
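With the tests moved to a root-level tests/ directory, the suite can be exercised locally before the GPU workflow below picks it up. A minimal sketch (not part of this commit; the script name is hypothetical, and it assumes HF_TOKEN and WANDB_TOKEN are available in the environment or a local .env file, as the test module below expects):

# run_tests_locally.py -- illustrative only, not part of this commit.
# Mirrors the CI invocation `pytest -vv tests/test_model_trainer.py`,
# assuming HF_TOKEN and WANDB_TOKEN are set in the environment or a .env file.
import sys

import pytest

if __name__ == "__main__":
    sys.exit(pytest.main(["-vv", "tests/test_model_trainer.py"]))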
.github/workflows/ruff.yaml → .github/workflows/lint_codebase.yaml (2 changes: 1 addition & 1 deletion)
@@ -1,4 +1,4 @@
-name: Ruff formatting
+name: Run Linting with Ruff on Multiple OS Environments

 on: [pull_request]
New workflow file (27 additions):

@@ -0,0 +1,27 @@
name: Test training.model_trainer Module with GPU.

on: [pull_request]

jobs:
  run_kaggle_script_action:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout Repository
        uses: actions/checkout@v3

      - name: Execute Tests with GPU Support
        uses: KevKibe/kaggle-script-action@v1.0.1
        env:
          HF_TOKEN: ${{ secrets.HF_TOKEN }}
          WANDB_TOKEN: ${{ secrets.WANDB_TOKEN }}
        with:
          username: ${{ secrets.KAGGLE_USERNAME }}
          key: ${{ secrets.KAGGLE_KEY }}
          title: "Test PEFT Finetuning"
          custom_script: |
            pytest -vv tests/test_model_trainer.py
          enable_internet: true
          enable_gpu: true
          enable_tpu: false
          sleep_time: 60
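The env: block above forwards the repository secrets into the Kaggle kernel, so the test module reads them the same way it does locally. A minimal sketch of that lookup (illustrative, not part of the workflow or test files):

# Illustrative sketch: reading the tokens forwarded by the `env:` block above.
# In CI they come from the workflow; locally, python-dotenv loads them from .env.
import os

from dotenv import load_dotenv

load_dotenv()  # loads a local .env file; existing environment variables are left untouched

hf_token = os.environ.get("HF_TOKEN")
wandb_token = os.environ.get("WANDB_TOKEN")
if hf_token is None or wandb_token is None:
    raise RuntimeError("HF_TOKEN and WANDB_TOKEN must be set (repository secrets in CI, .env locally)")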
File renamed without changes.
File renamed without changes.
src/tests/test_audio_processor.py → tests/test_audio_processor.py (6 changes: 3 additions & 3 deletions)
src/tests/test_model_optimization.py → tests/test_model_optimization.py (2 changes: 1 addition & 1 deletion)
New test file (115 additions); the GPU workflow above runs it as tests/test_model_trainer.py:

@@ -0,0 +1,115 @@
import unittest
from src.training.model_trainer import Trainer
from src.training.data_prep import DataPrep
import os
from dotenv import load_dotenv
load_dotenv()


class TestTrainerManager(unittest.TestCase):
    """Test cases for the Trainer class."""

    def setUp(self) -> None:
        # Common setup for both test cases
        self.model_id = "openai/whisper-tiny"
        process = DataPrep(
            huggingface_token=os.environ.get("HF_TOKEN"),
            dataset_name="mozilla-foundation/common_voice_16_1",
            language_abbr=["af"],
            model_id=self.model_id,
            processing_task="transcribe",
            use_peft=True,
        )
        tokenizer, feature_extractor, feature_processor, model = process.prepare_model()

        # Load datasets
        self.dataset_streaming = process.load_dataset(
            feature_extractor, tokenizer, feature_processor, streaming=True,
            train_num_samples=10, test_num_samples=10
        )
        self.dataset_batch = process.load_dataset(
            feature_extractor, tokenizer, feature_processor, streaming=False,
            train_num_samples=10, test_num_samples=10
        )

        # Check if train/test samples exist in both streaming and batch datasets
        self._validate_dataset(self.dataset_streaming, "streaming")
        self._validate_dataset(self.dataset_batch, "batch")

        # Set up trainers for both streaming and batch datasets
        self.trainer_streaming = Trainer(
            language=["af"],
            huggingface_token=os.environ.get("HF_TOKEN"),
            model_id=self.model_id,
            dataset=self.dataset_streaming,
            model=model,
            feature_processor=feature_processor,
            feature_extractor=feature_extractor,
            tokenizer=tokenizer,
            wandb_api_key=os.environ.get("WANDB_TOKEN"),
            use_peft=False,
            processing_task="transcribe"
        )
        self.trainer_batch = Trainer(
            language=["af"],
            # Credentials are read from the environment rather than hardcoded in the test.
            huggingface_token=os.environ.get("HF_TOKEN"),
            model_id=self.model_id,
            dataset=self.dataset_batch,
            model=model,
            feature_processor=feature_processor,
            feature_extractor=feature_extractor,
            tokenizer=tokenizer,
            wandb_api_key=os.environ.get("WANDB_TOKEN"),
            use_peft=False,
            processing_task="transcribe"
        )

        return super().setUp()

    def _validate_dataset(self, dataset, dataset_type):
        """Helper function to validate that datasets are not empty."""
        has_train_sample = any(True for _ in dataset["train"])
        assert has_train_sample, f"Train dataset for {dataset_type} is empty!"

        has_test_sample = any(True for _ in dataset["test"])
        assert has_test_sample, f"Test dataset for {dataset_type} is empty!"

    def test_01_train_streaming(self):
        """Test case for training with the streaming dataset."""
        self.trainer_streaming.train(
            max_steps=15,
            learning_rate=1e-5,
            save_steps=10,
            eval_steps=10,
            logging_steps=10,
            output_dir=f"../{self.model_id}-finetuned",
            report_to=None,
            push_to_hub=False,
            use_cpu=False,
            optim="adamw_hf",
            per_device_train_batch_size=4
        )
        # Check if output files exist after training
        assert os.path.exists(f"../{self.model_id}-finetuned/preprocessor_config.json")
        assert os.path.exists(f"../{self.model_id}-finetuned/tokenizer_config.json")

    def test_02_train_batch(self):
        """Test case for training with the batch dataset."""
        self.trainer_batch.train(
            max_steps=10,
            learning_rate=1e-5,
            save_steps=10,
            eval_steps=10,
            logging_steps=10,
            output_dir=f"../{self.model_id}-finetuned",
            report_to=None,
            push_to_hub=False,
            use_cpu=True,
            optim="adamw_hf"
        )
        # Check if output files exist after training
        assert os.path.exists(f"../{self.model_id}-finetuned/preprocessor_config.json")
        assert os.path.exists(f"../{self.model_id}-finetuned/tokenizer_config.json")


if __name__ == '__main__':
    unittest.main()
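test_01_train_streaming trains with use_cpu=False, so it relies on the GPU that the Kaggle action provisions. For running the suite on a machine without a GPU, a guard along these lines could skip the GPU-bound case (a sketch, not part of this commit; the class name is hypothetical, and it assumes torch is importable, which the Whisper training stack already requires):

# Illustrative sketch, not part of this commit: skip GPU-bound tests when no
# CUDA device is available, e.g. on a developer laptop without a GPU.
import unittest

import torch


@unittest.skipUnless(torch.cuda.is_available(), "requires a CUDA-capable GPU")
class TestTrainerStreamingOnGPU(unittest.TestCase):
    def test_train_streaming(self):
        ...  # same body as test_01_train_streaming above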