A Python client to seamlessly integrate with MonsterTuner, enabling efficient and cost-effective fine-tuning of AI models on MonsterAPI
MonsterTuner
MonsterTuner is a new service from MonsterAPI designed to streamline the fine-tuning of popular AI models on our advanced computing infrastructure. With just one request, you can customize AI models for your business needs, making the process up to 10X more efficient and cost-effective.
Supported Models for Fine-tuning:
- LLM (Large Language Model) - For use cases such as chat completion, summary generation, sentiment analysis, etc.
- Whisper - For improved speech-to-text transcription.
- SDXL Dreambooth - Fine-tune the Stable Diffusion XL model for customized image generation.
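
Each model family maps to a `service` identifier that is passed to `client.finetune` in the walkthroughs below. The mapping, collected here from those examples purely for quick reference, is:

```python
# service identifiers used with client.finetune(service=..., params=...)
# (reference only; the strings are taken from the examples in this README)
FINETUNING_SERVICES = {
    "llm": "LLM fine-tuning (e.g. mistralai/Mistral-7B-v0.1)",
    "speech2text/whisper": "Whisper speech-to-text fine-tuning",
    "text2img/sdxl-dreambooth": "SDXL Dreambooth image fine-tuning",
}
```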
Demo Colab Notebooks with Python Client
- Install the client and initialize a client object

```bash
pip install monsterapi
```

```python
import os
from monsterapi import client as mclient

# HF read token for gated models/datasets; MonsterAPI key for the client.
os.environ["HF_TOKEN"] = "PROVIDE_HUGGINGFACE_READ_KEY"
os.environ["MONSTER_API_KEY"] = "PROVIDE_MONSTER_API_KEY"
```
- Prepare and send a payload to launch an LLM fine-tuning job.
```python
client = mclient(api_key=os.environ.get("MONSTER_API_KEY"))

launch_payload = {
    "pretrainedmodel_config": {
        "model_path": "mistralai/Mistral-7B-v0.1",
        "use_lora": True,
        "lora_r": 8,
        "lora_alpha": 16,
        "lora_dropout": 0,
        "lora_bias": "none",
        "use_quantization": False,
        "use_gradient_checkpointing": False,
        "parallelization": "nmp"
    },
    "data_config": {
        "data_path": "tatsu-lab/alpaca",
        "data_subset": "default",
        "data_source_type": "hub_link",
        "prompt_template": "Here is an example on how to use tatsu-lab/alpaca dataset ### Input: {instruction} ### Output: {output}",
        "cutoff_len": 512,
        "prevalidated": False
    },
    "training_config": {
        "early_stopping_patience": 5,
        "num_train_epochs": 1,
        "gradient_accumulation_steps": 1,
        "warmup_steps": 50,
        "learning_rate": 0.001,
        "lr_scheduler_type": "reduce_lr_on_plateau",
        "group_by_length": False
    },
    "logging_config": { "use_wandb": False }
}

# Launch a deployment
ret = client.finetune(service="llm", params=launch_payload)
deployment_id = ret.get("deployment_id")
print(ret)

# Get deployment status
status_ret = client.get_deployment_status(deployment_id)
print(status_ret)

# Get deployment logs
logs_ret = client.get_deployment_logs(deployment_id)
print(logs_ret)

# Terminate Deployment
terminate_return = client.terminate_deployment(deployment_id)
print(terminate_return)
```
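
The `finetune` call launches the job and returns a deployment ID, so the status endpoint has to be polled to find out when training finishes. A minimal polling sketch is shown below; the `"status"` key and the terminal state strings are assumptions for illustration, so check the dict actually returned by `get_deployment_status` for the exact values the API reports.

```python
import time

def wait_for_completion(client, deployment_id, poll_interval=60):
    """Poll a finetuning deployment until it reaches a terminal state.

    NOTE: the "status" field name and the terminal values below are assumptions
    made for this sketch; inspect the response of get_deployment_status() to
    see the exact keys and states reported by the API.
    """
    while True:
        status_ret = client.get_deployment_status(deployment_id)
        status = status_ret.get("status")
        print(f"deployment {deployment_id}: {status}")
        if status in ("COMPLETED", "FAILED", "TERMINATED"):  # assumed terminal states
            return status_ret
        time.sleep(poll_interval)  # be gentle with the API

final_status = wait_for_completion(client, deployment_id)
print(final_status)
```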
- Prepare and send a payload to launch a Whisper fine-tuning job.

```python
hf_login_key = os.environ.get("HF_TOKEN")

client = mclient(api_key=os.environ.get("MONSTER_API_KEY"))

launch_payload = {
    # Common Voice is a gated dataset on the Hugging Face Hub, so the
    # Hugging Face token is forwarded along with the payload.
    "hf_login_key": hf_login_key,
    "pretrainedmodel_config": {
        "model_path": "openai/whisper-large-v3",
        "task": "transcribe",
        "language": "Hindi"
    },
    "data_config": {
        "hf_login_key": hf_login_key,
        "data_path": "mozilla-foundation/common_voice_11_0",
        "data_subset": "hi",
        "data_source_type": "hub_link"
    },
    "training_config": {
        "gradient_accumulation_steps": 4,
        "learning_rate": 0.001,
        "warmup_steps": 50,
        "num_train_epochs": 1,
        "per_device_train_batch_size": 8,
        "generation_max_length": 128,
        "lr_scheduler_type": "reduce_lr_on_plateau"
    },
    "logging_config": { "use_wandb": False }
}

# Launch a deployment
ret = client.finetune(service="speech2text/whisper", params=launch_payload)
deployment_id = ret.get("deployment_id")
print(ret)

# Get deployment status
status_ret = client.get_deployment_status(deployment_id)
print(status_ret)

# Get deployment logs
logs_ret = client.get_deployment_logs(deployment_id)
print(logs_ret)

# Terminate Deployment
terminate_return = client.terminate_deployment(deployment_id)
print(terminate_return)
```
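
The Common Voice dataset used above is gated on the Hugging Face Hub, which is why the payload carries `hf_login_key`. Before launching, it can be worth confirming locally that the token really has read access to the dataset. A small sanity check is sketched below; it assumes the `huggingface_hub` package is installed, which the MonsterAPI client itself does not require.

```python
import os
from huggingface_hub import HfApi

def can_read_dataset(repo_id, token):
    """Return True if `token` can read the (possibly gated) dataset `repo_id`.

    This is only a local sanity check for this sketch; the finetuning service
    does its own authentication with the hf_login_key sent in the payload.
    """
    try:
        HfApi().dataset_info(repo_id, token=token)
        return True
    except Exception as err:  # gated-repo / 401 / 403 errors land here
        print(f"Cannot access {repo_id}: {err}")
        return False

assert can_read_dataset("mozilla-foundation/common_voice_11_0", os.environ.get("HF_TOKEN"))
```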
- Prepare and send a payload to launch an SDXL Dreambooth fine-tuning job.

```python
client = mclient(api_key=os.environ.get("MONSTER_API_KEY"))

launch_payload = {
    "dreambooth_config": {
        "model_name": "stabilityai/stable-diffusion-xl-base-1.0",
        "prompt": "Wolf in Full Moon Light",
        "learning_rate": 0.0001,
        "num_steps": 500,
        "gradient_accumulation": 4,
        "resolution": 1024,
        "scheduler": "constant"
    },
    "huggingface_config": { "push_to_hub": False },
    "dataset_config": {
        "data_source_type": "s3_presigned_link",
        "s3_presigned_url": "https://finetuning-service.s3.us-east-2.amazonaws.com/test_bucket/sdxl_dreambooth_test_images.zip"
    }
}

# Launch a deployment
ret = client.finetune(service="text2img/sdxl-dreambooth", params=launch_payload)
deployment_id = ret.get("deployment_id")
print(ret)

# Get deployment status
status_ret = client.get_deployment_status(deployment_id)
print(status_ret)

# Get deployment logs
logs_ret = client.get_deployment_logs(deployment_id)
print(logs_ret)

# Terminate Deployment
terminate_return = client.terminate_deployment(deployment_id)
print(terminate_return)
```