trainers

class datadreamer.trainers.Trainer(name, force=False, verbose=None, log_level=None)[source]

Bases: ABC

abstract train()[source]
Return type: Trainer

property model[source]
property model_path: str[source]
unload_model()[source]
class datadreamer.trainers.TrainHFClassifier(name, model_name, revision=None, trust_remote_code=False, device=None, device_map=None, dtype=None, load_in_4bit=False, load_in_8bit=False, quantization_config=None, peft_config=None, distributed_config=AUTO, fsdp=AUTO, fsdp_config=AUTO, force=False, verbose=None, log_level=None, **kwargs)[source]

Bases: _TrainHFBase

train(train_input, train_output, validation_input, validation_output, truncate=True, epochs=3.0, batch_size=8, optim='adamw_torch', learning_rate=0.001, weight_decay=0.01, lr_scheduler_type='linear', warmup_steps=0, neftune_noise_alpha=None, seed=42, **kwargs)[source]
Return type: TrainHFClassifier

export_to_disk(path, adapter_only=False)[source]
Return type: PreTrainedModel

publish_to_hf_hub(repo_id, branch=None, private=False, token=None, adapter_only=False, is_synthetic=True, **kwargs)[source]
Return type: str

property model[source]
property tokenizer: PreTrainedTokenizer[source]
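
A minimal usage sketch for TrainHFClassifier (not part of the generated reference). Trainers run inside a DataDreamer session context, and toy DataSource steps stand in here for the text and label columns that a real pipeline would take from earlier synthetic-data steps; the step names, column names, checkpoint, and output path are illustrative:

    from datadreamer import DataDreamer
    from datadreamer.steps import DataSource
    from datadreamer.trainers import TrainHFClassifier

    with DataDreamer("./output"):
        # Toy labeled data standing in for columns produced by earlier steps.
        train_data = DataSource(
            "Training data",
            data={
                "texts": ["I loved it.", "Terrible service.", "Great value.", "Never again."],
                "labels": ["positive", "negative", "positive", "negative"],
            },
        )
        val_data = DataSource(
            "Validation data",
            data={"texts": ["Pretty good.", "Awful."], "labels": ["positive", "negative"]},
        )

        trainer = TrainHFClassifier(
            "Train sentiment classifier",
            model_name="distilbert-base-uncased",
        )
        trainer.train(
            train_input=train_data.output["texts"],
            train_output=train_data.output["labels"],
            validation_input=val_data.output["texts"],
            validation_output=val_data.output["labels"],
            epochs=3,
            batch_size=8,
        )
        trainer.export_to_disk("./sentiment-classifier")

Because train() returns the trainer itself, it can be chained directly with export_to_disk() or publish_to_hf_hub().
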
class datadreamer.trainers.TrainHFFineTune(name, model_name, chat_prompt_template=AUTO, system_prompt=AUTO, revision=None, trust_remote_code=False, device=None, device_map=None, dtype=None, load_in_4bit=False, load_in_8bit=False, quantization_config=None, peft_config=None, distributed_config=AUTO, fsdp=AUTO, fsdp_config=AUTO, force=False, verbose=None, log_level=None, **kwargs)[source]

Bases: _TrainHFBase

train(train_input, train_output, validation_input, validation_output, truncate=True, epochs=3.0, batch_size=8, optim='adamw_torch', learning_rate=0.001, weight_decay=0.01, lr_scheduler_type='linear', warmup_steps=0, neftune_noise_alpha=None, seed=42, **kwargs)[source]
Return type: TrainHFFineTune

export_to_disk(path, adapter_only=False)[source]
Return type: PreTrainedModel

publish_to_hf_hub(repo_id, branch=None, private=False, token=None, adapter_only=False, is_synthetic=True, **kwargs)[source]
Return type: str

property model[source]
property tokenizer: PreTrainedTokenizer[source]
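
A minimal fine-tuning sketch, shown with an optional LoRA adapter passed via peft_config. Toy DataSource steps stand in for real data; the step names, column names, checkpoint, and Hub repo id are illustrative:

    from datadreamer import DataDreamer
    from datadreamer.steps import DataSource
    from datadreamer.trainers import TrainHFFineTune
    from peft import LoraConfig

    with DataDreamer("./output"):
        train_data = DataSource(
            "Training data",
            data={
                "prompts": [
                    "Summarize: The cat sat on the mat all afternoon.",
                    "Summarize: Rain fell steadily through the night.",
                ],
                "completions": ["A cat lounged on a mat.", "It rained all night."],
            },
        )
        val_data = DataSource(
            "Validation data",
            data={
                "prompts": ["Summarize: The sun rose over the hills."],
                "completions": ["Sunrise over the hills."],
            },
        )

        trainer = TrainHFFineTune(
            "Fine-tune summarizer",
            model_name="google/flan-t5-base",
            peft_config=LoraConfig(),  # optional: train a LoRA adapter instead of all weights
        )
        trainer.train(
            train_input=train_data.output["prompts"],
            train_output=train_data.output["completions"],
            validation_input=val_data.output["prompts"],
            validation_output=val_data.output["completions"],
            epochs=3,
            batch_size=8,
        )
        trainer.publish_to_hf_hub("your-username/toy-summarizer", adapter_only=True)

With adapter_only=True, only the trained adapter weights are published rather than the full model.
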
class datadreamer.trainers.TrainSentenceTransformer(name, model_name, device=None, dtype=None, peft_config=None, distributed_config=AUTO, fsdp=AUTO, fsdp_config=AUTO, force=False, verbose=None, log_level=None, **kwargs)[source]

Bases: _TrainHFBase

train_with_triplets(train_anchors, train_positives, train_negatives, validation_anchors, validation_positives, validation_negatives, truncate=True, margin=DEFAULT, epochs=3.0, batch_size=8, loss=AUTO, optim='adamw_torch', learning_rate=0.001, weight_decay=0.01, lr_scheduler_type='linear', warmup_steps=0, neftune_noise_alpha=None, seed=42, **kwargs)[source]
Return type: TrainSentenceTransformer

train_with_positive_pairs(train_anchors, train_positives, validation_anchors, validation_positives, truncate=True, epochs=3.0, batch_size=8, loss=AUTO, optim='adamw_torch', learning_rate=0.001, weight_decay=0.01, lr_scheduler_type='linear', warmup_steps=0, neftune_noise_alpha=None, seed=42, **kwargs)[source]
Return type: TrainSentenceTransformer

train_with_labeled_pairs(train_anchors, train_positives, train_labels, validation_anchors, validation_positives, validation_labels, truncate=True, margin=DEFAULT, epochs=3.0, batch_size=8, loss=AUTO, optim='adamw_torch', learning_rate=0.001, weight_decay=0.01, lr_scheduler_type='linear', warmup_steps=0, neftune_noise_alpha=None, seed=42, **kwargs)[source]
Return type: TrainSentenceTransformer

export_to_disk(path, adapter_only=False)[source]
Return type: SentenceTransformer

publish_to_hf_hub(repo_id, private=False, token=None, adapter_only=False, is_synthetic=True, **kwargs)[source]
Return type: str

property model[source]
property tokenizer: PreTrainedTokenizer[source]
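
A minimal sketch using the triplet objective; train_with_positive_pairs() and train_with_labeled_pairs() follow the same pattern with their respective columns. The toy data, step names, and base checkpoint are illustrative:

    from datadreamer import DataDreamer
    from datadreamer.steps import DataSource
    from datadreamer.trainers import TrainSentenceTransformer

    with DataDreamer("./output"):
        train_data = DataSource(
            "Training triplets",
            data={
                "anchors": [
                    "How do I reset my password?",
                    "What is your refund policy?",
                ],
                "positives": [
                    "Steps to change your account password.",
                    "Refunds are issued within 30 days of purchase.",
                ],
                "negatives": [
                    "Our office hours are 9am to 5pm.",
                    "The app is available on the App Store.",
                ],
            },
        )
        val_data = DataSource(
            "Validation triplets",
            data={
                "anchors": ["Where can I download the app?"],
                "positives": ["The app is available on the App Store and Google Play."],
                "negatives": ["We ship to most countries worldwide."],
            },
        )

        trainer = TrainSentenceTransformer(
            "Train retriever",
            model_name="sentence-transformers/all-mpnet-base-v2",
        )
        trainer.train_with_triplets(
            train_anchors=train_data.output["anchors"],
            train_positives=train_data.output["positives"],
            train_negatives=train_data.output["negatives"],
            validation_anchors=val_data.output["anchors"],
            validation_positives=val_data.output["positives"],
            validation_negatives=val_data.output["negatives"],
            epochs=3,
            batch_size=8,
        )
        trainer.export_to_disk("./retriever")
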
class datadreamer.trainers.TrainHFDPO(name, model_name, chat_prompt_template=AUTO, system_prompt=AUTO, revision=None, trust_remote_code=False, device=None, device_map=None, ref_model_device=None, ref_model_device_map=None, dtype=None, load_in_4bit=False, load_in_8bit=False, quantization_config=None, peft_config=None, distributed_config=AUTO, fsdp=AUTO, fsdp_config=AUTO, force=False, verbose=None, log_level=None, **kwargs)[source]

Bases: TrainHFFineTune

train(train_prompts, train_chosen, train_rejected, validation_prompts, validation_chosen, validation_rejected, truncate=True, epochs=3.0, batch_size=8, optim='adamw_torch', learning_rate=0.001, weight_decay=0.01, lr_scheduler_type='linear', warmup_steps=0, neftune_noise_alpha=None, dpo_beta=0.1, loss_type='kto_pair', disable_dropout=True, seed=42, **kwargs)[source]
Return type: TrainHFDPO

export_to_disk(path, adapter_only=False)[source]
Return type: PreTrainedModel

publish_to_hf_hub(repo_id, branch=None, private=False, token=None, adapter_only=False, is_synthetic=True, **kwargs)[source]
Return type: str

property model[source]
property tokenizer: PreTrainedTokenizer[source]
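
A minimal DPO sketch over prompt/chosen/rejected preference columns; the toy data and base checkpoint are illustrative:

    from datadreamer import DataDreamer
    from datadreamer.steps import DataSource
    from datadreamer.trainers import TrainHFDPO

    with DataDreamer("./output"):
        train_data = DataSource(
            "Preference data",
            data={
                "prompts": ["Explain photosynthesis in one sentence."],
                "chosen": [
                    "Plants use sunlight, water, and CO2 to make sugar and oxygen."
                ],
                "rejected": ["Photosynthesis is a thing plants do."],
            },
        )
        val_data = DataSource(
            "Validation preference data",
            data={
                "prompts": ["Explain gravity in one sentence."],
                "chosen": ["Gravity is the mutual attraction between masses."],
                "rejected": ["Gravity makes stuff fall."],
            },
        )

        trainer = TrainHFDPO("Align with DPO", model_name="gpt2")
        trainer.train(
            train_prompts=train_data.output["prompts"],
            train_chosen=train_data.output["chosen"],
            train_rejected=train_data.output["rejected"],
            validation_prompts=val_data.output["prompts"],
            validation_chosen=val_data.output["chosen"],
            validation_rejected=val_data.output["rejected"],
            dpo_beta=0.1,  # strength of the implicit KL penalty toward the reference model
            epochs=3,
            batch_size=8,
        )
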
class datadreamer.trainers.TrainHFRewardModel(name, model_name, chat_prompt_template=AUTO, system_prompt=AUTO, revision=None, trust_remote_code=False, device=None, device_map=None, dtype=None, load_in_4bit=False, load_in_8bit=False, quantization_config=None, peft_config=None, distributed_config=AUTO, fsdp=AUTO, fsdp_config=AUTO, force=False, verbose=None, log_level=None, **kwargs)[source]

Bases: TrainHFClassifier

train_with_pairs(train_prompts, train_chosen, train_rejected, validation_prompts, validation_chosen, validation_rejected, truncate=True, epochs=3.0, batch_size=8, optim='adamw_torch', learning_rate=0.001, weight_decay=0.01, lr_scheduler_type='linear', warmup_steps=0, neftune_noise_alpha=None, seed=42, **kwargs)[source]
Return type: TrainHFRewardModel

train_with_pairs_and_scores(train_prompts, train_chosen, train_chosen_scores, train_rejected, train_rejected_scores, validation_prompts, validation_chosen, validation_chosen_scores, validation_rejected, validation_rejected_scores, truncate=True, epochs=3.0, batch_size=8, optim='adamw_torch', learning_rate=0.001, weight_decay=0.01, lr_scheduler_type='linear', warmup_steps=0, neftune_noise_alpha=None, seed=42, **kwargs)[source]
Return type: TrainHFRewardModel

train_with_scores(train_prompts, train_generations, train_scores, validation_prompts, validation_generations, validation_scores, truncate=True, epochs=3.0, batch_size=8, optim='adamw_torch', learning_rate=0.001, weight_decay=0.01, lr_scheduler_type='linear', warmup_steps=0, neftune_noise_alpha=None, seed=42, **kwargs)[source]
Return type: TrainHFRewardModel

export_to_disk(path, adapter_only=False)[source]
Return type: PreTrainedModel

publish_to_hf_hub(repo_id, branch=None, private=False, token=None, adapter_only=False, is_synthetic=True, **kwargs)[source]
Return type: str

property model[source]
property tokenizer: PreTrainedTokenizer[source]
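
A minimal sketch with train_with_pairs(); train_with_pairs_and_scores() and train_with_scores() follow the same pattern with their additional score columns. The toy data and checkpoint are illustrative:

    from datadreamer import DataDreamer
    from datadreamer.steps import DataSource
    from datadreamer.trainers import TrainHFRewardModel

    with DataDreamer("./output"):
        train_data = DataSource(
            "Preference pairs",
            data={
                "prompts": ["Write a one-line product description for a thermos."],
                "chosen": ["Keeps drinks hot for 12 hours in a leakproof steel flask."],
                "rejected": ["It is a thermos."],
            },
        )
        val_data = DataSource(
            "Validation pairs",
            data={
                "prompts": ["Write a one-line product description for a backpack."],
                "chosen": ["A weatherproof 25L pack with a padded laptop sleeve."],
                "rejected": ["A bag."],
            },
        )

        trainer = TrainHFRewardModel(
            "Train reward model",
            model_name="distilbert-base-uncased",
        )
        trainer.train_with_pairs(
            train_prompts=train_data.output["prompts"],
            train_chosen=train_data.output["chosen"],
            train_rejected=train_data.output["rejected"],
            validation_prompts=val_data.output["prompts"],
            validation_chosen=val_data.output["chosen"],
            validation_rejected=val_data.output["rejected"],
            epochs=3,
            batch_size=8,
        )
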
class datadreamer.trainers.TrainHFPPO(name, model_name, chat_prompt_template=AUTO, system_prompt=AUTO, revision=None, trust_remote_code=False, device=None, device_map=None, ref_model_device=None, ref_model_device_map=None, dtype=None, load_in_4bit=False, load_in_8bit=False, quantization_config=None, peft_config=None, distributed_config=AUTO, fsdp=AUTO, fsdp_config=AUTO, force=False, verbose=None, log_level=None, **kwargs)[source]

Bases: TrainHFFineTune

train(train_prompts, validation_prompts, reward_model, reward_model_tokenizer=None, max_new_tokens=None, temperature=1.0, top_p=1.0, generation_kwargs=None, truncate=True, epochs=3.0, batch_size=8, optimizer=AUTO, learning_rate=1e-05, weight_decay=0.01, lr_scheduler=None, seed=42, length_sampler=None, init_kl_coef=0.2, adap_kl_ctrl=True, adap_kl_target=6.0, max_kl=AUTO, **kwargs)[source]
Return type: TrainHFPPO

export_to_disk(path, adapter_only=False)[source]
Return type: PreTrainedModel

publish_to_hf_hub(repo_id, branch=None, private=False, token=None, adapter_only=False, is_synthetic=True, **kwargs)[source]
Return type: str

property model[source]
property tokenizer: PreTrainedTokenizer[source]
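
A minimal PPO sketch. It assumes reward_model may be given as a transformers sequence-classification model along with its tokenizer via reward_model_tokenizer (check the parameter documentation for the accepted reward-model types); the prompts and checkpoints are illustrative:

    from datadreamer import DataDreamer
    from datadreamer.steps import DataSource
    from datadreamer.trainers import TrainHFPPO
    from transformers import AutoModelForSequenceClassification, AutoTokenizer

    with DataDreamer("./output"):
        train_data = DataSource(
            "Prompts",
            data={"prompts": ["Write a friendly greeting.", "Give one cooking tip."]},
        )
        val_data = DataSource(
            "Validation prompts",
            data={"prompts": ["Describe a sunset."]},
        )

        # A sequence-classification model that scores generations as rewards.
        reward_name = "OpenAssistant/reward-model-deberta-v3-large-v2"
        reward_model = AutoModelForSequenceClassification.from_pretrained(reward_name)
        reward_tokenizer = AutoTokenizer.from_pretrained(reward_name)

        trainer = TrainHFPPO("RLHF with PPO", model_name="gpt2")
        trainer.train(
            train_prompts=train_data.output["prompts"],
            validation_prompts=val_data.output["prompts"],
            reward_model=reward_model,
            reward_model_tokenizer=reward_tokenizer,
            max_new_tokens=32,
            epochs=3,
            batch_size=8,
        )
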
class datadreamer.trainers.TrainSetFitClassifier(name, model_name, multi_target_strategy=AUTO, device=None, dtype=None, peft_config=None, force=False, verbose=None, log_level=None, **kwargs)[source]

Bases: TrainHFClassifier

train(train_input, train_output, validation_input, validation_output, truncate=True, sampling_strategy='oversampling', end_to_end=False, epochs=3.0, batch_size=8, body_learning_rate=0.001, head_learning_rate=0.001, weight_decay=0.01, warmup_steps=0, seed=42, **kwargs)[source]
Return type: TrainSetFitClassifier

export_to_disk(path)[source]
Return type: PreTrainedModel

publish_to_hf_hub(repo_id, branch=None, private=False, token=None, is_synthetic=True, **kwargs)[source]
Return type: str

property model[source]
property tokenizer: PreTrainedTokenizer[source]
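
A minimal few-shot sketch; SetFit fine-tunes a sentence-transformer body with a classification head, so a handful of labeled examples per class can suffice. The toy data, step names, and checkpoint are illustrative:

    from datadreamer import DataDreamer
    from datadreamer.steps import DataSource
    from datadreamer.trainers import TrainSetFitClassifier

    with DataDreamer("./output"):
        train_data = DataSource(
            "Few-shot data",
            data={
                "texts": [
                    "Loved the battery life.",
                    "Screen cracked in a week.",
                    "Fast shipping, great build.",
                    "Support never replied.",
                ],
                "labels": ["positive", "negative", "positive", "negative"],
            },
        )
        val_data = DataSource(
            "Validation data",
            data={
                "texts": ["Works as advertised.", "Broke on day one."],
                "labels": ["positive", "negative"],
            },
        )

        trainer = TrainSetFitClassifier(
            "Train few-shot classifier",
            model_name="sentence-transformers/paraphrase-mpnet-base-v2",
        )
        trainer.train(
            train_input=train_data.output["texts"],
            train_output=train_data.output["labels"],
            validation_input=val_data.output["texts"],
            validation_output=val_data.output["labels"],
            epochs=1,
            batch_size=8,
        )
        trainer.export_to_disk("./setfit-classifier")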