Example configuration file
The following example shows a torchtune configuration file, 70B_lora.yaml, for LoRA fine-tuning of Llama 3 70B on the Databricks Dolly 15k dataset.
File: 70B_lora.yaml

# Model Arguments
model:
  _component_: torchtune.models.llama3.lora_llama3_70b
  lora_attn_modules: ['q_proj', 'k_proj', 'v_proj']
  apply_lora_to_mlp: False
  apply_lora_to_output: False
  lora_rank: 16
  lora_alpha: 32

tokenizer:
  _component_: torchtune.models.llama3.llama3_tokenizer
  path: /work/torchtune/Meta-Llama-3-70B/original/tokenizer.model

checkpointer:
  _component_: torchtune.utils.FullModelHFCheckpointer
  checkpoint_dir: /work/torchtune/Meta-Llama-3-70B
  checkpoint_files: [
    model-00001-of-00030.safetensors,
    model-00002-of-00030.safetensors,
    model-00003-of-00030.safetensors,
    model-00004-of-00030.safetensors,
    model-00005-of-00030.safetensors,
    model-00006-of-00030.safetensors,
    model-00007-of-00030.safetensors,
    model-00008-of-00030.safetensors,
    model-00009-of-00030.safetensors,
    model-00010-of-00030.safetensors,
    model-00011-of-00030.safetensors,
    model-00012-of-00030.safetensors,
    model-00013-of-00030.safetensors,
    model-00014-of-00030.safetensors,
    model-00015-of-00030.safetensors,
    model-00016-of-00030.safetensors,
    model-00017-of-00030.safetensors,
    model-00018-of-00030.safetensors,
    model-00019-of-00030.safetensors,
    model-00020-of-00030.safetensors,
    model-00021-of-00030.safetensors,
    model-00022-of-00030.safetensors,
    model-00023-of-00030.safetensors,
    model-00024-of-00030.safetensors,
    model-00025-of-00030.safetensors,
    model-00026-of-00030.safetensors,
    model-00027-of-00030.safetensors,
    model-00028-of-00030.safetensors,
    model-00029-of-00030.safetensors,
    model-00030-of-00030.safetensors,
  ]
  recipe_checkpoint: null
  output_dir: /tmp/Meta-Llama-3-70B-Instruct
  model_type: LLAMA3
resume_from_checkpoint: False

# Dataset and Sampler
dataset:
  _component_: torchtune.datasets.databricks_dolly_15k
  train_on_input: True
seed: null
shuffle: True
batch_size: 2

# Optimizer and Scheduler
optimizer:
  _component_: torch.optim.AdamW
  weight_decay: 0.01
  lr: 3e-4
lr_scheduler:
  _component_: torchtune.modules.get_cosine_schedule_with_warmup
  num_warmup_steps: 100

loss:
  _component_: torch.nn.CrossEntropyLoss

# Training
epochs: 1
max_steps_per_epoch: null
gradient_accumulation_steps: 1

# Logging
output_dir: /tmp/Meta-Llama-3-70B-Instruct
metric_logger:
  _component_: torchtune.utils.metric_logging.TensorBoardLogger
  log_dir: ${output_dir}
  organize_logs: true
log_every_n_steps: 1
log_peak_memory_stats: False

# Environment
device: cuda
dtype: bf16
enable_activation_checkpointing: True
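With the model weights, tokenizer, and this file in place, the run can be launched with torchtune's distributed LoRA recipe. A minimal sketch of the launch command, assuming a single node with 8 accelerators and the file saved in the current working directory:

  # Launch the distributed LoRA fine-tuning recipe with the config above
  tune run --nnodes 1 --nproc_per_node 8 lora_finetune_distributed --config ./70B_lora.yaml

Individual values can also be overridden on the command line without editing the file, for example appending batch_size=4 to the command above to change the batch size for a single run.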
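Because metric_logger is set to TensorBoardLogger with log_dir: ${output_dir}, training metrics are written under the run's output directory and can be inspected during or after training. A typical invocation, assuming TensorBoard is installed in the environment:

  # Point TensorBoard at the output directory from the config
  tensorboard --logdir /tmp/Meta-Llama-3-70B-Instruct --port 6006

Then browse to http://localhost:6006 to follow the loss and learning-rate curves as training progresses.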