#!/bin/bash
# Unlearn TOFU and MUSE models with the UNDIAL trainer, then evaluate each run.
#
# Expects to be run from the repository root (src/train.py, src/eval.py) with
# retain-model eval logs already present under saves/eval/.

# Abort on the first failed command: evaluating a checkpoint whose unlearning
# run failed would only produce garbage results.
set -euo pipefail

models=(
    "Llama-3.2-1B-Instruct"
)
# Each entry is "<trainer> <experiment-config>" (space-separated pair).
trainers_experiments=(
    "UNDIAL unlearn/tofu/default.yaml"
)
# Each entry is "<forget-split> <retain-split>" (space-separated pair).
forget_retain_splits=(
    "forget05 retain95"
    "forget10 retain90"
    "forget01 retain99"
)

per_device_train_batch_size=4 # on two gpus would make effective batch size 32
gradient_accumulation_steps=4
| 19 | + |
########################################################################################################################
########################################### Unlearn TOFU models ########################################################
########################################################################################################################


for split in "${forget_retain_splits[@]}"; do
    # Split "<forget-split> <retain-split>" into two variables.
    # (read -r replaces the previous unquoted `echo $split | cut` pipelines,
    # which word-split/glob their input and spawn two subshells per field.)
    read -r forget_split retain_split <<< "$split"
    for model in "${models[@]}"; do
        for trainer_experiment in "${trainers_experiments[@]}"; do
            # Split "<trainer> <experiment-config>".
            read -r trainer experiment <<< "$trainer_experiment"

            task_name="tofu_${model}_${forget_split}_${trainer}"
            model_path="open-unlearning/tofu_${model}_full"
            echo "${task_name}: Unlearning ${model_path} using ${trainer}"

            # Unlearn.
            # NOTE: the last override must NOT end with a trailing `\` — the old
            # dangling continuation before a blank line silently swallowed
            # whatever line came next.
            python src/train.py --config-name=unlearn.yaml \
                experiment="${experiment}" \
                trainer="${trainer}" \
                task_name="${task_name}" \
                model="${model}" \
                forget_split="${forget_split}" \
                retain_split="${retain_split}" \
                model.model_args.pretrained_model_name_or_path="${model_path}" \
                retain_logs_path="saves/eval/tofu_${model}_${retain_split}/TOFU_EVAL.json" \
                trainer.args.per_device_train_batch_size="${per_device_train_batch_size}" \
                trainer.args.gradient_accumulation_steps="${gradient_accumulation_steps}"

            # Eval the checkpoint produced by the unlearning run above.
            CUDA_VISIBLE_DEVICES=0 python src/eval.py \
                experiment=eval/tofu/default.yaml \
                forget_split="${forget_split}" \
                model="${model}" \
                task_name="${task_name}" \
                model.model_args.pretrained_model_name_or_path="saves/unlearn/${task_name}" \
                paths.output_dir="saves/unlearn/${task_name}/evals" \
                retain_logs_path="saves/eval/tofu_${model}_${retain_split}/TOFU_EVAL.json"
        done
    done
done
| 62 | + |
# #########################################################
# #################### MUSE Unlearning ####################
# #########################################################


model=Llama-3.2-1B-Instruct

data_splits=(
    "News"
    "Books"
)

trainers=(
    "UNDIAL"
)

for data_split in "${data_splits[@]}"; do
    for trainer in "${trainers[@]}"; do

        task_name="muse_${model}_${data_split}_${trainer}"

        # Unlearn. (No trailing `\` on the last override — the old dangling
        # continuation before a blank line was fragile.)
        python src/train.py --config-name=unlearn.yaml \
            experiment=unlearn/muse/default.yaml \
            model="${model}" \
            data_split="${data_split}" \
            trainer="${trainer}" \
            task_name="${task_name}" \
            retain_logs_path="saves/eval/muse_${model}_${data_split}_retrain/MUSE_EVAL.json" \
            trainer.args.per_device_train_batch_size="${per_device_train_batch_size}" \
            trainer.args.gradient_accumulation_steps="${gradient_accumulation_steps}"

        # Eval. Write results into the per-task directory: the previous
        # paths.output_dir=saves/unlearn/${trainer}/evals was keyed by trainer
        # only, so the News and Books runs overwrote each other's eval output.
        # This also matches the TOFU section's saves/unlearn/<task_name>/evals.
        CUDA_VISIBLE_DEVICES=0 python src/eval.py \
            experiment=eval/muse/default.yaml \
            data_split="${data_split}" \
            task_name="${task_name}" \
            model="${model}" \
            model.model_args.pretrained_model_name_or_path="saves/unlearn/${task_name}" \
            paths.output_dir="saves/unlearn/${task_name}/evals" \
            retain_logs_path="saves/eval/muse_${model}_${data_split}_retrain/MUSE_EVAL.json"
    done
done
0 commit comments