#!/bin/bash
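# run all extraction steps (norms, selection, prompt features, Wikipedia
# snippets) for each model/dataset pair, writing results under ./cache/
set -e  # stop at the first failed step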
# list models and datasets
MODEL_NAMES=("gpt-j-6b" "llama-3-8b" "mamba-1.4b")
DATASET_NAMES=("mcf" "zsre")
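# zsre = zsRE relation extraction; mcf = (Multi)CounterFact (dataset names
# assumed from the standard model-editing benchmarks)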
for model in "${MODEL_NAMES[@]}"
do
    echo "Running extractions for model $model..."
    python -m experiments.extract_norms \
        --model "$model" \
        --cache_path ./cache/
done
# extract selection based on first-token match
for model in "${MODEL_NAMES[@]}"
do
    for dataset in "${DATASET_NAMES[@]}"
    do
        echo "Running selection for dataset $dataset model $model..."
        python -m experiments.extract_selection \
            --model "$model" \
            --dataset "$dataset" \
            --batch_size 64 \
            --cache_path ./cache/
    done
done
# extract prompt features at the final token
for model in "${MODEL_NAMES[@]}"
do
    for dataset in "${DATASET_NAMES[@]}"
    do
        echo "Running extractions (features) for dataset $dataset model $model..."
        python -m experiments.extract_features \
            --model "$model" \
            --dataset "$dataset" \
            --batch_size 64 \
            --cache_path ./cache/
    done
done
# extract wiki-train and wiki-test
for model in "${MODEL_NAMES[@]}"
do
    echo "Running extractions (wikipedia) for model $model..."
    python -m experiments.extract_wikipedia \
        --model "$model" \
        --cache_path ./cache/wiki_train/
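    # held-out split: sample 20,000 single snippets of at most 100 tokens,
    # excluding documents already cached in the train split (flag semantics
    # inferred from their names)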
    python -m experiments.extract_wikipedia \
        --model "$model" \
        --take_single 1 \
        --max_len 100 \
        --exclude_front 1 \
        --sample_size 20000 \
        --exclude_path ./cache/wiki_train/ \
        --cache_path ./cache/wiki_test/
done
# extract wikipedia sentences cache
python -m experiments.extract_cache