-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathappendModelGetResults.py
34 lines (27 loc) · 1.68 KB
/
appendModelGetResults.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
from LanguageModels.appendQAModel import AppendQAModel
from CLIPInterface.clipInterface import CLIPInterface
from VQAInterface.vqaInterface import VQAInterface
from CLIPVQA.clipvqa import CLIPVQA
import os, argparse
def get_arguments(argv=None):
    """Parse the command-line arguments for the results-generation script.

    Args:
        argv: Optional list of argument strings. When ``None`` (normal CLI
            use) argparse falls back to ``sys.argv[1:]``; passing an explicit
            list makes this function testable without patching ``sys.argv``.

    Returns:
        argparse.Namespace with the parsed arguments (``num_candidates``).
    """
    parser = argparse.ArgumentParser(description='Generate append-model VQA results')
    # NOTE: the old help text ("Shorter side transformation.") was a
    # copy-paste leftover from an image-preprocessing script.
    parser.add_argument('--num-candidates', type=int, default=3000,
                        help='Number of candidate answers the model considers.')
    return parser.parse_args(argv)
if __name__ == '__main__':
    args = get_arguments()

    # The normalised-features pickle is written by getNormalisedImageFeatures
    # and re-read by generateResultsDataLoader below; keep the path in ONE
    # place so the producer and consumer cannot drift apart.
    featuresPklPath = './output/intermediate/normalisedFeatures.pkl'
    resultsPath = "./output/results/append_numCandidates_{}_resultsVal.json".format(args.num_candidates)

    # Wire up the pipeline: append-style QA language model, CLIP image/text
    # encoder, and the VQA v2 (OpenEnded, MSCOCO) data interface.
    appendModel = AppendQAModel(separator=" ", candidateAnswerGenerator='most_common')
    clipInterface = CLIPInterface(device="cuda")
    vqaInterface = VQAInterface(dataDir='./data', versionType="v2", taskType="OpenEnded", dataType="mscoco")
    clipVqaModel = CLIPVQA(clipInterface, appendModel, vqaInterface)

    # Collect every val2014 image and cache its normalised CLIP features.
    imageDir = './data/Images/mscoco/val2014'
    images = [os.path.join(imageDir, s) for s in os.listdir(imageDir)]
    allFeatures = clipInterface.getNormalisedImageFeatures(imageFilePath=images, batch_size=128,
                                                           pklFilePath=featuresPklPath)

    # Evaluate on val2014, drawing candidate answers from train2014; results
    # are written to resultsPath as JSON.
    results = clipVqaModel.generateResultsDataLoader(evalDataSubType="val2014",
                                                     answersDataSubType="train2014",
                                                     numCandidates=args.num_candidates,
                                                     pklImageFeaturesFile=featuresPklPath,
                                                     outFile=resultsPath)