forked from tylin/coco-caption
-
Notifications
You must be signed in to change notification settings - Fork 85
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Add example script and update pip installation method
- Loading branch information
Showing
5 changed files
with
44 additions
and
6 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Large diffs are not rendered by default.
Oops, something went wrong.
Large diffs are not rendered by default.
Oops, something went wrong.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,25 @@ | ||
from pycocotools.coco import COCO
from pycocoevalcap.eval import COCOEvalCap

# Paths to the ground-truth annotations and the (fake) generated captions.
annotation_file = 'captions_val2014.json'
results_file = 'captions_val2014_fakecap_results.json'

# Load the ground-truth COCO annotations, then register the generated
# captions against them so both share the same image index.
coco = COCO(annotation_file)
coco_result = coco.loadRes(results_file)

# Build the caption evaluator from the ground truth / result pair.
coco_eval = COCOEvalCap(coco, coco_result)

# Restrict scoring to the images that actually appear in the result file.
# Drop this assignment to score the entire validation set instead.
coco_eval.params['image_id'] = coco_result.getImgIds()

# Run all caption metrics. Note: the first SPICE run is slow (it builds a
# cache), subsequent runs are much faster.
coco_eval.evaluate()

# Report each metric's aggregate score to three decimal places.
for metric, score in coco_eval.eval.items():
    print(f'{metric}: {score:.3f}')
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters