What is the difference between the BLEU score and the average sentence BLEU score?


I'm having a hard time computing the BLEU score for my seq2seq model on the task of question generation. My questions are the following: if I use sentence-BLEU to score each output against its reference, and then divide the sum of these sentence-BLEU scores by the length of the test data, will the result be the same as corpus-BLEU? And is the corpus BLEU implemented in the code below the same as NLTK's corpus BLEU?

import sys
import codecs
import os
import math
import operator
import functools


def fetch_data(cand, ref):
    references = []
    if '.eng' in ref:
        reference_file = codecs.open(ref, 'r', 'utf-8')
        references.append(reference_file.readlines())
    else:
        for root, dirs, files in os.walk(ref):
            for f in files:
                reference_file = codecs.open(os.path.join(root, f), 'r', 'utf-8')
                references.append(reference_file.readlines())
    candidate_file = codecs.open(cand, 'r', 'utf-8')
    candidate = candidate_file.readlines()
    return candidate, references


def count_ngram(candidate, references, n):
    clipped_count = 0
    count = 0
    r = 0
    c = 0
    for si in range(len(candidate)):
        # Calculate precision for each sentence
        ref_counts = []
        ref_lengths = []
        # Build dictionary of ngram counts
        for reference in references:
            ref_sentence = reference[si]
            ngram_d = {}
            words = ref_sentence.strip().split()
            ref_lengths.append(len(words))
            limits = len(words) - n + 1
            # loop through the sentence, considering the n-gram length
            for i in range(limits):
                ngram = ' '.join(words[i:i + n]).lower()
                if ngram in ngram_d:
                    ngram_d[ngram] += 1
                else:
                    ngram_d[ngram] = 1
            ref_counts.append(ngram_d)
        # candidate
        cand_sentence = candidate[si]
        cand_dict = {}
        words = cand_sentence.strip().split()
        limits = len(words) - n + 1
        for i in range(limits):
            ngram = ' '.join(words[i:i + n]).lower()
            if ngram in cand_dict:
                cand_dict[ngram] += 1
            else:
                cand_dict[ngram] = 1
        # accumulate clipped counts, total counts and lengths over the corpus
        clipped_count += clip_count(cand_dict, ref_counts)
        count += limits
        r += best_length_match(ref_lengths, len(words))
        c += len(words)
    if clipped_count == 0:
        pr = 0
    else:
        pr = float(clipped_count) / count
    bp = brevity_penalty(c, r)
    return pr, bp


def clip_count(cand_d, ref_ds):
    """Count the clip count for each ngram considering all references"""
    count = 0
    for m in cand_d.keys():
        m_w = cand_d[m]
        m_max = 0
        for ref in ref_ds:
            if m in ref:
                m_max = max(m_max, ref[m])
        m_w = min(m_w, m_max)
        count += m_w
    return count


def best_length_match(ref_l, cand_l):
    """Find the closest length of reference to that of candidate"""
    least_diff = abs(cand_l-ref_l[0])
    best = ref_l[0]
    for ref in ref_l:
        if abs(cand_l-ref) < least_diff:
            least_diff = abs(cand_l-ref)
            best = ref
    return best


def brevity_penalty(c, r):
    if c > r:
        bp = 1
    else:
        bp = math.exp(1-(float(r)/c))
    return bp


def geometric_mean(precisions):
    return (functools.reduce(operator.mul, precisions)) ** (1.0 / len(precisions))


def BLEU(candidate, references):
    precisions = []
    # modified n-gram precisions for n = 1..4
    for i in range(4):
        pr, bp = count_ngram(candidate, references, i + 1)
        precisions.append(pr)
    # bp depends only on the sentence lengths, so the value from the last call is fine
    bleu = geometric_mean(precisions) * bp
    return bleu

if __name__ == "__main__":
    candidate, references = fetch_data(sys.argv[1], sys.argv[2])
    bleu = BLEU(candidate, references)
    print(bleu)
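
Assuming the script above is saved as, say, bleu.py (a made-up file name), it takes a candidate file and either a single .eng reference file or a directory of reference files as command-line arguments:

python bleu.py candidate.txt reference.eng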

CodePudding user response:

I'm not sure about the implementation you show, but for implementations that strictly follow the original paper, such as NLTK's, it would not be the same: https://github.com/nltk/nltk/blob/develop/nltk/translate/bleu_score.py#L123.

Using sentence-BLEU basically amounts to calling corpus-BLEU on a one-sentence corpus, but the reverse does not hold: averaging sentence-BLEU scores is a macro-average over sentences, while corpus-BLEU pools the n-gram counts of all sentences before computing the precisions, i.e. a micro-average. The scores are usually not drastically different, but they do differ.
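
To see the difference concretely, here is a minimal sketch using NLTK's sentence_bleu and corpus_bleu (the toy sentences are made up, and smoothing is applied so the short sentences don't zero out the 4-gram precision):

from nltk.translate.bleu_score import sentence_bleu, corpus_bleu, SmoothingFunction

# hypothetical toy data: one reference per candidate sentence
references = [[['the', 'cat', 'is', 'on', 'the', 'mat']],
              [['there', 'is', 'a', 'cat', 'on', 'the', 'mat']]]
candidates = [['the', 'cat', 'sat', 'on', 'the', 'mat'],
              ['there', 'is', 'a', 'cat', 'on', 'the', 'mat']]

smooth = SmoothingFunction().method1

# macro-average: score each sentence separately, then average the scores
avg_sentence = sum(sentence_bleu(refs, cand, smoothing_function=smooth)
                   for refs, cand in zip(references, candidates)) / len(candidates)

# micro-average: pool the n-gram counts of all sentences before computing precisions
corpus = corpus_bleu(references, candidates, smoothing_function=smooth)

print(avg_sentence, corpus)  # the two values generally differ

Note that without smoothing, sentence_bleu emits a warning and the score collapses toward zero whenever a higher-order n-gram has no overlap at all, which is why the SmoothingFunction is used here.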

I have used BLEU for seq2seq evaluation before, and simply using sentence-BLEU worked just fine.
