misc: add pre-commit config (#637)

Author: zhyncs
Date: 2024-07-18 04:55:39 +10:00
Committed by: GitHub
Parent: a8552cb18b
Commit: 2e341cd493
43 changed files with 481 additions and 299 deletions
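
Note: the .pre-commit-config.yaml added by this commit does not appear in the excerpt below; the hunks only show example code after reformatting by the new hooks. The double quotes, collapsed short calls, and the "[len(\"ProgramState(\") : -1]" slice spacing all match black's output style, so a minimal sketch of such a config could look like the following (the rev pin and hook selection are illustrative, not taken from this commit):

repos:
  - repo: https://github.com/psf/black
    rev: 24.4.2  # illustrative pin; the actual rev in the commit may differ
    hooks:
      - id: black

With a config like this checked in, contributors run "pip install pre-commit" once, then "pre-commit install", and the formatter runs automatically on every commit.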


@@ -33,8 +33,7 @@ def cot_decoding(s, question, get_top_k, is_chat_model, verbose):
     )
     logprobs = step_0.get_meta_info("get_top_k")["decode_top_logprobs"][0]
-    print("Decoding step 0:",
-          ", ".join(pformat(token[2]) for token in logprobs))
+    print("Decoding step 0:", ", ".join(pformat(token[2]) for token in logprobs))
     for idx, (f, token) in enumerate(zip(forks, logprobs)):
         logprob, token_id, text = token
         f += text
@@ -56,17 +55,9 @@ def cot_decoding(s, question, get_top_k, is_chat_model, verbose):
         )
         # calculate probability disparity between the top and secondary tokens
-        x1s = [
-            exp(xt[0][0])
-            for xt in f.get_meta_info("answer")["decode_top_logprobs"]
-        ]
-        x2s = [
-            exp(xt[1][0])
-            for xt in f.get_meta_info("answer")["decode_top_logprobs"]
-        ]
-        tokens = [
-            xt[0][2] for xt in f.get_meta_info("answer")["decode_top_logprobs"]
-        ]
+        x1s = [exp(xt[0][0]) for xt in f.get_meta_info("answer")["decode_top_logprobs"]]
+        x2s = [exp(xt[1][0]) for xt in f.get_meta_info("answer")["decode_top_logprobs"]]
+        tokens = [xt[0][2] for xt in f.get_meta_info("answer")["decode_top_logprobs"]]
         delta = (sum(x1s) - sum(x2s)) / len(x1s)
         # extract the answer span (without the '<|end_of_text|>' token)
@@ -79,42 +70,45 @@ def cot_decoding(s, question, get_top_k, is_chat_model, verbose):
             top_logprobs_num=2,
             return_text_in_logprobs=True,
         )
-        answer = answer_forks[idx]['answer_span'].replace('\n', ' ').strip(':')
+        answer = answer_forks[idx]["answer_span"].replace("\n", " ").strip(":")
         print(
             f"{YELLOW}Path #{idx} {pformat(text)}[{exp(logprob):.3f}] (score={delta}, answer={answer}){CLEAR}"
         )
-        generated_text = str(answer_forks[idx])[len("ProgramState("):-1]
+        generated_text = str(answer_forks[idx])[len("ProgramState(") : -1]
         print(f"{BLUE}{pformat(generated_text)}{CLEAR}")
         if verbose:
             answer_tokens = [
-                xt[0][2] for xt in answer_forks[idx].get_meta_info(
-                    "answer_span")["decode_top_logprobs"]
+                xt[0][2]
+                for xt in answer_forks[idx].get_meta_info("answer_span")[
+                    "decode_top_logprobs"
+                ]
             ]
             answer_x1s = [
-                exp(xt[0][0]) for xt in answer_forks[idx].get_meta_info(
-                    "answer_span")["decode_top_logprobs"]
+                exp(xt[0][0])
+                for xt in answer_forks[idx].get_meta_info("answer_span")[
+                    "decode_top_logprobs"
+                ]
             ]
             answer_x2s = [
-                exp(xt[1][0]) for xt in answer_forks[idx].get_meta_info(
-                    "answer_span")["decode_top_logprobs"]
+                exp(xt[1][0])
+                for xt in answer_forks[idx].get_meta_info("answer_span")[
+                    "decode_top_logprobs"
+                ]
             ]
             for token, x1, x2 in zip(tokens, x1s, x2s):
-                print(f" {GREEN}{pformat(token)}{CLEAR}({x1:.3f}-{x2:.3f})",
-                      end="")
+                print(f" {GREEN}{pformat(token)}{CLEAR}({x1:.3f}-{x2:.3f})", end="")
             print("\n===========")
             for token, x1, x2 in zip(answer_tokens, answer_x1s, answer_x2s):
-                print(f" {GREEN}{pformat(token)}{CLEAR}({x1:.3f}-{x2:.3f})",
-                      end="")
+                print(f" {GREEN}{pformat(token)}{CLEAR}({x1:.3f}-{x2:.3f})", end="")
             print()


 sgl.set_default_backend(sgl.RuntimeEndpoint("http://localhost:30000"))
 state = cot_decoding.run(
-    question=
-    r"Claire makes a 3 egg omelet every morning for breakfast. How many dozens of eggs will she eat in 4 weeks?",
+    question=r"Claire makes a 3 egg omelet every morning for breakfast. How many dozens of eggs will she eat in 4 weeks?",
     get_top_k=10,
     is_chat_model=True,
     verbose=False,