File size: 2,561 Bytes
b6b15da
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
#!/usr/bin/env python3

"""
Run this script as ./conversion_script.py to convert the SCITE files to HF-compatible parquet files.
"""

import re
from typing import Literal

import pandas as pd


Split = Literal["train", "test"]

def convert_for_causality_detection(split: Split) -> None:
    """Build the binary causality-detection dataset for one corpus split.

    Reads ``{split}-corpus.xml``, maps the label to 0 ("Non-Causal") or 1
    (anything else), strips the <eN>/</eN> entity markers from the sentence
    text, and writes a parquet file indexed by ``scite_{split}_{id}``.
    """
    frame = pd.read_xml(f"{split}-corpus.xml")
    # Binarize: "Non-Causal" -> 0, every causal variant -> 1.
    frame["label"] = frame["label"].apply(lambda lbl: 1 if lbl != "Non-Causal" else 0)
    # Plain text for this task: remove the inline entity markup.
    frame["text"] = frame["sentence"].apply(lambda sent: re.sub(r'</?e\d+>', "", sent))
    frame["index"] = frame["id"].apply(lambda row_id: f"scite_{split}_{row_id}")
    frame = frame.set_index("index")[["label", "text"]]
    frame.to_parquet(f"./causality-detection/{split}.parquet", engine="pyarrow")

def convert_for_causal_candidate_extraction(split: Split) -> None:
    """Build the causal-candidate-extraction dataset for one corpus split.

    Splits each sentence at the <eN>/</eN> entity markers and records, for
    every resulting text segment, the sorted list of entity ids that were
    open at that point. Writes a parquet file indexed by ``scite_{split}_{id}``
    with columns ``tokens`` (segments) and ``entity`` (active ids per segment).
    """
    def map_to_tokens(text: str) -> pd.Series:
        splits: list[str] = []          # text segments between markers
        tags: list[list[int]] = []      # entity ids open within each segment
        curtags: set[int] = set()       # currently-open entity ids
        pos = 0                         # end offset of the last consumed marker
        for match in re.finditer(r"(.*?)<(/?)e(\d+)>", text):
            splits.append(match[1])
            # sorted() for deterministic output (set order is not guaranteed)
            tags.append(sorted(curtags))
            if match[2] == "":
                curtags.add(int(match[3]))
            else:
                curtags.remove(int(match[3]))
            pos = match.end()
        # BUG FIX: the original dropped any text after the final marker
        # (e.g. the trailing "." in "<e1>x</e1> causes <e2>y</e2>.").
        # Keep it as one last segment with the (now empty) open-tag set.
        if pos < len(text):
            splits.append(text[pos:])
            tags.append(sorted(curtags))
        return pd.Series((splits, tags))

    df = pd.read_xml(f"{split}-corpus.xml")
    df[["tokens", "entity"]] = df["sentence"].apply(map_to_tokens)
    df["index"] = df["id"].apply(lambda x: f"scite_{split}_{x}")
    df = df[["index", "tokens", "entity"]].set_index("index")
    df.to_parquet(f"./causal-candidate-extraction/{split}.parquet", engine="pyarrow")

def convert_for_causality_identification(split: Split) -> None:
    """Build the causality-identification dataset for one corpus split.

    Keeps the sentence text (entity markers included) and parses the
    ``Cause-Effect((a,b),(c,d),...)`` label into a list of relation dicts;
    "Non-Causal" rows get an empty list. Writes a parquet file indexed by
    ``scite_{split}_{id}``.
    """
    def map_label(label: str) -> list[dict]:
        # Non-causal sentences carry no relation pairs.
        if label == "Non-Causal":
            return []
        # Strip the "Cause-Effect(" prefix and the closing ")", then split
        # the remaining "(a,b),(c,d)" list into its pairs.
        inner = label[len("Cause-Effect("):-1]
        relations: list[dict] = []
        for pair in inner.split("),("):
            first, second = pair.strip('()').split(',')
            relations.append({"relationship": 1, "first": first, "second": second})
        return relations

    corpus = pd.read_xml(f"{split}-corpus.xml", dtype_backend="pyarrow")
    corpus["relations"] = corpus["label"].apply(map_label)
    corpus["text"] = corpus["sentence"]
    corpus["index"] = corpus["id"].apply(lambda row_id: f"scite_{split}_{row_id}")
    corpus = corpus.set_index("index")[["text", "relations"]]
    corpus.to_parquet(f"./causality-identification/{split}.parquet", engine="pyarrow")

def main() -> None:
    """Run every conversion for both corpus splits.

    Task order (and test-before-train order within each task) matches the
    original script: detection, candidate extraction, identification.
    """
    for convert in (
        convert_for_causality_detection,
        convert_for_causal_candidate_extraction,
        convert_for_causality_identification,
    ):
        for split in ("test", "train"):
            convert(split)


# Guard the entry point so importing this module does not trigger the
# (file-reading and file-writing) conversions as a side effect.
if __name__ == "__main__":
    main()