This dataset is a detokenized version of the ECMT dataset, produced with the kiwipiepy library.

The script used to convert the dataset is here: https://gist.github.com/ianporada/a246ebf59696c6e16e1bc1873bc182a4

Library versions used: kiwipiepy==0.20.3 and kiwipiepy_model==0.20.0.
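For intuition, kiwipiepy's `Kiwi.join()` reconstructs natural-language text from (form, tag) morpheme pairs, which is the kind of operation the conversion script performs. The sketch below is illustrative only: the sample tokens and tags are hypothetical, and the actual conversion logic lives in the gist linked above.

```python
from kiwipiepy import Kiwi

kiwi = Kiwi()

# Hypothetical morpheme-level tokens in the dataset's format: each carries
# a surface form ("text") and a Sejong-style POS tag ("xpos").
tokens = [
    {"index": 0, "text": "김상현", "xpos": "NNP"},
    {"index": 1, "text": "은", "xpos": "JX"},
    {"index": 2, "text": "야구", "xpos": "NNG"},
    {"index": 3, "text": "선수", "xpos": "NNG"},
    {"index": 4, "text": "이", "xpos": "VCP"},
    {"index": 5, "text": "다", "xpos": "EF"},
]

# Kiwi.join() accepts (form, tag) pairs and restores spacing and
# morphophonological fusion, producing a single detokenized string.
text = kiwi.join([(t["text"], t["xpos"]) for t in tokens])
print(text)  # e.g. "김상현은 야구 선수이다"
```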
The dataset schema is as follows:
```python
{
    # the original document filename
    "doc_id": str,

    # a list of sentences in the document
    "sentences": [
        {
            # the index of the sentence within the document
            "index": int,

            # a single string representing the text of the sentence
            # (detokenized using kiwipiepy)
            "detokenized_text": str,

            # a list of token positions, which are tuples of the form (start, end):
            # the token at index i corresponds to characters detokenized_text[start:end]
            "token_positions": [(int, int), ...],

            # the original values of each token from the dataset
            "tokens": [{"index": int, "text": str, "xpos": str}, ...],
        },
        ...
    ],

    # a list of coreference chains; each chain is a list of mentions.
    # each mention is a list of the form
    # [sentence_index, start_token_index, end_token_index],
    # where token indices are inclusive indices within the given sentence
    "coref_chains": [[[int, int, int], ...], ...],
}
```
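As a minimal usage sketch (assuming `doc` is a single parsed record following the schema above; the helper names are illustrative, not part of the dataset), a mention can be resolved to its surface string by combining the inclusive token indices with `token_positions`:

```python
def mention_text(doc: dict, mention: list) -> str:
    """Resolve a [sentence_index, start_token_index, end_token_index] mention
    (token indices are inclusive) to its surface string."""
    sent_idx, start_tok, end_tok = mention
    sent = doc["sentences"][sent_idx]
    start_char = sent["token_positions"][start_tok][0]  # start of first token
    end_char = sent["token_positions"][end_tok][1]      # end of last token
    return sent["detokenized_text"][start_char:end_char]

def chains_as_text(doc: dict) -> list:
    """Render every coreference chain in a document as lists of surface strings."""
    return [[mention_text(doc, m) for m in chain] for chain in doc["coref_chains"]]
```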