[PYTHON] 100 language processing knocks (2020): 27

"""
27. Removal of internal links
In addition to the processing of problem 26, remove MediaWiki's internal link markup from the template values and convert them to text (reference: Markup quick reference table).
"""

import json
import re

import utils


def get_uk_text(path):
    """Return the "text" of the article whose "title" is "England",
    reading a JSON Lines file (one JSON object per line)."""
    with open(path) as f:
        for line in f:
            line_data = json.loads(line)
            if line_data["title"] == "England":
                data = line_data
                break
    return data["text"]


def get_basic_info(string: str) -> str:
    """Get basic information section
    """
    pattern = re.compile(
        r"""
            ^\{\{Basic information.*?$   # Line starting with '{{Basic information'
            (.*?)       # Capture target: any characters (zero or more), non-greedy
            ^\}\}$      # Line consisting only of '}}'
        """,
        re.MULTILINE | re.DOTALL | re.VERBOSE,
    )

    return re.findall(pattern, string)[0]
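
# A quick illustration (not from the original article) of what get_basic_info
# returns: the template body between the opening line and the closing '}}' line,
# shown on a toy snippet with translated field names.
_demo_template = "{{Basic information Country\n|Common name = United Kingdom\n}}"
assert get_basic_info(_demo_template) == "\n|Common name = United Kingdom\n"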


def get_content(string: str) -> list:
    r"""
    https://docs.python.org/3/library/re.html#regular-expression-syntax

    RE:
        - re.X (re.VERBOSE)     Allows whitespace and comments inside the pattern, so it can be annotated.
        - re.M (re.MULTILINE)   Makes '^' and '$' match at the start and end of every line, not just of the whole string.
        - re.S (re.DOTALL)      Makes '.' also match a newline ('\n').
        - ^\|       Line beginning with '|'
        - ?         Causes the resulting RE to match 0 or 1 repetitions

        - *?        The '*' qualifier is greedy.
                    Adding ? after the qualifier makes it perform the match in non-greedy or minimal fashion; as few characters as possible will be matched.
                    e.g. <.*> is matched against '<a> b <c>'
                    e.g. <.*?> will match only '<a>'

        - (...)     Matches whatever regular expression is inside the parentheses and captures it as a group.
        - (?=...)   Matches if ... matches next, but doesn’t consume any of the string. This is called a lookahead assertion.
                    For example, Isaac (?=Asimov) will match 'Isaac ' only if it’s followed by 'Asimov'.
        - (?:...)   A non-capturing version of regular parentheses.

    Input:
        - '|National emblem link=([[British coat of arms|National emblem]])'
    Return:
        - {'National emblem link': '([[British coat of arms|National emblem]])'}
    """
    pattern = re.compile(
        r"""
            ^\|         # Line starting with '|'
            (.+?)       # Capture target (field name): one or more characters, non-greedy
            \s*         # Zero or more whitespace characters
            =
            \s*         # Zero or more whitespace characters
            (.+?)       # Capture target (value): one or more characters, non-greedy
            (?:         # Start of a non-capturing group
                (?=\n\|)    # Before a newline followed by '|' (positive look-ahead)
                |           # Or
                (?=\n$)     # Before a newline at the end (positive look-ahead)
            )           # End of group
            """,
        re.MULTILINE | re.DOTALL | re.VERBOSE,
    )
    result = re.findall(pattern, string)
    return {k: v for k, v in result}  # dicts preserve insertion order since Python 3.7
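
# A toy check (not part of the original exercise) of the look-ahead logic above:
# each captured value stops right before the next line starting with '|' or
# before the final newline, so values may themselves span several lines.
_demo_fields = get_content("|Capital = [[London]]\n|Anthem = God Save the Queen\n")
assert _demo_fields == {"Capital": "[[London]]", "Anthem": "God Save the Queen"}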


def remove_markup(target: str) -> str:
    # ans26
    # Remove emphasis (quote) markup
    # e.g. "'''Great Britain'''" -> "Great Britain"
    pattern1 = re.compile(
        r"""
            (\'{2,5})   # Two to five quotes (start of markup)
            (.*?)       # Any characters, non-greedy (the emphasized text)
            (\1)        # Same quotes as the first capture (end of markup)
        """,
        re.MULTILINE | re.VERBOSE,
    )
    target = re.sub(pattern1, r"\2", target)

    # ans27
    # Remove link markup
    """
    [[London]] -> London
    [[British Prime Minister|Prime Minister]] -> Prime Minister
    [[File:Royal Coat of Arms of the United Kingdom.svg|85px|British coat of arms]] -> British coat of arms

    []  Indicates a set of characters; e.g. [(+*)] matches any of the literal characters '(', '+', '*', or ')'.
    """
    pattern2 = re.compile(
        r"""
            \[\[        # '[[' (start of markup)
            (?:         # Start of a non-capturing group
                [^|]*?  # Zero or more characters other than '|', non-greedy
                \|      # '|'
            )*?         # Repeat the group zero or more times, non-greedy
            ([^|]*?)    # Capture target: zero or more characters other than '|', non-greedy (the text to display)
            \]\]        # ']]' (end of markup)
        """,
        re.MULTILINE | re.VERBOSE,
    )
    target = re.sub(pattern2, r"\1", target)

    return target
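
# Combined sanity check on a toy string (added for illustration): the emphasis
# quotes are stripped first, then the piped link keeps only its display text.
assert remove_markup("'''[[British Prime Minister|Prime Minister]]'''") == "Prime Minister"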


# ans20
uk_text = get_uk_text("jawiki-country.json")  # See uk_text.txt

# ans25
basic_info = get_basic_info(uk_text)
fields = get_content(basic_info)  # See 25_en_basic_info.json

# ans26, ans27
result = {k: remove_markup(v) for k, v in fields.items()}  # See 26_no_markup.json
# "National emblem image": "[[File:Royal Coat of Arms of the United Kingdom.svg|85px|British coat of arms]]",
print(result["National emblem image"])
utils.save_json(result, "27_no_link.json")


# Test for 27
data = [
    ("[[London]]", "London"),
    ("[[British Prime Minister|Prime Minister]]", "Prime Minister"),
    ("[[File:Royal Coat of Arms of the United Kingdom.svg|85px|British coat of arms]]", "British coat of arms"),
    (
        "{{lang|fr|[[Dieu et mon droit]]}}<br />([[French]]:[[Dieu et mon droit|God and my rights]])",
        "{{lang|fr|Dieu et mon droit}}<br />(French:God and my rights)",
    ),
]


pattern2 = re.compile(
    r"""
        \[\[        # '[[' (start of markup)
        (?:         # Start of a non-capturing group
            [^|]*?  # Zero or more characters other than '|', non-greedy
            \|      # '|'
        )*?         # Repeat the group zero or more times, non-greedy
        ([^|]*?)    # Capture target: zero or more characters other than '|', non-greedy (the text to display)
        \]\]        # ']]' (end of markup)
    """,
    re.MULTILINE | re.VERBOSE,
)

for target, answer in data:
    assert answer == re.sub(pattern2, r"\1", target)
