"""
Twitter-aware tokenizer, designed to be flexible and easy to adapt to new
domains and tasks. The basic logic is this:

1. The tuple REGEXPS defines a list of regular expression
   strings.

2. The REGEXPS strings are put, in order, into a compiled
   regular expression object called WORD_RE, under the TweetTokenizer
   class.

3. The tokenization is done by WORD_RE.findall(s), where s is the
   user-supplied string, inside the tokenize() method of the class
   TweetTokenizer.

4. When instantiating Tokenizer objects, there are several options:
    * preserve_case. By default, it is set to True. If it is set to
      False, then the tokenizer will downcase everything except for
      emoticons.
    * reduce_len. By default, it is set to False. It specifies whether
      to replace repeated character sequences of length 3 or greater
      with sequences of length 3.
    * strip_handles. By default, it is set to False. It specifies
      whether to remove Twitter handles from the text used in the
      `tokenize` method.
    * match_phone_numbers. By default, it is set to True. It indicates
      whether the `tokenize` method should look for phone numbers.
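
    A quick doctest-style illustration of the options above (output
    hand-checked against the patterns defined below):

        >>> from nltk.tokenize.casual import TweetTokenizer
        >>> TweetTokenizer(preserve_case=False).tokenize("COOL :-D")
        ['cool', ':-D']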
    N)List)
TokenizerIac  
    (?:
      [<>]?
      [:;=8]                     # eyes
      [\-o\*\']?                 # optional nose
      [\)\]\(\[dDpP/\:\}\{@\|\\] # mouth
      |
      [\)\]\(\[dDpP/\:\}\{@\|\\] # mouth
      [\-o\*\']?                 # optional nose
      [:;=8]                     # eyes
      [<>]?
      |
      </?3                       # heart
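      # Illustrative matches (not exhaustive): ":-)", "8D", "<3"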
    )"""

# URL pattern due to John Gruber, modified by Tom Winzig. See
# https://gist.github.com/winzig/8894715
URLS = r"""			# Capture 1: entire matched URL
  (?:
  https?:				# URL protocol and colon
    (?:
      /{1,3}				# 1-3 slashes
      |					#   or
      [a-z0-9%]				# Single letter or digit or '%'
                                       # (Trying not to match e.g. "URI::Escape")
    )
    |					#   or
                                       # looks like domain name followed by a slash:
    [a-z0-9.\-]+[.]
    (?:[a-z]{2,13})
    /
  )
  (?:					# One or more:
    [^\s()<>{}\[\]]+			# Run of non-space, non-()<>{}[]
    |					#   or
    \([^\s()]*?\([^\s()]+\)[^\s()]*?\) # balanced parens, one level deep: (...(...)...)
    |
    \([^\s]+?\)				# balanced parens, non-recursive: (...)
  )+
  (?:					# End with:
    \([^\s()]*?\([^\s()]+\)[^\s()]*?\) # balanced parens, one level deep: (...(...)...)
    |
    \([^\s]+?\)				# balanced parens, non-recursive: (...)
    |					#   or
    [^\s`!()\[\]{};:'".,<>?«»“”‘’]	# not a space or one of these punct chars
  )
  |					# OR, the following to match naked domains:
  (?:
  	(?<!@)			        # not preceded by a @, avoid matching foo@_gmail.com_
    [a-z0-9]+
    (?:[.\-][a-z0-9]+)*
    [.]
    (?:[a-z]{2,13})
    \b
    /?
    (?!@)			        # not succeeded by a @,
                            # avoid matching "foo.na" in "foo.na@example.com"
  )
"""

# The following pattern matches phone numbers:
PHONE_REGEX = r"""
    (?:
      (?:            # (international)
        \+?[01]
        [ *\-.\)]*
      )?
      (?:            # (area code)
        [\(]?
        \d{3}
        [ *\-.\)]*
      )?
      \d{3}          # exchange
      [ *\-.\)]*
      \d{4}          # base
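                     # Illustrative (assumed) examples this matches:
                     # "+1 (555) 123-4567", "555.123.4567", "5551234567"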
    )"""

# The components of the tokenizer:
REGEXPS = (
    URLS,
    # ASCII emoticons:
    EMOTICONS,
    # HTML tags:
    r"""<[^>\s]+>""",
    # ASCII arrows:
    r"""[\-]+>|<[\-]+""",
    # Twitter username:
    r"""(?:@[\w_]+)""",
    # Twitter hashtags:
    r"""(?:\#+[\w_]+[\w\'_\-]*[\w_]+)""",
    # Email addresses:
    r"""[\w.+-]+@[\w-]+\.(?:[\w-]\.?)+[\w-]""",
    # Zero-width-joiner and skin-tone-modifier emoji sequences:
    """.(?:
        [\U0001F3FB-\U0001F3FF]?(?:\u200d.[\U0001F3FB-\U0001F3FF]?)+
        |
        [\U0001F3FB-\U0001F3FF]
    )""",
    # Remaining word types:
    r"""
    (?:[^\W\d_](?:[^\W\d_]|['\-_])+[^\W\d_]) # Words with apostrophes or dashes.
    |
    (?:[+\-]?\d+[,/.:-]\d+[+\-]?)  # Numbers, including fractions, decimals.
    |
    (?:[\w_]+)                     # Words without apostrophes or dashes.
    |
    (?:\.(?:\s*\.){1,})            # Ellipsis dots.
    |
    (?:\S)                         # Everything else that isn't whitespace.
       z([^a-zA-Z0-9])\1{3,}z&(#?(x?))([^&;\s]+);zZ(?<![A-Za-z0-9_!@#\$%&*])@(([A-Za-z0-9_]){15}(?!@)|([A-Za-z0-9_]){1,14}(?![A-Za-z0-9_]*@))strictc             C   s&   |d krd}t | tr"| j||S | S )Nzutf-8)
isinstancebytesdecode)textencodingerrors r   4/tmp/pip-build-v9q4h5k9/nltk/nltk/tokenize/casual.py_str_to_unicode   s
    
r   Tzutf-8c                s     fdd}t j|t| |S )u  
    Remove entities from text by converting them to their
    corresponding unicode character.

    :param text: a unicode string or a byte string encoded in the given
    `encoding` (which defaults to 'utf-8').

    :param list keep:  list of entity names which should not be replaced.\
    This supports both numeric entities (``&#nnnn;`` and ``&#hhhh;``)
    and named entities (such as ``&nbsp;`` or ``&gt;``).

    :param bool remove_illegal: If `True`, entities that can't be converted are\
    removed. Otherwise, entities that can't be converted are kept "as
    is".

    :returns: A unicode string with the entities removed.

    See https://github.com/scrapy/w3lib/blob/master/w3lib/html.py

        >>> from nltk.tokenize.casual import _replace_html_entities
        >>> _replace_html_entities(b'Price: &pound;100')
        'Price: \\xa3100'
        >>> print(_replace_html_entities(b'Price: &pound;100'))
        Price: £100
        >>>
    """

    def _convert_entity(match):
        entity_body = match.group(3)
        if match.group(1):
            try:
                if match.group(2):
                    number = int(entity_body, 16)
                else:
                    number = int(entity_body, 10)
                # Numeric character references in the 80-9F range are typically
                # interpreted by browsers as representing the characters mapped
                # to bytes 80-9F in the Windows-1252 encoding.
                if 0x80 <= number <= 0x9F:
                    return bytes((number,)).decode("cp1252")
            except ValueError:
                number = None
        else:
            if entity_body in keep:
                return match.group(0)
            number = html.entities.name2codepoint.get(entity_body)
        if number is not None:
            try:
                return chr(number)
            except (ValueError, OverflowError):
                pass

        return "" if remove_illegal else match.group(0)

    return ENT_RE.sub(_convert_entity, _str_to_unicode(text, encoding))


######################################################################


class TweetTokenizer(TokenizerI):
    r"""
    Tokenizer for tweets.

        >>> from nltk.tokenize import TweetTokenizer
        >>> tknzr = TweetTokenizer()
        >>> s0 = "This is a cooool #dummysmiley: :-) :-P <3 and some arrows < > -> <--"
        >>> tknzr.tokenize(s0)
        ['This', 'is', 'a', 'cooool', '#dummysmiley', ':', ':-)', ':-P', '<3'
        , 'and', 'some', 'arrows', '<', '>', '->', '<--']

    Examples using the `strip_handles` and `reduce_len` parameters:

        >>> tknzr = TweetTokenizer(strip_handles=True, reduce_len=True)
        >>> s1 = '@remy: This is waaaaayyyy too much for you!!!!!!'
        >>> tknzr.tokenize(s1)
        [':', 'This', 'is', 'waaayyy', 'too', 'much', 'for', 'you', '!', '!', '!']
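
    A further doctest-style sketch using `match_phone_numbers` (output
    hand-checked against PHONE_REGEX above):

        >>> tknzr = TweetTokenizer(match_phone_numbers=True)
        >>> tknzr.tokenize("Call 555-123-4567 today!")
        ['Call', '555-123-4567', 'today', '!']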
    """

    # Values used to lazily compile WORD_RE and PHONE_WORD_RE,
    # which are the core tokenizing regexes.
    _WORD_RE = None
    _PHONE_WORD_RE = None

    def __init__(
        self,
        preserve_case=True,
        reduce_len=False,
        strip_handles=False,
        match_phone_numbers=True,
    ):
        """
        Create a `TweetTokenizer` instance with settings for use in the `tokenize` method.

        :param preserve_case: Flag indicating whether to preserve the casing (capitalisation)
            of text used in the `tokenize` method. Defaults to True.
        :type preserve_case: bool
        :param reduce_len: Flag indicating whether to replace repeated character sequences
            of length 3 or greater with sequences of length 3. Defaults to False.
        :type reduce_len: bool
        :param strip_handles: Flag indicating whether to remove Twitter handles from text used
            in the `tokenize` method. Defaults to False.
        :type strip_handles: bool
        :param match_phone_numbers: Flag indicating whether the `tokenize` method should look
            for phone numbers. Defaults to True.
        :type match_phone_numbers: bool
        N)preserve_case
reduce_lenstrip_handlesmatch_phone_numbers)selfr)   r*   r+   r,   r   r   r   __init__3  s    zTweetTokenizer.__init__)r	   returnc             C   sl   t |}| jrt|}| jr$t|}tjd|}| jrD| jj	|}n| j
j	|}| jshttdd |}|S )zTokenize the input text.

        :param text: str
        :rtype: list(str)
        :return: a tokenized list of strings; joining this list returns        the original string if `preserve_case=False`.
        z\1\1\1c             S   s   t j| r| S | j S )N)EMOTICON_REsearchlower)xr   r   r   <lambda>i  s    z)TweetTokenizer.tokenize.<locals>.<lambda>)r'   r+   remove_handlesr*   reduce_lengtheningHANG_REr&   r,   PHONE_WORD_REfindallWORD_REr)   listmap)r-   r	   Z	safe_textwordsr   r   r   tokenizeO  s    	zTweetTokenizer.tokenizezregex.Pattern)r/   c             C   sB   t | js8tjddjt dtjtjB tjB t | _t | jS )zCore TweetTokenizer regex(|))	type_WORD_REregexcompilejoinREGEXPSVERBOSEIUNICODE)r-   r   r   r   r:   m  s
    
zTweetTokenizer.WORD_REc             C   sB   t | js8tjddjt dtjtjB tjB t | _t | jS )z#Secondary core TweetTokenizer regexr?   r@   rA   )	rB   _PHONE_WORD_RErD   rE   rF   REGEXPS_PHONErH   rI   rJ   )r-   r   r   r   r8   x  s
    
zTweetTokenizer.PHONE_WORD_RE)TFFT)__name__
__module____qualname____doc__rC   rK   r.   strr   r>   propertyr:   r8   r   r   r   r   r(     s      

def reduce_lengthening(text):
    """
    Replace repeated character sequences of length 3 or greater with sequences
    of length 3.
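
    A doctest-style sketch (hand-checked against the pattern below):

        >>> reduce_lengthening("waaaaayyyy")
        'waaayyy'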
    """
    pattern = regex.compile(r"(.)\1{2,}")
    return pattern.sub(r"\1\1\1", text)


def remove_handles(text):
    """
    Remove Twitter username handles from text.
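
    Handles are replaced with a single space; a doctest-style sketch:

        >>> remove_handles("@remy: This is waaaaayyyy too much")
        ' : This is waaaaayyyy too much'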
     )
HANDLES_REr&   )r	   r   r   r   r5     s    r5   Fc             C   s   t ||||dj| S )z:
    Convenience function for wrapping the tokenizer.
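
    A doctest-style sketch (hand-checked against the class above):

        >>> casual_tokenize("@remy Waaaaayyyy #cool!", strip_handles=True, reduce_len=True)
        ['Waaayyy', '#cool', '!']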
    """
    return TweetTokenizer(
        preserve_case=preserve_case,
        reduce_len=reduce_len,
        strip_handles=strip_handles,
        match_phone_numbers=match_phone_numbers,
    ).tokenize(text)
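

# A minimal usage sketch, not part of the original module: run this file
# directly to exercise the tokenizer on an invented sample tweet.
if __name__ == "__main__":
    tknzr = TweetTokenizer(strip_handles=True, reduce_len=True)
    print(tknzr.tokenize("@remy: This is waaaaayyyy too much for you!!!!!!"))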