# Source code for requests.utils (utility helpers for the Requests library).

"""
requests.utils
~~~~~~~~~~~~~~

This module provides utility functions that are used within Requests
that are also useful for external consumption.
"""

import codecs
import contextlib
import io
import os
import re
import socket
import struct
import sys
import tempfile
import warnings
import zipfile
from collections import OrderedDict

from urllib3.util import make_headers, parse_url

from . import certs
from .__version__ import __version__

# to_native_string is unused here, but imported here for backwards compatibility
from ._internal_utils import HEADER_VALIDATORS, to_native_string  # noqa: F401
from .compat import (
    Mapping,
    basestring,
    bytes,
    getproxies,
    getproxies_environment,
    integer_types,
)
from .compat import parse_http_list as _parse_list_header
from .compat import (
    proxy_bypass,
    proxy_bypass_environment,
    quote,
    str,
    unquote,
    urlparse,
    urlunparse,
)
from .cookies import cookiejar_from_dict
from .exceptions import (
    FileModeWarning,
    InvalidHeader,
    InvalidURL,
    UnrewindableBodyError,
)
from .structures import CaseInsensitiveDict

# Candidate netrc file names, looked up under $HOME (see get_netrc_auth).
NETRC_FILES = [".netrc", "_netrc"]

# CA bundle shipped via requests.certs (certifi).
DEFAULT_CA_BUNDLE_PATH = certs.where()

# Default ports implied by a URL scheme when none is given explicitly.
DEFAULT_PORTS = {"http": 80, "https": 443}

# Ensure that ', ' is used to preserve previous delimiter behavior.
DEFAULT_ACCEPT_ENCODING = ", ".join(
    re.split(r",\s*", make_headers(accept_encoding=True)["accept-encoding"])
)


if sys.platform == "win32":
    # provide a proxy_bypass version on Windows without DNS lookups

    def proxy_bypass_registry(host):
        """Return True if *host* matches the Windows ProxyOverride registry list."""
        try:
            import winreg
        except ImportError:
            return False

        try:
            internetSettings = winreg.OpenKey(
                winreg.HKEY_CURRENT_USER,
                r"Software\Microsoft\Windows\CurrentVersion\Internet Settings",
            )
            # ProxyEnable could be REG_SZ or REG_DWORD, normalizing it
            proxyEnable = int(winreg.QueryValueEx(internetSettings, "ProxyEnable")[0])
            # ProxyOverride is almost always a string
            proxyOverride = winreg.QueryValueEx(internetSettings, "ProxyOverride")[0]
        except (OSError, ValueError):
            return False
        if not proxyEnable or not proxyOverride:
            return False

        # make a check value list from the registry entry: replace the
        # '<local>' string by the localhost entry and the corresponding
        # canonical entry.
        proxyOverride = proxyOverride.split(";")
        # now check if we match one of the registry values.
        for test in proxyOverride:
            # "<local>" is the registry's marker for "bypass for plain hostnames"
            if test == "<local>":
                if "." not in host:
                    return True
            test = test.replace(".", r"\.")  # mask dots
            test = test.replace("*", r".*")  # change glob sequence
            test = test.replace("?", r".")  # change glob char
            if re.match(test, host, re.I):
                return True
        return False

    def proxy_bypass(host):  # noqa
        """Return True, if the host should be bypassed.

        Checks proxy settings gathered from the environment, if specified,
        or the registry.
        """
        if getproxies_environment():
            return proxy_bypass_environment(host)
        else:
            return proxy_bypass_registry(host)


def dict_to_sequence(d):
    """Returns an internal sequence dictionary update.

    If *d* has an ``items`` method, return its items view; otherwise
    return *d* unchanged (it is assumed to already be a sequence of pairs).
    """

    if hasattr(d, "items"):
        d = d.items()

    return d


def super_len(o):
    """Best-effort remaining-byte count for *o*.

    Works for anything with ``__len__``, a ``len`` attribute, a real file
    descriptor (``fileno``), or a seekable file-like object; accounts for
    the current read position via ``tell``. Returns 0 when the length
    cannot be determined.
    """
    total_length = None
    current_position = 0

    if hasattr(o, "__len__"):
        total_length = len(o)

    elif hasattr(o, "len"):
        total_length = o.len

    elif hasattr(o, "fileno"):
        try:
            fileno = o.fileno()
        except (io.UnsupportedOperation, AttributeError):
            # AttributeError is a surprising exception, seeing as how we've just checked
            # that `hasattr(o, 'fileno')`.  It happens for objects obtained via
            # `Tarfile.extractfile()`, per issue 5229.
            pass
        else:
            total_length = os.fstat(fileno).st_size

            # Having used fstat to determine the file length, we need to
            # confirm that this file was opened up in binary mode.
            if "b" not in o.mode:
                warnings.warn(
                    (
                        "Requests has determined the content-length for this "
                        "request using the binary size of the file: however, the "
                        "file has been opened in text mode (i.e. without the 'b' "
                        "flag in the mode). This may lead to an incorrect "
                        "content-length. In Requests 3.0, support will be removed "
                        "for files in text mode."
                    ),
                    FileModeWarning,
                )

    if hasattr(o, "tell"):
        try:
            current_position = o.tell()
        except OSError:
            # This can happen in some weird situations, such as when the file
            # is actually a special file descriptor like stdin. In this
            # instance, we don't know what the length is, so set it to zero and
            # let requests chunk it instead.
            if total_length is not None:
                current_position = total_length
        else:
            if hasattr(o, "seek") and total_length is None:
                # StringIO and BytesIO have seek but no usable fileno
                try:
                    # seek to end of file
                    o.seek(0, 2)
                    total_length = o.tell()

                    # seek back to current position to support
                    # partially read file-like objects
                    o.seek(current_position or 0)
                except OSError:
                    total_length = 0

    if total_length is None:
        total_length = 0

    return max(0, total_length - current_position)


def get_netrc_auth(url, raise_errors=False):
    """Returns the Requests tuple auth for a given url from netrc.

    :param url: URL whose host is looked up in the netrc file.
    :param raise_errors: if True, re-raise netrc parse/permission errors
        instead of silently skipping netrc auth.
    :rtype: tuple or None
    """

    netrc_file = os.environ.get("NETRC")
    if netrc_file is not None:
        netrc_locations = (netrc_file,)
    else:
        netrc_locations = (f"~/{f}" for f in NETRC_FILES)

    try:
        from netrc import NetrcParseError, netrc

        netrc_path = None

        for f in netrc_locations:
            try:
                loc = os.path.expanduser(f)
            except KeyError:
                # os.path.expanduser can fail when $HOME is undefined and
                # getpwuid fails. See https://bugs.python.org/issue20164 &
                # https://github.com/psf/requests/issues/1846
                return

            if os.path.exists(loc):
                netrc_path = loc
                break

        # Abort early if there isn't one.
        if netrc_path is None:
            return

        ri = urlparse(url)

        # Strip port numbers from netloc. This weird `if...encode`` dance is
        # used for Python 3.2, which doesn't support unicode literals.
        splitstr = b":"
        if isinstance(url, str):
            splitstr = splitstr.decode("ascii")
        host = ri.netloc.split(splitstr)[0]

        try:
            _netrc = netrc(netrc_path).authenticators(host)
            if _netrc:
                # Return with login / password
                login_i = 0 if _netrc[0] else 1
                return (_netrc[login_i], _netrc[2])
        except (NetrcParseError, OSError):
            # If there was a parsing error or a permissions issue reading the file,
            # we'll just skip netrc auth unless explicitly asked to raise errors.
            if raise_errors:
                raise

    # App Engine hackiness.
    except (ImportError, AttributeError):
        pass


def guess_filename(obj):
    """Tries to guess the filename of the given object."""
    name = getattr(obj, "name", None)
    # Skip pseudo-names like "<stdin>" / "<fdopen>" that file objects report;
    # the original check for "<" and ">" was lost in transit and made the
    # condition vacuously true.
    if name and isinstance(name, basestring) and name[0] != "<" and name[-1] != ">":
        return os.path.basename(name)


def extract_zipped_paths(path):
    """Replace nonexistent paths that look like they refer to a member of a zip
    archive with the location of an extracted copy of the target, or else
    just return the provided path unchanged.
    """
    if os.path.exists(path):
        # this is already a valid path, no need to do anything further
        return path

    # find the first valid part of the provided path and treat that as a zip archive
    # assume the rest of the path is the name of a member in the archive
    archive, member = os.path.split(path)
    while archive and not os.path.exists(archive):
        archive, prefix = os.path.split(archive)
        if not prefix:
            # If we don't check for an empty prefix after the split (in other words, archive remains unchanged after the split),
            # we _can_ end up in an infinite loop on a rare corner case affecting a small number of users
            break
        member = "/".join([prefix, member])

    if not zipfile.is_zipfile(archive):
        return path

    zip_file = zipfile.ZipFile(archive)
    if member not in zip_file.namelist():
        return path

    # we have a valid zip archive and a valid member of that archive
    tmp = tempfile.gettempdir()
    extracted_path = os.path.join(tmp, member.split("/")[-1])
    if not os.path.exists(extracted_path):
        # use read + write to avoid the creating nested folders, we only want the file, avoids mkdir racing condition
        with atomic_open(extracted_path) as file_handler:
            file_handler.write(zip_file.read(member))
    return extracted_path


@contextlib.contextmanager
def atomic_open(filename):
    """Write a file to the disk in an atomic fashion.

    Yields a binary write handle on a temp file in the target directory;
    on clean exit the temp file is atomically renamed over *filename*,
    otherwise the temp file is removed and the exception re-raised.
    """
    tmp_descriptor, tmp_name = tempfile.mkstemp(dir=os.path.dirname(filename))
    try:
        with os.fdopen(tmp_descriptor, "wb") as tmp_handler:
            yield tmp_handler
        # os.replace is atomic on POSIX and Windows alike.
        os.replace(tmp_name, filename)
    except BaseException:
        os.remove(tmp_name)
        raise


def from_key_val_list(value):
    """Take an object and test to see if it can be represented as a
    dictionary. Unless it can not be represented as such, return an
    OrderedDict, e.g.,

    ::

        >>> from_key_val_list([('key', 'val')])
        OrderedDict([('key', 'val')])
        >>> from_key_val_list('string')
        Traceback (most recent call last):
        ...
        ValueError: cannot encode objects that are not 2-tuples
        >>> from_key_val_list({'key': 'val'})
        OrderedDict([('key', 'val')])

    :rtype: OrderedDict
    """
    if value is None:
        return None

    if isinstance(value, (str, bytes, bool, int)):
        raise ValueError("cannot encode objects that are not 2-tuples")

    return OrderedDict(value)


def to_key_val_list(value):
    """Take an object and test to see if it can be represented as a
    dictionary. If it can be, return a list of tuples, e.g.,

    ::

        >>> to_key_val_list([('key', 'val')])
        [('key', 'val')]
        >>> to_key_val_list({'key': 'val'})
        [('key', 'val')]
        >>> to_key_val_list('string')
        Traceback (most recent call last):
        ...
        ValueError: cannot encode objects that are not 2-tuples

    :rtype: list
    """
    if value is None:
        return None

    if isinstance(value, (str, bytes, bool, int)):
        raise ValueError("cannot encode objects that are not 2-tuples")

    if isinstance(value, Mapping):
        value = value.items()

    return list(value)


# From mitsuhiko/werkzeug (used with permission).
def parse_list_header(value):
    """Parse lists as described by RFC 2068 Section 2.

    In particular, parse comma-separated lists where the elements of
    the list may include quoted-strings.  A quoted-string could
    contain a comma.  A non-quoted string could have quotes in the
    middle.  Quotes are removed automatically after parsing.

    It basically works like :func:`parse_set_header` just that items
    may appear multiple times and case sensitivity is preserved.

    The return value is a standard :class:`list`:

    >>> parse_list_header('token, "quoted value"')
    ['token', 'quoted value']

    To create a header from the :class:`list` again, use the
    :func:`dump_header` function.

    :param value: a string with a list header.
    :return: :class:`list`
    :rtype: list
    """
    result = []
    for item in _parse_list_header(value):
        # Strip surrounding quotes (and their internal escaping) from
        # quoted-string items.
        if item[:1] == item[-1:] == '"':
            item = unquote_header_value(item[1:-1])
        result.append(item)
    return result


# From mitsuhiko/werkzeug (used with permission).
def parse_dict_header(value):
    """Parse lists of key, value pairs as described by RFC 2068 Section 2 and
    convert them into a python dict:

    >>> d = parse_dict_header('foo="is a fish", bar="as well"')
    >>> type(d) is dict
    True
    >>> sorted(d.items())
    [('bar', 'as well'), ('foo', 'is a fish')]

    If there is no value for a key it will be `None`:

    >>> parse_dict_header('key_without_value')
    {'key_without_value': None}

    To create a header from the :class:`dict` again, use the
    :func:`dump_header` function.

    :param value: a string with a dict header.
    :return: :class:`dict`
    :rtype: dict
    """
    result = {}
    for item in _parse_list_header(value):
        # Items without '=' are flags: present, but valueless.
        if "=" not in item:
            result[item] = None
            continue
        name, value = item.split("=", 1)
        # Strip surrounding quotes from quoted-string values.
        if value[:1] == value[-1:] == '"':
            value = unquote_header_value(value[1:-1])
        result[name] = value
    return result


# From mitsuhiko/werkzeug (used with permission).
def unquote_header_value(value, is_filename=False):
    r"""Unquotes a header value.  (Reversal of :func:`quote_header_value`).
    This does not use the real unquoting but what browsers are actually
    using for quoting.

    :param value: the header value to unquote.
    :param is_filename: treat the value as a filename (preserve UNC paths).
    :rtype: str
    """
    if value and value[0] == value[-1] == '"':
        # this is not the real unquoting, but fixing this so that the
        # RFC is met will result in bugs with internet explorer and
        # probably some other browsers as well.  IE for example is
        # uploading files with "C:\foo\bar.txt" as filename
        value = value[1:-1]

        # if this is a filename and the starting characters look like
        # a UNC path, then just return the value without quotes.  Using the
        # replace sequence below on a UNC path has the effect of turning
        # the leading double slash into a single slash and then
        # _fix_ie_filename() doesn't work correctly.  See #458.
        if not is_filename or value[:2] != "\\\\":
            return value.replace("\\\\", "\\").replace('\\"', '"')
    return value


def dict_from_cookiejar(cj):
    """Returns a key/value dictionary from a CookieJar.

    :param cj: CookieJar object to extract cookies from.
    :rtype: dict
    """
    cookie_dict = {}
    for cookie in cj:
        cookie_dict[cookie.name] = cookie.value
    return cookie_dict

def add_dict_to_cookiejar(cj, cookie_dict):
    """Returns a CookieJar from a key/value dictionary.

    :param cj: CookieJar to insert cookies into.
    :param cookie_dict: Dict of key/values to insert into CookieJar.
    :rtype: CookieJar
    """
    return cookiejar_from_dict(cookie_dict, cj)

def get_encodings_from_content(content):
    """Returns encodings from given content string.

    :param content: bytestring to extract encodings from.
    """
    warnings.warn(
        (
            "In requests 3.0, get_encodings_from_content will be removed. For "
            "more information, please see the discussion on issue #2266. (This"
            " warning should only appear once.)"
        ),
        DeprecationWarning,
    )

    # NOTE(review): the regex literals were destroyed by HTML stripping in the
    # scraped source; reconstructed to match <meta charset>, http-equiv pragma,
    # and the XML declaration — confirm against upstream requests.
    charset_re = re.compile(r'<meta.*?charset=["\']*(.+?)["\'>]', flags=re.I)
    pragma_re = re.compile(r'<meta.*?content=["\']*;?charset=(.+?)["\'>]', flags=re.I)
    xml_re = re.compile(r'^<\?xml.*?encoding=["\']*(.+?)["\'>]')

    return (
        charset_re.findall(content)
        + pragma_re.findall(content)
        + xml_re.findall(content)
    )

def _parse_content_type_header[header]: """Returns content type and parameters from given header :param header: string :return: tuple containing content type and dictionary of parameters """ tokens = header.split[";"] content_type, params = tokens[0].strip[], tokens[1:] params_dict = {} items_to_strip = "\"' " for param in params: param = param.strip[] if param: key, value = param, True index_of_equals = param.find["="] if index_of_equals != -1: key = param[:index_of_equals].strip[items_to_strip] value = param[index_of_equals + 1 :].strip[items_to_strip] params_dict[key.lower[]] = value return content_type, params_dict

def get_encoding_from_headers(headers):
    """Returns encodings from given HTTP Header Dict.

    :param headers: dictionary to extract encoding from.
    :rtype: str
    """
    content_type = headers.get("content-type")

    if not content_type:
        return None

    content_type, params = _parse_content_type_header(content_type)

    if "charset" in params:
        return params["charset"].strip("'\"")

    if "text" in content_type:
        return "ISO-8859-1"

    if "application/json" in content_type:
        # Assume UTF-8 based on RFC 4627: https://www.ietf.org/rfc/rfc4627.txt since the charset was unset
        return "utf-8"

def stream_decode_response_unicode(iterator, r):
    """Stream decodes an iterator.

    Yields the chunks of *iterator* unchanged when ``r.encoding`` is None;
    otherwise incrementally decodes them to str using that encoding with
    ``errors="replace"``.
    """
    if r.encoding is None:
        yield from iterator
        return

    decoder = codecs.getincrementaldecoder(r.encoding)(errors="replace")
    for chunk in iterator:
        rv = decoder.decode(chunk)
        if rv:
            yield rv
    # Flush any bytes buffered by a multi-byte sequence split across chunks.
    rv = decoder.decode(b"", final=True)
    if rv:
        yield rv


def iter_slices(string, slice_length):
    """Iterate over slices of a string.

    NOTE(review): the tail of this function was truncated in the scraped
    source; reconstructed from upstream requests — confirm against upstream.
    """
    pos = 0
    if slice_length is None or slice_length <= 0:
        slice_length = len(string)
    while pos < len(string):
        yield string[pos : pos + slice_length]
        pos += slice_length

# (end of excerpt)