path: root/lib/python2.7/httplib.py
author     rahulp13  2020-03-17 14:55:41 +0530
committer  rahulp13  2020-03-17 14:55:41 +0530
commit     296443137f4288cb030e92859ccfbe3204bc1088 (patch)
tree       ca4798c2da1e7244edc3bc108d81b462b537aea2  /lib/python2.7/httplib.py
parent     0db48f6533517ecebfd9f0693f89deca28408b76 (diff)
initial commit
Diffstat (limited to 'lib/python2.7/httplib.py')
-rw-r--r--  lib/python2.7/httplib.py  1430
1 file changed, 1430 insertions, 0 deletions
diff --git a/lib/python2.7/httplib.py b/lib/python2.7/httplib.py
new file mode 100644
index 0000000..f3bb22c
--- /dev/null
+++ b/lib/python2.7/httplib.py
@@ -0,0 +1,1430 @@
+r"""HTTP/1.1 client library
+
+<intro stuff goes here>
+<other stuff, too>
+
+HTTPConnection goes through a number of "states", which define when a client
+may legally make another request or fetch the response for a particular
+request. This diagram details these state transitions:
+
+ (null)
+ |
+ | HTTPConnection()
+ v
+ Idle
+ |
+ | putrequest()
+ v
+ Request-started
+ |
+ | ( putheader() )* endheaders()
+ v
+ Request-sent
+ |
+ | response = getresponse()
+ v
+ Unread-response [Response-headers-read]
+ |\____________________
+ | |
+ | response.read() | putrequest()
+ v v
+ Idle Req-started-unread-response
+ ______/|
+ / |
+ response.read() | | ( putheader() )* endheaders()
+ v v
+ Request-started Req-sent-unread-response
+ |
+ | response.read()
+ v
+ Request-sent
+
+This diagram presents the following rules:
+ -- a second request may not be started until {response-headers-read}
+ -- a response [object] cannot be retrieved until {request-sent}
+ -- there is no differentiation between an unread response body and a
+ partially read response body
+
+Note: this enforcement is applied by the HTTPConnection class. The
+ HTTPResponse class does not enforce this state machine, which
+ implies sophisticated clients may accelerate the request/response
+ pipeline. Caution should be taken, though: accelerating the states
+ beyond the above pattern may imply knowledge of the server's
+ connection-close behavior for certain requests. For example, it
+ is impossible to tell whether the server will close the connection
+ UNTIL the response headers have been read; this means that further
+ requests cannot be placed into the pipeline until it is known that
+ the server will NOT be closing the connection.
+
+Logical State __state __response
+------------- ------- ----------
+Idle _CS_IDLE None
+Request-started _CS_REQ_STARTED None
+Request-sent _CS_REQ_SENT None
+Unread-response _CS_IDLE <response_class>
+Req-started-unread-response _CS_REQ_STARTED <response_class>
+Req-sent-unread-response _CS_REQ_SENT <response_class>
+"""
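# --- Editorial usage sketch, not part of the diff: the state machine above in
# use. Assumes "www.example.com" is reachable; the path is illustrative only.
from httplib import HTTPConnection

conn = HTTPConnection("www.example.com")   # (null) -> Idle
conn.putrequest("GET", "/index.html")      # Idle -> Request-started
conn.putheader("Accept", "text/html")      # ( putheader() )*
conn.endheaders()                          # Request-started -> Request-sent
resp = conn.getresponse()                  # Request-sent -> Unread-response
print resp.status, resp.reason
body = resp.read()                         # reading the body returns to Idle
conn.close()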
+
+from array import array
+import os
+import re
+import socket
+from sys import py3kwarning
+from urlparse import urlsplit
+import warnings
+with warnings.catch_warnings():
+ if py3kwarning:
+ warnings.filterwarnings("ignore", ".*mimetools has been removed",
+ DeprecationWarning)
+ import mimetools
+
+try:
+ from cStringIO import StringIO
+except ImportError:
+ from StringIO import StringIO
+
+__all__ = ["HTTP", "HTTPResponse", "HTTPConnection",
+ "HTTPException", "NotConnected", "UnknownProtocol",
+ "UnknownTransferEncoding", "UnimplementedFileMode",
+ "IncompleteRead", "InvalidURL", "ImproperConnectionState",
+ "CannotSendRequest", "CannotSendHeader", "ResponseNotReady",
+ "BadStatusLine", "error", "responses"]
+
+HTTP_PORT = 80
+HTTPS_PORT = 443
+
+_UNKNOWN = 'UNKNOWN'
+
+# connection states
+_CS_IDLE = 'Idle'
+_CS_REQ_STARTED = 'Request-started'
+_CS_REQ_SENT = 'Request-sent'
+
+# status codes
+# informational
+CONTINUE = 100
+SWITCHING_PROTOCOLS = 101
+PROCESSING = 102
+
+# successful
+OK = 200
+CREATED = 201
+ACCEPTED = 202
+NON_AUTHORITATIVE_INFORMATION = 203
+NO_CONTENT = 204
+RESET_CONTENT = 205
+PARTIAL_CONTENT = 206
+MULTI_STATUS = 207
+IM_USED = 226
+
+# redirection
+MULTIPLE_CHOICES = 300
+MOVED_PERMANENTLY = 301
+FOUND = 302
+SEE_OTHER = 303
+NOT_MODIFIED = 304
+USE_PROXY = 305
+TEMPORARY_REDIRECT = 307
+
+# client error
+BAD_REQUEST = 400
+UNAUTHORIZED = 401
+PAYMENT_REQUIRED = 402
+FORBIDDEN = 403
+NOT_FOUND = 404
+METHOD_NOT_ALLOWED = 405
+NOT_ACCEPTABLE = 406
+PROXY_AUTHENTICATION_REQUIRED = 407
+REQUEST_TIMEOUT = 408
+CONFLICT = 409
+GONE = 410
+LENGTH_REQUIRED = 411
+PRECONDITION_FAILED = 412
+REQUEST_ENTITY_TOO_LARGE = 413
+REQUEST_URI_TOO_LONG = 414
+UNSUPPORTED_MEDIA_TYPE = 415
+REQUESTED_RANGE_NOT_SATISFIABLE = 416
+EXPECTATION_FAILED = 417
+UNPROCESSABLE_ENTITY = 422
+LOCKED = 423
+FAILED_DEPENDENCY = 424
+UPGRADE_REQUIRED = 426
+
+# server error
+INTERNAL_SERVER_ERROR = 500
+NOT_IMPLEMENTED = 501
+BAD_GATEWAY = 502
+SERVICE_UNAVAILABLE = 503
+GATEWAY_TIMEOUT = 504
+HTTP_VERSION_NOT_SUPPORTED = 505
+INSUFFICIENT_STORAGE = 507
+NOT_EXTENDED = 510
+
+# Mapping status codes to official W3C names
+responses = {
+ 100: 'Continue',
+ 101: 'Switching Protocols',
+
+ 200: 'OK',
+ 201: 'Created',
+ 202: 'Accepted',
+ 203: 'Non-Authoritative Information',
+ 204: 'No Content',
+ 205: 'Reset Content',
+ 206: 'Partial Content',
+
+ 300: 'Multiple Choices',
+ 301: 'Moved Permanently',
+ 302: 'Found',
+ 303: 'See Other',
+ 304: 'Not Modified',
+ 305: 'Use Proxy',
+ 306: '(Unused)',
+ 307: 'Temporary Redirect',
+
+ 400: 'Bad Request',
+ 401: 'Unauthorized',
+ 402: 'Payment Required',
+ 403: 'Forbidden',
+ 404: 'Not Found',
+ 405: 'Method Not Allowed',
+ 406: 'Not Acceptable',
+ 407: 'Proxy Authentication Required',
+ 408: 'Request Timeout',
+ 409: 'Conflict',
+ 410: 'Gone',
+ 411: 'Length Required',
+ 412: 'Precondition Failed',
+ 413: 'Request Entity Too Large',
+ 414: 'Request-URI Too Long',
+ 415: 'Unsupported Media Type',
+ 416: 'Requested Range Not Satisfiable',
+ 417: 'Expectation Failed',
+
+ 500: 'Internal Server Error',
+ 501: 'Not Implemented',
+ 502: 'Bad Gateway',
+ 503: 'Service Unavailable',
+ 504: 'Gateway Timeout',
+ 505: 'HTTP Version Not Supported',
+}
+
+# maximal amount of data to read at one time in _safe_read
+MAXAMOUNT = 1048576
+
+# maximal line length when calling readline().
+_MAXLINE = 65536
+
+# maximum amount of headers accepted
+_MAXHEADERS = 100
+
+# Header name/value ABNF (http://tools.ietf.org/html/rfc7230#section-3.2)
+#
+# VCHAR = %x21-7E
+# obs-text = %x80-FF
+# header-field = field-name ":" OWS field-value OWS
+# field-name = token
+# field-value = *( field-content / obs-fold )
+# field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ]
+# field-vchar = VCHAR / obs-text
+#
+# obs-fold = CRLF 1*( SP / HTAB )
+# ; obsolete line folding
+# ; see Section 3.2.4
+
+# token = 1*tchar
+#
+# tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*"
+# / "+" / "-" / "." / "^" / "_" / "`" / "|" / "~"
+# / DIGIT / ALPHA
+# ; any VCHAR, except delimiters
+#
+# VCHAR defined in http://tools.ietf.org/html/rfc5234#appendix-B.1
+
+# the patterns for both name and value are more lenient than RFC
+# definitions to allow for backwards compatibility
+_is_legal_header_name = re.compile(r'\A[^:\s][^:\r\n]*\Z').match
+_is_illegal_header_value = re.compile(r'\n(?![ \t])|\r(?![ \t\n])').search
+
+# We always set the Content-Length header for these methods because some
+# servers will otherwise respond with a 411
+_METHODS_EXPECTING_BODY = {'PATCH', 'POST', 'PUT'}
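# --- Editorial sketch, not part of the diff: what the lenient checks above
# accept and reject. The two helpers are module-private; shown for
# illustration only.
from httplib import _is_legal_header_name, _is_illegal_header_value

assert _is_legal_header_name("X-Custom-Header")
assert not _is_legal_header_name("Bad:Name")            # ':' not allowed in a name
assert not _is_legal_header_name(" Leading-Space")      # must not start with whitespace
assert _is_illegal_header_value("evil\r\nInjected: 1")  # bare CRLF = header injection
assert not _is_illegal_header_value("ok\r\n\tfolded")   # obs-fold continuation is tolerated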
+
+
+class HTTPMessage(mimetools.Message):
+
+ def addheader(self, key, value):
+        """Add a header for field `key', handling repeated fields."""
+ prev = self.dict.get(key)
+ if prev is None:
+ self.dict[key] = value
+ else:
+ combined = ", ".join((prev, value))
+ self.dict[key] = combined
+
+ def addcontinue(self, key, more):
+ """Add more field data from a continuation line."""
+ prev = self.dict[key]
+ self.dict[key] = prev + "\n " + more
+
+ def readheaders(self):
+ """Read header lines.
+
+ Read header lines up to the entirely blank line that terminates them.
+ The (normally blank) line that ends the headers is skipped, but not
+ included in the returned list. If an invalid line is found in the
+ header section, it is skipped, and further lines are processed.
+
+ The variable self.status is set to the empty string if all went well,
+ otherwise it is an error message. The variable self.headers is a
+ completely uninterpreted list of lines contained in the header (so
+ printing them will reproduce the header exactly as it appears in the
+ file).
+
+ If multiple header fields with the same name occur, they are combined
+ according to the rules in RFC 2616 sec 4.2:
+
+ Appending each subsequent field-value to the first, each separated
+ by a comma. The order in which header fields with the same field-name
+ are received is significant to the interpretation of the combined
+ field value.
+ """
+ # XXX The implementation overrides the readheaders() method of
+ # rfc822.Message. The base class design isn't amenable to
+ # customized behavior here so the method here is a copy of the
+ # base class code with a few small changes.
+
+ self.dict = {}
+ self.unixfrom = ''
+ self.headers = hlist = []
+ self.status = ''
+ headerseen = ""
+ firstline = 1
+ tell = None
+ if not hasattr(self.fp, 'unread') and self.seekable:
+ tell = self.fp.tell
+ while True:
+ if len(hlist) > _MAXHEADERS:
+ raise HTTPException("got more than %d headers" % _MAXHEADERS)
+ if tell:
+ try:
+ tell()
+ except IOError:
+ tell = None
+ self.seekable = 0
+ line = self.fp.readline(_MAXLINE + 1)
+ if len(line) > _MAXLINE:
+ raise LineTooLong("header line")
+ if not line:
+ self.status = 'EOF in headers'
+ break
+ # Skip unix From name time lines
+ if firstline and line.startswith('From '):
+ self.unixfrom = self.unixfrom + line
+ continue
+ firstline = 0
+ if headerseen and line[0] in ' \t':
+ # XXX Not sure if continuation lines are handled properly
+ # for http and/or for repeating headers
+ # It's a continuation line.
+ hlist.append(line)
+ self.addcontinue(headerseen, line.strip())
+ continue
+ elif self.iscomment(line):
+ # It's a comment. Ignore it.
+ continue
+ elif self.islast(line):
+ # Note! No pushback here! The delimiter line gets eaten.
+ break
+ headerseen = self.isheader(line)
+ if headerseen:
+ # It's a legal header line, save it.
+ hlist.append(line)
+ self.addheader(headerseen, line[len(headerseen)+1:].strip())
+ elif headerseen is not None:
+ # An empty header name. These aren't allowed in HTTP, but it's
+ # probably a benign mistake. Don't add the header, just keep
+ # going.
+ pass
+ else:
+ # It's not a header line; skip it and try the next line.
+ self.status = 'Non-header line where header expected'
+
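# --- Editorial sketch, not part of the diff: repeated fields are merged per
# RFC 2616 sec 4.2, as readheaders() above describes.
from cStringIO import StringIO
from httplib import HTTPMessage

msg = HTTPMessage(StringIO("Accept: text/html\r\nAccept: text/plain\r\n\r\n"), 0)
assert msg.getheader("accept") == "text/html, text/plain"
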
+class HTTPResponse:
+
+ # strict: If true, raise BadStatusLine if the status line can't be
+ # parsed as a valid HTTP/1.0 or 1.1 status line. By default it is
+ # false because it prevents clients from talking to HTTP/0.9
+ # servers. Note that a response with a sufficiently corrupted
+ # status line will look like an HTTP/0.9 response.
+
+ # See RFC 2616 sec 19.6 and RFC 1945 sec 6 for details.
+
+ def __init__(self, sock, debuglevel=0, strict=0, method=None, buffering=False):
+ if buffering:
+ # The caller won't be using any sock.recv() calls, so buffering
+ # is fine and recommended for performance.
+ self.fp = sock.makefile('rb')
+ else:
+ # The buffer size is specified as zero, because the headers of
+ # the response are read with readline(). If the reads were
+ # buffered the readline() calls could consume some of the
+            # response, which may be read via a recv() on the underlying
+ # socket.
+ self.fp = sock.makefile('rb', 0)
+ self.debuglevel = debuglevel
+ self.strict = strict
+ self._method = method
+
+ self.msg = None
+
+ # from the Status-Line of the response
+ self.version = _UNKNOWN # HTTP-Version
+ self.status = _UNKNOWN # Status-Code
+ self.reason = _UNKNOWN # Reason-Phrase
+
+ self.chunked = _UNKNOWN # is "chunked" being used?
+ self.chunk_left = _UNKNOWN # bytes left to read in current chunk
+ self.length = _UNKNOWN # number of bytes left in response
+ self.will_close = _UNKNOWN # conn will close at end of response
+
+ def _read_status(self):
+ # Initialize with Simple-Response defaults
+ line = self.fp.readline(_MAXLINE + 1)
+ if len(line) > _MAXLINE:
+ raise LineTooLong("header line")
+ if self.debuglevel > 0:
+ print "reply:", repr(line)
+ if not line:
+ # Presumably, the server closed the connection before
+ # sending a valid response.
+ raise BadStatusLine(line)
+ try:
+ [version, status, reason] = line.split(None, 2)
+ except ValueError:
+ try:
+ [version, status] = line.split(None, 1)
+ reason = ""
+ except ValueError:
+ # empty version will cause next test to fail and status
+ # will be treated as 0.9 response.
+ version = ""
+ if not version.startswith('HTTP/'):
+ if self.strict:
+ self.close()
+ raise BadStatusLine(line)
+ else:
+ # assume it's a Simple-Response from an 0.9 server
+ self.fp = LineAndFileWrapper(line, self.fp)
+ return "HTTP/0.9", 200, ""
+
+ # The status code is a three-digit number
+ try:
+ status = int(status)
+ if status < 100 or status > 999:
+ raise BadStatusLine(line)
+ except ValueError:
+ raise BadStatusLine(line)
+ return version, status, reason
+
+ def begin(self):
+ if self.msg is not None:
+ # we've already started reading the response
+ return
+
+ # read until we get a non-100 response
+ while True:
+ version, status, reason = self._read_status()
+ if status != CONTINUE:
+ break
+ # skip the header from the 100 response
+ while True:
+ skip = self.fp.readline(_MAXLINE + 1)
+ if len(skip) > _MAXLINE:
+ raise LineTooLong("header line")
+ skip = skip.strip()
+ if not skip:
+ break
+ if self.debuglevel > 0:
+ print "header:", skip
+
+ self.status = status
+ self.reason = reason.strip()
+ if version == 'HTTP/1.0':
+ self.version = 10
+ elif version.startswith('HTTP/1.'):
+ self.version = 11 # use HTTP/1.1 code for HTTP/1.x where x>=1
+ elif version == 'HTTP/0.9':
+ self.version = 9
+ else:
+ raise UnknownProtocol(version)
+
+ if self.version == 9:
+ self.length = None
+ self.chunked = 0
+ self.will_close = 1
+ self.msg = HTTPMessage(StringIO())
+ return
+
+ self.msg = HTTPMessage(self.fp, 0)
+ if self.debuglevel > 0:
+ for hdr in self.msg.headers:
+ print "header:", hdr,
+
+ # don't let the msg keep an fp
+ self.msg.fp = None
+
+ # are we using the chunked-style of transfer encoding?
+ tr_enc = self.msg.getheader('transfer-encoding')
+ if tr_enc and tr_enc.lower() == "chunked":
+ self.chunked = 1
+ self.chunk_left = None
+ else:
+ self.chunked = 0
+
+ # will the connection close at the end of the response?
+ self.will_close = self._check_close()
+
+ # do we have a Content-Length?
+ # NOTE: RFC 2616, S4.4, #3 says we ignore this if tr_enc is "chunked"
+ length = self.msg.getheader('content-length')
+ if length and not self.chunked:
+ try:
+ self.length = int(length)
+ except ValueError:
+ self.length = None
+ else:
+ if self.length < 0: # ignore nonsensical negative lengths
+ self.length = None
+ else:
+ self.length = None
+
+ # does the body have a fixed length? (of zero)
+ if (status == NO_CONTENT or status == NOT_MODIFIED or
+ 100 <= status < 200 or # 1xx codes
+ self._method == 'HEAD'):
+ self.length = 0
+
+ # if the connection remains open, and we aren't using chunked, and
+ # a content-length was not provided, then assume that the connection
+ # WILL close.
+ if not self.will_close and \
+ not self.chunked and \
+ self.length is None:
+ self.will_close = 1
+
+ def _check_close(self):
+ conn = self.msg.getheader('connection')
+ if self.version == 11:
+            # An HTTP/1.1 connection is assumed to stay open unless
+ # explicitly closed.
+ conn = self.msg.getheader('connection')
+ if conn and "close" in conn.lower():
+ return True
+ return False
+
+ # Some HTTP/1.0 implementations have support for persistent
+ # connections, using rules different than HTTP/1.1.
+
+ # For older HTTP, Keep-Alive indicates persistent connection.
+ if self.msg.getheader('keep-alive'):
+ return False
+
+ # At least Akamai returns a "Connection: Keep-Alive" header,
+ # which was supposed to be sent by the client.
+ if conn and "keep-alive" in conn.lower():
+ return False
+
+ # Proxy-Connection is a netscape hack.
+ pconn = self.msg.getheader('proxy-connection')
+ if pconn and "keep-alive" in pconn.lower():
+ return False
+
+ # otherwise, assume it will close
+ return True
+
+ def close(self):
+ fp = self.fp
+ if fp:
+ self.fp = None
+ fp.close()
+
+ def isclosed(self):
+ # NOTE: it is possible that we will not ever call self.close(). This
+ # case occurs when will_close is TRUE, length is None, and we
+ # read up to the last byte, but NOT past it.
+ #
+ # IMPLIES: if will_close is FALSE, then self.close() will ALWAYS be
+ # called, meaning self.isclosed() is meaningful.
+ return self.fp is None
+
+ # XXX It would be nice to have readline and __iter__ for this, too.
+
+ def read(self, amt=None):
+ if self.fp is None:
+ return ''
+
+ if self._method == 'HEAD':
+ self.close()
+ return ''
+
+ if self.chunked:
+ return self._read_chunked(amt)
+
+ if amt is None:
+ # unbounded read
+ if self.length is None:
+ s = self.fp.read()
+ else:
+ try:
+ s = self._safe_read(self.length)
+ except IncompleteRead:
+ self.close()
+ raise
+ self.length = 0
+ self.close() # we read everything
+ return s
+
+ if self.length is not None:
+ if amt > self.length:
+ # clip the read to the "end of response"
+ amt = self.length
+
+ # we do not use _safe_read() here because this may be a .will_close
+ # connection, and the user is reading more bytes than will be provided
+ # (for example, reading in 1k chunks)
+ s = self.fp.read(amt)
+ if not s and amt:
+ # Ideally, we would raise IncompleteRead if the content-length
+ # wasn't satisfied, but it might break compatibility.
+ self.close()
+ if self.length is not None:
+ self.length -= len(s)
+ if not self.length:
+ self.close()
+
+ return s
+
+ def _read_chunked(self, amt):
+ assert self.chunked != _UNKNOWN
+ chunk_left = self.chunk_left
+ value = []
+ while True:
+ if chunk_left is None:
+ line = self.fp.readline(_MAXLINE + 1)
+ if len(line) > _MAXLINE:
+ raise LineTooLong("chunk size")
+ i = line.find(';')
+ if i >= 0:
+ line = line[:i] # strip chunk-extensions
+ try:
+ chunk_left = int(line, 16)
+ except ValueError:
+ # close the connection as protocol synchronisation is
+ # probably lost
+ self.close()
+ raise IncompleteRead(''.join(value))
+ if chunk_left == 0:
+ break
+ if amt is None:
+ value.append(self._safe_read(chunk_left))
+ elif amt < chunk_left:
+ value.append(self._safe_read(amt))
+ self.chunk_left = chunk_left - amt
+ return ''.join(value)
+ elif amt == chunk_left:
+ value.append(self._safe_read(amt))
+ self._safe_read(2) # toss the CRLF at the end of the chunk
+ self.chunk_left = None
+ return ''.join(value)
+ else:
+ value.append(self._safe_read(chunk_left))
+ amt -= chunk_left
+
+ # we read the whole chunk, get another
+ self._safe_read(2) # toss the CRLF at the end of the chunk
+ chunk_left = None
+
+ # read and discard trailer up to the CRLF terminator
+ ### note: we shouldn't have any trailers!
+ while True:
+ line = self.fp.readline(_MAXLINE + 1)
+ if len(line) > _MAXLINE:
+ raise LineTooLong("trailer line")
+ if not line:
+ # a vanishingly small number of sites EOF without
+ # sending the trailer
+ break
+ if line == '\r\n':
+ break
+
+ # we read everything; close the "file"
+ self.close()
+
+ return ''.join(value)
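# --- Editorial sketch, not part of the diff: decoding a chunked body through
# HTTPResponse without any network, using a minimal stand-in for a socket.
from cStringIO import StringIO
from httplib import HTTPResponse

class _FakeSock:
    """Just enough of a socket for HTTPResponse: makefile() hands back the data."""
    def __init__(self, text):
        self._file = StringIO(text)
    def makefile(self, mode, bufsize=None):
        return self._file

raw = ("HTTP/1.1 200 OK\r\n"
       "Transfer-Encoding: chunked\r\n"
       "\r\n"
       "5\r\nhello\r\n"
       "6\r\n world\r\n"
       "0\r\n\r\n")
resp = HTTPResponse(_FakeSock(raw), method="GET")
resp.begin()
assert resp.chunked
assert resp.read() == "hello world"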
+
+ def _safe_read(self, amt):
+ """Read the number of bytes requested, compensating for partial reads.
+
+ Normally, we have a blocking socket, but a read() can be interrupted
+ by a signal (resulting in a partial read).
+
+ Note that we cannot distinguish between EOF and an interrupt when zero
+ bytes have been read. IncompleteRead() will be raised in this
+ situation.
+
+ This function should be used when <amt> bytes "should" be present for
+ reading. If the bytes are truly not available (due to EOF), then the
+ IncompleteRead exception can be used to detect the problem.
+ """
+ # NOTE(gps): As of svn r74426 socket._fileobject.read(x) will never
+ # return less than x bytes unless EOF is encountered. It now handles
+ # signal interruptions (socket.error EINTR) internally. This code
+ # never caught that exception anyways. It seems largely pointless.
+ # self.fp.read(amt) will work fine.
+ s = []
+ while amt > 0:
+ chunk = self.fp.read(min(amt, MAXAMOUNT))
+ if not chunk:
+ raise IncompleteRead(''.join(s), amt)
+ s.append(chunk)
+ amt -= len(chunk)
+ return ''.join(s)
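# --- Editorial sketch, not part of the diff: a truncated body surfaces as
# IncompleteRead carrying the bytes that did arrive (reuses _FakeSock from the
# chunked sketch above).
from httplib import HTTPResponse, IncompleteRead

short = HTTPResponse(_FakeSock("HTTP/1.1 200 OK\r\n"
                               "Content-Length: 10\r\n"
                               "\r\n"
                               "abc"), method="GET")
short.begin()
try:
    short.read()
except IncompleteRead, e:
    assert e.partial == "abc"
    assert e.expected == 7     # 7 of the promised 10 bytes never arrived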
+
+ def fileno(self):
+ return self.fp.fileno()
+
+ def getheader(self, name, default=None):
+ if self.msg is None:
+ raise ResponseNotReady()
+ return self.msg.getheader(name, default)
+
+ def getheaders(self):
+ """Return list of (header, value) tuples."""
+ if self.msg is None:
+ raise ResponseNotReady()
+ return self.msg.items()
+
+
+class HTTPConnection:
+
+ _http_vsn = 11
+ _http_vsn_str = 'HTTP/1.1'
+
+ response_class = HTTPResponse
+ default_port = HTTP_PORT
+ auto_open = 1
+ debuglevel = 0
+ strict = 0
+
+ def __init__(self, host, port=None, strict=None,
+ timeout=socket._GLOBAL_DEFAULT_TIMEOUT, source_address=None):
+ self.timeout = timeout
+ self.source_address = source_address
+ self.sock = None
+ self._buffer = []
+ self.__response = None
+ self.__state = _CS_IDLE
+ self._method = None
+ self._tunnel_host = None
+ self._tunnel_port = None
+ self._tunnel_headers = {}
+ if strict is not None:
+ self.strict = strict
+
+ (self.host, self.port) = self._get_hostport(host, port)
+
+ # This is stored as an instance variable to allow unittests
+ # to replace with a suitable mock
+ self._create_connection = socket.create_connection
+
+ def set_tunnel(self, host, port=None, headers=None):
+ """ Set up host and port for HTTP CONNECT tunnelling.
+
+        In a connection that uses HTTP CONNECT tunnelling, the host passed to the
+        constructor is used as the proxy server that relays all communication to
+        the endpoint passed to set_tunnel. This is done by sending an HTTP CONNECT
+        request to the proxy server when the connection is established.
+
+ This method must be called before the HTTP connection has been
+ established.
+
+ The headers argument should be a mapping of extra HTTP headers
+ to send with the CONNECT request.
+ """
+ # Verify if this is required.
+ if self.sock:
+ raise RuntimeError("Can't setup tunnel for established connection.")
+
+ self._tunnel_host, self._tunnel_port = self._get_hostport(host, port)
+ if headers:
+ self._tunnel_headers = headers
+ else:
+ self._tunnel_headers.clear()
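# --- Editorial sketch, not part of the diff: CONNECT tunnelling. The proxy
# host, port and Proxy-Authorization value are made up for illustration;
# nothing is sent to the network until the connection is actually used.
from httplib import HTTPConnection

proxied = HTTPConnection("proxy.example.com", 3128)
proxied.set_tunnel("www.example.com", 80,
                   headers={"Proxy-Authorization": "Basic dXNlcjpwYXNz"})
# A later proxied.request("GET", "/") would first send
#   CONNECT www.example.com:80 HTTP/1.0
# to the proxy, then issue the GET through the established tunnel.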
+
+ def _get_hostport(self, host, port):
+ if port is None:
+ i = host.rfind(':')
+ j = host.rfind(']') # ipv6 addresses have [...]
+ if i > j:
+ try:
+ port = int(host[i+1:])
+ except ValueError:
+ if host[i+1:] == "": # http://foo.com:/ == http://foo.com/
+ port = self.default_port
+ else:
+ raise InvalidURL("nonnumeric port: '%s'" % host[i+1:])
+ host = host[:i]
+ else:
+ port = self.default_port
+ if host and host[0] == '[' and host[-1] == ']':
+ host = host[1:-1]
+ return (host, port)
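# --- Editorial sketch, not part of the diff: how host strings are split into
# (host, port), including the bracketed IPv6 form handled above.
from httplib import HTTPConnection

c = HTTPConnection("www.example.com")
assert c._get_hostport("www.example.com:8080", None) == ("www.example.com", 8080)
assert c._get_hostport("www.example.com:", None) == ("www.example.com", 80)  # empty port -> default
assert c._get_hostport("[fe80::1]:8080", None) == ("fe80::1", 8080)
assert c._get_hostport("[fe80::1]", None) == ("fe80::1", 80)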
+
+ def set_debuglevel(self, level):
+ self.debuglevel = level
+
+ def _tunnel(self):
+ self.send("CONNECT %s:%d HTTP/1.0\r\n" % (self._tunnel_host,
+ self._tunnel_port))
+ for header, value in self._tunnel_headers.iteritems():
+ self.send("%s: %s\r\n" % (header, value))
+ self.send("\r\n")
+ response = self.response_class(self.sock, strict = self.strict,
+ method = self._method)
+ (version, code, message) = response._read_status()
+
+ if version == "HTTP/0.9":
+ # HTTP/0.9 doesn't support the CONNECT verb, so if httplib has
+ # concluded HTTP/0.9 is being used something has gone wrong.
+ self.close()
+ raise socket.error("Invalid response from tunnel request")
+ if code != 200:
+ self.close()
+ raise socket.error("Tunnel connection failed: %d %s" % (code,
+ message.strip()))
+ while True:
+ line = response.fp.readline(_MAXLINE + 1)
+ if len(line) > _MAXLINE:
+ raise LineTooLong("header line")
+ if not line:
+ # for sites which EOF without sending trailer
+ break
+ if line == '\r\n':
+ break
+
+
+ def connect(self):
+ """Connect to the host and port specified in __init__."""
+ self.sock = self._create_connection((self.host,self.port),
+ self.timeout, self.source_address)
+
+ if self._tunnel_host:
+ self._tunnel()
+
+ def close(self):
+ """Close the connection to the HTTP server."""
+ self.__state = _CS_IDLE
+ try:
+ sock = self.sock
+ if sock:
+ self.sock = None
+ sock.close() # close it manually... there may be other refs
+ finally:
+ response = self.__response
+ if response:
+ self.__response = None
+ response.close()
+
+ def send(self, data):
+ """Send `data' to the server."""
+ if self.sock is None:
+ if self.auto_open:
+ self.connect()
+ else:
+ raise NotConnected()
+
+ if self.debuglevel > 0:
+ print "send:", repr(data)
+ blocksize = 8192
+ if hasattr(data,'read') and not isinstance(data, array):
+            if self.debuglevel > 0: print "sending a read()able"
+ datablock = data.read(blocksize)
+ while datablock:
+ self.sock.sendall(datablock)
+ datablock = data.read(blocksize)
+ else:
+ self.sock.sendall(data)
+
+ def _output(self, s):
+ """Add a line of output to the current request buffer.
+
+ Assumes that the line does *not* end with \\r\\n.
+ """
+ self._buffer.append(s)
+
+ def _send_output(self, message_body=None):
+ """Send the currently buffered request and clear the buffer.
+
+ Appends an extra \\r\\n to the buffer.
+ A message_body may be specified, to be appended to the request.
+ """
+ self._buffer.extend(("", ""))
+ msg = "\r\n".join(self._buffer)
+ del self._buffer[:]
+ # If msg and message_body are sent in a single send() call,
+ # it will avoid performance problems caused by the interaction
+ # between delayed ack and the Nagle algorithm.
+ if isinstance(message_body, str):
+ msg += message_body
+ message_body = None
+ self.send(msg)
+ if message_body is not None:
+ #message_body was not a string (i.e. it is a file) and
+ #we must run the risk of Nagle
+ self.send(message_body)
+
+ def putrequest(self, method, url, skip_host=0, skip_accept_encoding=0):
+ """Send a request to the server.
+
+ `method' specifies an HTTP request method, e.g. 'GET'.
+ `url' specifies the object being requested, e.g. '/index.html'.
+        `skip_host' if True does not automatically add a 'Host:' header
+        `skip_accept_encoding' if True does not automatically add an
+           'Accept-Encoding:' header
+ """
+
+ # if a prior response has been completed, then forget about it.
+ if self.__response and self.__response.isclosed():
+ self.__response = None
+
+
+ # in certain cases, we cannot issue another request on this connection.
+ # this occurs when:
+ # 1) we are in the process of sending a request. (_CS_REQ_STARTED)
+ # 2) a response to a previous request has signalled that it is going
+ # to close the connection upon completion.
+ # 3) the headers for the previous response have not been read, thus
+ # we cannot determine whether point (2) is true. (_CS_REQ_SENT)
+ #
+ # if there is no prior response, then we can request at will.
+ #
+ # if point (2) is true, then we will have passed the socket to the
+ # response (effectively meaning, "there is no prior response"), and
+ # will open a new one when a new request is made.
+ #
+ # Note: if a prior response exists, then we *can* start a new request.
+ # We are not allowed to begin fetching the response to this new
+ # request, however, until that prior response is complete.
+ #
+ if self.__state == _CS_IDLE:
+ self.__state = _CS_REQ_STARTED
+ else:
+ raise CannotSendRequest()
+
+ # Save the method we use, we need it later in the response phase
+ self._method = method
+ if not url:
+ url = '/'
+ hdr = '%s %s %s' % (method, url, self._http_vsn_str)
+
+ self._output(hdr)
+
+ if self._http_vsn == 11:
+ # Issue some standard headers for better HTTP/1.1 compliance
+
+ if not skip_host:
+ # this header is issued *only* for HTTP/1.1
+ # connections. more specifically, this means it is
+ # only issued when the client uses the new
+ # HTTPConnection() class. backwards-compat clients
+ # will be using HTTP/1.0 and those clients may be
+ # issuing this header themselves. we should NOT issue
+ # it twice; some web servers (such as Apache) barf
+ # when they see two Host: headers
+
+                # If we need a non-standard port, include it in the
+                # header. If the request is going through a proxy,
+                # use the host of the actual URL, not the host of the
+                # proxy.
+
+ netloc = ''
+ if url.startswith('http'):
+ nil, netloc, nil, nil, nil = urlsplit(url)
+
+ if netloc:
+ try:
+ netloc_enc = netloc.encode("ascii")
+ except UnicodeEncodeError:
+ netloc_enc = netloc.encode("idna")
+ self.putheader('Host', netloc_enc)
+ else:
+ if self._tunnel_host:
+ host = self._tunnel_host
+ port = self._tunnel_port
+ else:
+ host = self.host
+ port = self.port
+
+ try:
+ host_enc = host.encode("ascii")
+ except UnicodeEncodeError:
+ host_enc = host.encode("idna")
+ # Wrap the IPv6 Host Header with [] (RFC 2732)
+ if host_enc.find(':') >= 0:
+ host_enc = "[" + host_enc + "]"
+ if port == self.default_port:
+ self.putheader('Host', host_enc)
+ else:
+ self.putheader('Host', "%s:%s" % (host_enc, port))
+
+ # note: we are assuming that clients will not attempt to set these
+ # headers since *this* library must deal with the
+ # consequences. this also means that when the supporting
+ # libraries are updated to recognize other forms, then this
+ # code should be changed (removed or updated).
+
+ # we only want a Content-Encoding of "identity" since we don't
+ # support encodings such as x-gzip or x-deflate.
+ if not skip_accept_encoding:
+ self.putheader('Accept-Encoding', 'identity')
+
+ # we can accept "chunked" Transfer-Encodings, but no others
+ # NOTE: no TE header implies *only* "chunked"
+ #self.putheader('TE', 'chunked')
+
+ # if TE is supplied in the header, then it must appear in a
+ # Connection header.
+ #self.putheader('Connection', 'TE')
+
+ else:
+ # For HTTP/1.0, the server will assume "not chunked"
+ pass
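# --- Editorial sketch, not part of the diff: the state checks above, offline.
# Nothing is sent here because output is only buffered until endheaders().
from httplib import HTTPConnection, CannotSendRequest

c2 = HTTPConnection("www.example.com")
c2.putrequest("GET", "/")              # Idle -> Request-started
try:
    c2.putrequest("GET", "/other")     # refused: still in Request-started
except CannotSendRequest:
    pass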
+
+ def putheader(self, header, *values):
+ """Send a request header line to the server.
+
+ For example: h.putheader('Accept', 'text/html')
+ """
+ if self.__state != _CS_REQ_STARTED:
+ raise CannotSendHeader()
+
+ header = '%s' % header
+ if not _is_legal_header_name(header):
+ raise ValueError('Invalid header name %r' % (header,))
+
+ values = [str(v) for v in values]
+ for one_value in values:
+ if _is_illegal_header_value(one_value):
+ raise ValueError('Invalid header value %r' % (one_value,))
+
+ hdr = '%s: %s' % (header, '\r\n\t'.join(values))
+ self._output(hdr)
+
+ def endheaders(self, message_body=None):
+ """Indicate that the last header line has been sent to the server.
+
+ This method sends the request to the server. The optional
+ message_body argument can be used to pass a message body
+ associated with the request. The message body will be sent in
+        the same packet as the message headers if it is a string, otherwise it is
+ sent as a separate packet.
+ """
+ if self.__state == _CS_REQ_STARTED:
+ self.__state = _CS_REQ_SENT
+ else:
+ raise CannotSendHeader()
+ self._send_output(message_body)
+
+ def request(self, method, url, body=None, headers={}):
+ """Send a complete request to the server."""
+ self._send_request(method, url, body, headers)
+
+ def _set_content_length(self, body, method):
+ # Set the content-length based on the body. If the body is "empty", we
+ # set Content-Length: 0 for methods that expect a body (RFC 7230,
+ # Section 3.3.2). If the body is set for other methods, we set the
+ # header provided we can figure out what the length is.
+ thelen = None
+ if body is None and method.upper() in _METHODS_EXPECTING_BODY:
+ thelen = '0'
+ elif body is not None:
+ try:
+ thelen = str(len(body))
+ except (TypeError, AttributeError):
+ # If this is a file-like object, try to
+ # fstat its file descriptor
+ try:
+ thelen = str(os.fstat(body.fileno()).st_size)
+ except (AttributeError, OSError):
+ # Don't send a length if this failed
+ if self.debuglevel > 0: print "Cannot stat!!"
+
+ if thelen is not None:
+ self.putheader('Content-Length', thelen)
+
+ def _send_request(self, method, url, body, headers):
+ # Honor explicitly requested Host: and Accept-Encoding: headers.
+ header_names = dict.fromkeys([k.lower() for k in headers])
+ skips = {}
+ if 'host' in header_names:
+ skips['skip_host'] = 1
+ if 'accept-encoding' in header_names:
+ skips['skip_accept_encoding'] = 1
+
+ self.putrequest(method, url, **skips)
+
+ if 'content-length' not in header_names:
+ self._set_content_length(body, method)
+ for hdr, value in headers.iteritems():
+ self.putheader(hdr, value)
+ self.endheaders(body)
+
+ def getresponse(self, buffering=False):
+ "Get the response from the server."
+
+ # if a prior response has been completed, then forget about it.
+ if self.__response and self.__response.isclosed():
+ self.__response = None
+
+ #
+ # if a prior response exists, then it must be completed (otherwise, we
+ # cannot read this response's header to determine the connection-close
+ # behavior)
+ #
+ # note: if a prior response existed, but was connection-close, then the
+ # socket and response were made independent of this HTTPConnection
+ # object since a new request requires that we open a whole new
+ # connection
+ #
+ # this means the prior response had one of two states:
+ # 1) will_close: this connection was reset and the prior socket and
+ # response operate independently
+ # 2) persistent: the response was retained and we await its
+ # isclosed() status to become true.
+ #
+ if self.__state != _CS_REQ_SENT or self.__response:
+ raise ResponseNotReady()
+
+ args = (self.sock,)
+ kwds = {"strict":self.strict, "method":self._method}
+ if self.debuglevel > 0:
+ args += (self.debuglevel,)
+ if buffering:
+ #only add this keyword if non-default, for compatibility with
+ #other response_classes.
+            kwds["buffering"] = True
+ response = self.response_class(*args, **kwds)
+
+ try:
+ response.begin()
+ assert response.will_close != _UNKNOWN
+ self.__state = _CS_IDLE
+
+ if response.will_close:
+ # this effectively passes the connection to the response
+ self.close()
+ else:
+ # remember this, so we can tell when it is complete
+ self.__response = response
+
+ return response
+ except:
+ response.close()
+ raise
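# --- Editorial sketch, not part of the diff: the convenience path. request()
# fills in Content-Length itself (see _set_content_length above); the host,
# path and body are illustrative and a reachable server is assumed.
from httplib import HTTPConnection

conn2 = HTTPConnection("www.example.com", timeout=10)
conn2.request("POST", "/submit", body="key=value",
              headers={"Content-Type": "application/x-www-form-urlencoded"})
r = conn2.getresponse()
print r.status, r.reason, r.getheader("content-type")
payload = r.read()    # must be read before reusing the connection
conn2.close()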
+
+
+class HTTP:
+ "Compatibility class with httplib.py from 1.5."
+
+ _http_vsn = 10
+ _http_vsn_str = 'HTTP/1.0'
+
+ debuglevel = 0
+
+ _connection_class = HTTPConnection
+
+ def __init__(self, host='', port=None, strict=None):
+ "Provide a default host, since the superclass requires one."
+
+ # some joker passed 0 explicitly, meaning default port
+ if port == 0:
+ port = None
+
+ # Note that we may pass an empty string as the host; this will raise
+ # an error when we attempt to connect. Presumably, the client code
+ # will call connect before then, with a proper host.
+ self._setup(self._connection_class(host, port, strict))
+
+ def _setup(self, conn):
+ self._conn = conn
+
+ # set up delegation to flesh out interface
+ self.send = conn.send
+ self.putrequest = conn.putrequest
+ self.putheader = conn.putheader
+ self.endheaders = conn.endheaders
+ self.set_debuglevel = conn.set_debuglevel
+
+ conn._http_vsn = self._http_vsn
+ conn._http_vsn_str = self._http_vsn_str
+
+ self.file = None
+
+ def connect(self, host=None, port=None):
+ "Accept arguments to set the host/port, since the superclass doesn't."
+
+ if host is not None:
+ (self._conn.host, self._conn.port) = self._conn._get_hostport(host, port)
+ self._conn.connect()
+
+ def getfile(self):
+        "Provide a getfile, since the superclass does not use this concept."
+ return self.file
+
+ def getreply(self, buffering=False):
+ """Compat definition since superclass does not define it.
+
+ Returns a tuple consisting of:
+ - server status code (e.g. '200' if all goes well)
+ - server "reason" corresponding to status code
+ - any RFC822 headers in the response from the server
+ """
+ try:
+ if not buffering:
+ response = self._conn.getresponse()
+ else:
+ #only add this keyword if non-default for compatibility
+ #with other connection classes
+ response = self._conn.getresponse(buffering)
+ except BadStatusLine, e:
+ ### hmm. if getresponse() ever closes the socket on a bad request,
+ ### then we are going to have problems with self.sock
+
+ ### should we keep this behavior? do people use it?
+ # keep the socket open (as a file), and return it
+ self.file = self._conn.sock.makefile('rb', 0)
+
+ # close our socket -- we want to restart after any protocol error
+ self.close()
+
+ self.headers = None
+ return -1, e.line, None
+
+ self.headers = response.msg
+ self.file = response.fp
+ return response.status, response.reason, response.msg
+
+ def close(self):
+ self._conn.close()
+
+ # note that self.file == response.fp, which gets closed by the
+ # superclass. just clear the object ref here.
+ ### hmm. messy. if status==-1, then self.file is owned by us.
+ ### well... we aren't explicitly closing, but losing this ref will
+ ### do it
+ self.file = None
+
+try:
+ import ssl
+except ImportError:
+ pass
+else:
+ class HTTPSConnection(HTTPConnection):
+ "This class allows communication via SSL."
+
+ default_port = HTTPS_PORT
+
+ def __init__(self, host, port=None, key_file=None, cert_file=None,
+ strict=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
+ source_address=None, context=None):
+ HTTPConnection.__init__(self, host, port, strict, timeout,
+ source_address)
+ self.key_file = key_file
+ self.cert_file = cert_file
+ if context is None:
+ context = ssl._create_default_https_context()
+ if key_file or cert_file:
+ context.load_cert_chain(cert_file, key_file)
+ self._context = context
+
+ def connect(self):
+ "Connect to a host on a given (SSL) port."
+
+ HTTPConnection.connect(self)
+
+ if self._tunnel_host:
+ server_hostname = self._tunnel_host
+ else:
+ server_hostname = self.host
+
+ self.sock = self._context.wrap_socket(self.sock,
+ server_hostname=server_hostname)
+
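# --- Editorial sketch, not part of the diff: plain HTTPS use, assuming the
# ssl module is importable and www.example.com serves HTTPS. With no explicit
# context, the default context created in __init__ above is used.
from httplib import HTTPSConnection

sconn = HTTPSConnection("www.example.com", 443, timeout=10)
sconn.request("GET", "/")
sr = sconn.getresponse()
print sr.status, len(sr.read())
sconn.close()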
+ __all__.append("HTTPSConnection")
+
+ class HTTPS(HTTP):
+ """Compatibility with 1.5 httplib interface
+
+ Python 1.5.2 did not have an HTTPS class, but it defined an
+ interface for sending http requests that is also useful for
+ https.
+ """
+
+ _connection_class = HTTPSConnection
+
+ def __init__(self, host='', port=None, key_file=None, cert_file=None,
+ strict=None, context=None):
+ # provide a default host, pass the X509 cert info
+
+ # urf. compensate for bad input.
+ if port == 0:
+ port = None
+ self._setup(self._connection_class(host, port, key_file,
+ cert_file, strict,
+ context=context))
+
+ # we never actually use these for anything, but we keep them
+ # here for compatibility with post-1.5.2 CVS.
+ self.key_file = key_file
+ self.cert_file = cert_file
+
+
+ def FakeSocket (sock, sslobj):
+ warnings.warn("FakeSocket is deprecated, and won't be in 3.x. " +
+ "Use the result of ssl.wrap_socket() directly instead.",
+ DeprecationWarning, stacklevel=2)
+ return sslobj
+
+
+class HTTPException(Exception):
+ # Subclasses that define an __init__ must call Exception.__init__
+ # or define self.args. Otherwise, str() will fail.
+ pass
+
+class NotConnected(HTTPException):
+ pass
+
+class InvalidURL(HTTPException):
+ pass
+
+class UnknownProtocol(HTTPException):
+ def __init__(self, version):
+ self.args = version,
+ self.version = version
+
+class UnknownTransferEncoding(HTTPException):
+ pass
+
+class UnimplementedFileMode(HTTPException):
+ pass
+
+class IncompleteRead(HTTPException):
+ def __init__(self, partial, expected=None):
+ self.args = partial,
+ self.partial = partial
+ self.expected = expected
+ def __repr__(self):
+ if self.expected is not None:
+ e = ', %i more expected' % self.expected
+ else:
+ e = ''
+ return 'IncompleteRead(%i bytes read%s)' % (len(self.partial), e)
+ def __str__(self):
+ return repr(self)
+
+class ImproperConnectionState(HTTPException):
+ pass
+
+class CannotSendRequest(ImproperConnectionState):
+ pass
+
+class CannotSendHeader(ImproperConnectionState):
+ pass
+
+class ResponseNotReady(ImproperConnectionState):
+ pass
+
+class BadStatusLine(HTTPException):
+ def __init__(self, line):
+ if not line:
+ line = repr(line)
+ self.args = line,
+ self.line = line
+
+class LineTooLong(HTTPException):
+ def __init__(self, line_type):
+ HTTPException.__init__(self, "got more than %d bytes when reading %s"
+ % (_MAXLINE, line_type))
+
+# for backwards compatibility
+error = HTTPException
+
+class LineAndFileWrapper:
+ """A limited file-like object for HTTP/0.9 responses."""
+
+ # The status-line parsing code calls readline(), which normally
+ # get the HTTP status line. For a 0.9 response, however, this is
+ # actually the first line of the body! Clients need to get a
+ # readable file object that contains that line.
+
+ def __init__(self, line, file):
+ self._line = line
+ self._file = file
+ self._line_consumed = 0
+ self._line_offset = 0
+ self._line_left = len(line)
+
+ def __getattr__(self, attr):
+ return getattr(self._file, attr)
+
+ def _done(self):
+ # called when the last byte is read from the line. After the
+ # call, all read methods are delegated to the underlying file
+ # object.
+ self._line_consumed = 1
+ self.read = self._file.read
+ self.readline = self._file.readline
+ self.readlines = self._file.readlines
+
+ def read(self, amt=None):
+ if self._line_consumed:
+ return self._file.read(amt)
+ assert self._line_left
+ if amt is None or amt > self._line_left:
+ s = self._line[self._line_offset:]
+ self._done()
+ if amt is None:
+ return s + self._file.read()
+ else:
+ return s + self._file.read(amt - len(s))
+ else:
+ assert amt <= self._line_left
+ i = self._line_offset
+ j = i + amt
+ s = self._line[i:j]
+ self._line_offset = j
+ self._line_left -= amt
+ if self._line_left == 0:
+ self._done()
+ return s
+
+ def readline(self):
+ if self._line_consumed:
+ return self._file.readline()
+ assert self._line_left
+ s = self._line[self._line_offset:]
+ self._done()
+ return s
+
+ def readlines(self, size=None):
+ if self._line_consumed:
+ return self._file.readlines(size)
+ assert self._line_left
+ L = [self._line[self._line_offset:]]
+ self._done()
+ if size is None:
+ return L + self._file.readlines()
+ else:
+ return L + self._file.readlines(size)