Completely delete Jython in lib/jython

AFAIK, Jython is not used anywhere in our tests, not even in combination
with Ant. So I have decided to delete it altogether. If the build
passes, we should be fine and able to travel more lightly in the
future.

Signed-off-by: Alexander Kriegisch <Alexander@Kriegisch.name>
tags/V1_9_7_M2
Alexander Kriegisch 3 years ago
commit a9f15df25a
100 changed files with 0 additions and 19883 deletions
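The commit message asserts that neither the tests nor the Ant build still reference Jython. One way to sanity-check that claim before deleting the directory is a quick source scan; a minimal sketch, where the scanned file extensions and the skip rule are assumptions, not part of this commit:

# Hypothetical sanity check: scan the working tree for remaining references to
# Jython outside of lib/jython itself. File extensions are assumptions.
import os

def find_jython_references(root="."):
    hits = []
    for dirpath, _dirnames, filenames in os.walk(root):
        if "lib" + os.sep + "jython" in dirpath:
            continue  # skip the directory this commit deletes
        for name in filenames:
            if name.endswith((".xml", ".java", ".py", ".properties", ".txt")):
                path = os.path.join(dirpath, name)
                try:
                    with open(path, encoding="utf-8", errors="ignore") as f:
                        if "jython" in f.read().lower():
                            hits.append(path)
                except OSError:
                    pass
    return hits

if __name__ == "__main__":
    for path in find_jython_references():
        print(path)

If the scan prints nothing outside lib/jython, the "travel more lightly" plan carries little risk beyond what the build itself would catch.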
  1. lib/jython/.cvsignore (+0 -1)
  2. lib/jython/LICENSE.txt (+0 -144)
  3. lib/jython/Lib/.cvsignore (+0 -1)
  4. lib/jython/Lib/BaseHTTPServer.py (+0 -482)
  5. lib/jython/Lib/CGIHTTPServer.py (+0 -304)
  6. lib/jython/Lib/ConfigParser.py (+0 -480)
  7. lib/jython/Lib/Cookie.py (+0 -734)
  8. lib/jython/Lib/LICENSE (+0 -232)
  9. lib/jython/Lib/MimeWriter.py (+0 -128)
  10. lib/jython/Lib/Queue.py (+0 -132)
  11. lib/jython/Lib/SimpleHTTPServer.py (+0 -198)
  12. lib/jython/Lib/SocketServer.py (+0 -566)
  13. lib/jython/Lib/StringIO.py (+0 -208)
  14. lib/jython/Lib/UserDict.py (+0 -42)
  15. lib/jython/Lib/UserList.py (+0 -85)
  16. lib/jython/Lib/UserString.py (+0 -173)
  17. lib/jython/Lib/__future__.py (+0 -69)
  18. lib/jython/Lib/anydbm.py (+0 -86)
  19. lib/jython/Lib/atexit.py (+0 -54)
  20. lib/jython/Lib/base64.py (+0 -81)
  21. lib/jython/Lib/bdb.py (+0 -565)
  22. lib/jython/Lib/binhex.py (+0 -531)
  23. lib/jython/Lib/bisect.py (+0 -78)
  24. lib/jython/Lib/calendar.py (+0 -209)
  25. lib/jython/Lib/cgi.py (+0 -1000)
  26. lib/jython/Lib/cmd.py (+0 -198)
  27. lib/jython/Lib/code.py (+0 -308)
  28. lib/jython/Lib/codecs.py (+0 -570)
  29. lib/jython/Lib/colorsys.py (+0 -123)
  30. lib/jython/Lib/commands.py (+0 -84)
  31. lib/jython/Lib/compileall.py (+0 -130)
  32. lib/jython/Lib/copy.py (+0 -330)
  33. lib/jython/Lib/copy_reg.py (+0 -35)
  34. lib/jython/Lib/dbexts.py (+0 -690)
  35. lib/jython/Lib/difflib.py (+0 -781)
  36. lib/jython/Lib/dircache.py (+0 -44)
  37. lib/jython/Lib/doctest.py (+0 -1118)
  38. lib/jython/Lib/dospath.py (+0 -332)
  39. lib/jython/Lib/dumbdbm.py (+0 -148)
  40. lib/jython/Lib/encodings/__init__.py (+0 -86)
  41. lib/jython/Lib/encodings/aliases.py (+0 -82)
  42. lib/jython/Lib/encodings/ascii.py (+0 -35)
  43. lib/jython/Lib/encodings/charmap.py (+0 -51)
  44. lib/jython/Lib/encodings/cp037.py (+0 -282)
  45. lib/jython/Lib/encodings/cp1006.py (+0 -140)
  46. lib/jython/Lib/encodings/cp1026.py (+0 -282)
  47. lib/jython/Lib/encodings/cp1250.py (+0 -125)
  48. lib/jython/Lib/encodings/cp1251.py (+0 -159)
  49. lib/jython/Lib/encodings/cp1252.py (+0 -78)
  50. lib/jython/Lib/encodings/cp1253.py (+0 -153)
  51. lib/jython/Lib/encodings/cp1254.py (+0 -84)
  52. lib/jython/Lib/encodings/cp1255.py (+0 -145)
  53. lib/jython/Lib/encodings/cp1256.py (+0 -131)
  54. lib/jython/Lib/encodings/cp1257.py (+0 -133)
  55. lib/jython/Lib/encodings/cp1258.py (+0 -92)
  56. lib/jython/Lib/encodings/cp424.py (+0 -282)
  57. lib/jython/Lib/encodings/cp437.py (+0 -174)
  58. lib/jython/Lib/encodings/cp500.py (+0 -282)
  59. lib/jython/Lib/encodings/cp737.py (+0 -174)
  60. lib/jython/Lib/encodings/cp775.py (+0 -174)
  61. lib/jython/Lib/encodings/cp850.py (+0 -174)
  62. lib/jython/Lib/encodings/cp852.py (+0 -174)
  63. lib/jython/Lib/encodings/cp855.py (+0 -174)
  64. lib/jython/Lib/encodings/cp856.py (+0 -174)
  65. lib/jython/Lib/encodings/cp857.py (+0 -173)
  66. lib/jython/Lib/encodings/cp860.py (+0 -174)
  67. lib/jython/Lib/encodings/cp861.py (+0 -174)
  68. lib/jython/Lib/encodings/cp862.py (+0 -174)
  69. lib/jython/Lib/encodings/cp863.py (+0 -174)
  70. lib/jython/Lib/encodings/cp864.py (+0 -172)
  71. lib/jython/Lib/encodings/cp865.py (+0 -174)
  72. lib/jython/Lib/encodings/cp866.py (+0 -174)
  73. lib/jython/Lib/encodings/cp869.py (+0 -174)
  74. lib/jython/Lib/encodings/cp874.py (+0 -173)
  75. lib/jython/Lib/encodings/cp875.py (+0 -283)
  76. lib/jython/Lib/encodings/iso8859_1.py (+0 -46)
  77. lib/jython/Lib/encodings/iso8859_10.py (+0 -92)
  78. lib/jython/Lib/encodings/iso8859_13.py (+0 -102)
  79. lib/jython/Lib/encodings/iso8859_14.py (+0 -77)
  80. lib/jython/Lib/encodings/iso8859_15.py (+0 -54)
  81. lib/jython/Lib/encodings/iso8859_2.py (+0 -103)
  82. lib/jython/Lib/encodings/iso8859_3.py (+0 -81)
  83. lib/jython/Lib/encodings/iso8859_4.py (+0 -96)
  84. lib/jython/Lib/encodings/iso8859_5.py (+0 -140)
  85. lib/jython/Lib/encodings/iso8859_6.py (+0 -139)
  86. lib/jython/Lib/encodings/iso8859_7.py (+0 -126)
  87. lib/jython/Lib/encodings/iso8859_8.py (+0 -114)
  88. lib/jython/Lib/encodings/iso8859_9.py (+0 -52)
  89. lib/jython/Lib/encodings/koi8_r.py (+0 -174)
  90. lib/jython/Lib/encodings/latin_1.py (+0 -35)
  91. lib/jython/Lib/encodings/mac_cyrillic.py (+0 -169)
  92. lib/jython/Lib/encodings/mac_greek.py (+0 -172)
  93. lib/jython/Lib/encodings/mac_iceland.py (+0 -168)
  94. lib/jython/Lib/encodings/mac_latin2.py (+0 -172)
  95. lib/jython/Lib/encodings/mac_roman.py (+0 -169)
  96. lib/jython/Lib/encodings/mac_turkish.py (+0 -169)
  97. lib/jython/Lib/encodings/mbcs.py (+0 -36)
  98. lib/jython/Lib/encodings/raw_unicode_escape.py (+0 -30)
  99. lib/jython/Lib/encodings/undefined.py (+0 -34)
  100. lib/jython/Lib/encodings/unicode_escape.py (+0 -0)

+ 0  - 1   lib/jython/.cvsignore

@@ -1 +0,0 @@
cachedir

+ 0  - 144   lib/jython/LICENSE.txt

@@ -1,144 +0,0 @@
HISTORY OF THE SOFTWARE
=======================

JPython was created in late 1997 by Jim Hugunin. Jim was also the
primary developer while he was at CNRI. In February 1999 Barry Warsaw
took over as primary developer and released JPython version 1.1.
In October 2000 Barry helped move the software to SourceForge
where it was renamed to Jython. Jython 2.0 is developed by a group
of volunteers.


The standard library is covered by the BeOpen / CNRI license. See the
Lib/LICENSE file for details.

The oro regular expresion matcher is covered by the apache license.
See the org/apache/LICENSE file for details.

The zxJDBC package was written by Brian Zimmer and originally licensed
under the GNU Public License. The package is now covered by the Jython
Software License.

Jython changes Software License.
================================

Copyright (c) 2000, Jython Developers
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:

- Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.

- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the distribution.

- Neither the name of the Jython Developers nor the names of
its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.




JPython Software License.
=========================

______________________________________________________________________

IMPORTANT: PLEASE READ THE FOLLOWING AGREEMENT CAREFULLY.

BY CLICKING ON THE "ACCEPT" BUTTON WHERE INDICATED, OR BY INSTALLING,
COPYING OR OTHERWISE USING THE SOFTWARE, YOU ARE DEEMED TO HAVE AGREED TO
THE TERMS AND CONDITIONS OF THIS AGREEMENT.

______________________________________________________________________

JPython version 1.1.x

1. This LICENSE AGREEMENT is between the Corporation for National Research
Initiatives, having an office at 1895 Preston White Drive, Reston, VA
20191 ("CNRI"), and the Individual or Organization ("Licensee")
accessing and using JPython version 1.1.x in source or binary form and
its associated documentation as provided herein ("Software").

2. Subject to the terms and conditions of this License Agreement, CNRI
hereby grants Licensee a non-exclusive, non-transferable, royalty-free,
world-wide license to reproduce, analyze, test, perform and/or display
publicly, prepare derivative works, distribute, and otherwise use the
Software alone or in any derivative version, provided, however, that
CNRI's License Agreement and CNRI's notice of copyright, i.e.,
"Copyright ©1996-1999 Corporation for National Research Initiatives;
All Rights Reserved" are both retained in the Software, alone or in any
derivative version prepared by Licensee.

Alternatively, in lieu of CNRI's License Agreement, Licensee may
substitute the following text (omitting the quotes), provided, however,
that such text is displayed prominently in the Software alone or in any
derivative version prepared by Licensee: "JPython (Version 1.1.x) is
made available subject to the terms and conditions in CNRI's License
Agreement. This Agreement may be located on the Internet using the
following unique, persistent identifier (known as a handle):
1895.22/1006. The License may also be obtained from a proxy server on
the Web using the following URL: http://hdl.handle.net/1895.22/1006."

3. In the event Licensee prepares a derivative work that is based on or
incorporates the Software or any part thereof, and wants to make the
derivative work available to the public as provided herein, then
Licensee hereby agrees to indicate in any such work, in a prominently
visible way, the nature of the modifications made to CNRI's Software.

4. Licensee may not use CNRI trademarks or trade name, including JPython
or CNRI, in a trademark sense to endorse or promote products or
services of Licensee, or any third party. Licensee may use the mark
JPython in connection with Licensee's derivative versions that are
based on or incorporate the Software, but only in the form
"JPython-based ___________________," or equivalent.

5. CNRI is making the Software available to Licensee on an "AS IS" basis.
CNRI MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. BY WAY
OF EXAMPLE, BUT NOT LIMITATION, CNRI MAKES NO AND DISCLAIMS ANY
REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS FOR ANY
PARTICULAR PURPOSE OR THAT THE USE OF THE SOFTWARE WILL NOT INFRINGE
ANY THIRD PARTY RIGHTS.

6. CNRI SHALL NOT BE LIABLE TO LICENSEE OR OTHER USERS OF THE SOFTWARE FOR
ANY INCIDENTAL, SPECIAL OR CONSEQUENTIAL DAMAGES OR LOSS AS A RESULT OF
USING, MODIFYING OR DISTRIBUTING THE SOFTWARE, OR ANY DERIVATIVE
THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. SOME STATES DO NOT
ALLOW THE LIMITATION OR EXCLUSION OF LIABILITY SO THE ABOVE DISCLAIMER
MAY NOT APPLY TO LICENSEE.

7. This License Agreement may be terminated by CNRI (i) immediately upon
written notice from CNRI of any material breach by the Licensee, if the
nature of the breach is such that it cannot be promptly remedied; or
(ii) sixty (60) days following notice from CNRI to Licensee of a
material remediable breach, if Licensee has not remedied such breach
within that sixty-day period.

8. This License Agreement shall be governed by and interpreted in all
respects by the law of the State of Virginia, excluding conflict of law
provisions. Nothing in this Agreement shall be deemed to create any
relationship of agency, partnership, or joint venture between CNRI and
Licensee.

9. By clicking on the "ACCEPT" button where indicated, or by installing,
copying or otherwise using the Software, Licensee agrees to be bound by
the terms and conditions of this License Agreement.

[ACCEPT BUTTON]


+ 0  - 1   lib/jython/Lib/.cvsignore

@@ -1 +0,0 @@
*$py.class

+ 0  - 482   lib/jython/Lib/BaseHTTPServer.py

@@ -1,482 +0,0 @@
"""HTTP server base class.
Note: the class in this module doesn't implement any HTTP request; see
SimpleHTTPServer for simple implementations of GET, HEAD and POST
(including CGI scripts).
Contents:
- BaseHTTPRequestHandler: HTTP request handler base class
- test: test function
XXX To do:
- send server version
- log requests even later (to capture byte count)
- log user-agent header and other interesting goodies
- send error log to separate file
- are request names really case sensitive?
"""
# See also:
#
# HTTP Working Group T. Berners-Lee
# INTERNET-DRAFT R. T. Fielding
# <draft-ietf-http-v10-spec-00.txt> H. Frystyk Nielsen
# Expires September 8, 1995 March 8, 1995
#
# URL: http://www.ics.uci.edu/pub/ietf/http/draft-ietf-http-v10-spec-00.txt
# Log files
# ---------
#
# Here's a quote from the NCSA httpd docs about log file format.
#
# | The logfile format is as follows. Each line consists of:
# |
# | host rfc931 authuser [DD/Mon/YYYY:hh:mm:ss] "request" ddd bbbb
# |
# | host: Either the DNS name or the IP number of the remote client
# | rfc931: Any information returned by identd for this person,
# | - otherwise.
# | authuser: If user sent a userid for authentication, the user name,
# | - otherwise.
# | DD: Day
# | Mon: Month (calendar name)
# | YYYY: Year
# | hh: hour (24-hour format, the machine's timezone)
# | mm: minutes
# | ss: seconds
# | request: The first line of the HTTP request as sent by the client.
# | ddd: the status code returned by the server, - if not available.
# | bbbb: the total number of bytes sent,
# | *not including the HTTP/1.0 header*, - if not available
# |
# | You can determine the name of the file accessed through request.
#
# (Actually, the latter is only true if you know the server configuration
# at the time the request was made!)
__version__ = "0.2"
__all__ = ["HTTPServer", "BaseHTTPRequestHandler"]
import sys
import time
import socket # For gethostbyaddr()
import mimetools
import SocketServer
# Default error message
DEFAULT_ERROR_MESSAGE = """\
<head>
<title>Error response</title>
</head>
<body>
<h1>Error response</h1>
<p>Error code %(code)d.
<p>Message: %(message)s.
<p>Error code explanation: %(code)s = %(explain)s.
</body>
"""
class HTTPServer(SocketServer.TCPServer):
allow_reuse_address = 1 # Seems to make sense in testing environment
def server_bind(self):
"""Override server_bind to store the server name."""
SocketServer.TCPServer.server_bind(self)
host, port = self.socket.getsockname()
self.server_name = socket.getfqdn(host)
self.server_port = port
class BaseHTTPRequestHandler(SocketServer.StreamRequestHandler):
"""HTTP request handler base class.
The following explanation of HTTP serves to guide you through the
code as well as to expose any misunderstandings I may have about
HTTP (so you don't need to read the code to figure out I'm wrong
:-).
HTTP (HyperText Transfer Protocol) is an extensible protocol on
top of a reliable stream transport (e.g. TCP/IP). The protocol
recognizes three parts to a request:
1. One line identifying the request type and path
2. An optional set of RFC-822-style headers
3. An optional data part
The headers and data are separated by a blank line.
The first line of the request has the form
<command> <path> <version>
where <command> is a (case-sensitive) keyword such as GET or POST,
<path> is a string containing path information for the request,
and <version> should be the string "HTTP/1.0". <path> is encoded
using the URL encoding scheme (using %xx to signify the ASCII
character with hex code xx).
The protocol is vague about whether lines are separated by LF
characters or by CRLF pairs -- for compatibility with the widest
range of clients, both should be accepted. Similarly, whitespace
in the request line should be treated sensibly (allowing multiple
spaces between components and allowing trailing whitespace).
Similarly, for output, lines ought to be separated by CRLF pairs
but most clients grok LF characters just fine.
If the first line of the request has the form
<command> <path>
(i.e. <version> is left out) then this is assumed to be an HTTP
0.9 request; this form has no optional headers and data part and
the reply consists of just the data.
The reply form of the HTTP 1.0 protocol again has three parts:
1. One line giving the response code
2. An optional set of RFC-822-style headers
3. The data
Again, the headers and data are separated by a blank line.
The response code line has the form
<version> <responsecode> <responsestring>
where <version> is the protocol version (always "HTTP/1.0"),
<responsecode> is a 3-digit response code indicating success or
failure of the request, and <responsestring> is an optional
human-readable string explaining what the response code means.
This server parses the request and the headers, and then calls a
function specific to the request type (<command>). Specifically,
a request SPAM will be handled by a method do_SPAM(). If no
such method exists the server sends an error response to the
client. If it exists, it is called with no arguments:
do_SPAM()
Note that the request name is case sensitive (i.e. SPAM and spam
are different requests).
The various request details are stored in instance variables:
- client_address is the client IP address in the form (host,
port);
- command, path and version are the broken-down request line;
- headers is an instance of mimetools.Message (or a derived
class) containing the header information;
- rfile is a file object open for reading positioned at the
start of the optional input data part;
- wfile is a file object open for writing.
IT IS IMPORTANT TO ADHERE TO THE PROTOCOL FOR WRITING!
The first thing to be written must be the response line. Then
follow 0 or more header lines, then a blank line, and then the
actual data (if any). The meaning of the header lines depends on
the command executed by the server; in most cases, when data is
returned, there should be at least one header line of the form
Content-type: <type>/<subtype>
where <type> and <subtype> should be registered MIME types,
e.g. "text/html" or "text/plain".
"""
# The Python system version, truncated to its first component.
sys_version = "Python/" + sys.version.split()[0]
# The server software version. You may want to override this.
# The format is multiple whitespace-separated strings,
# where each string is of the form name[/version].
server_version = "BaseHTTP/" + __version__
def parse_request(self):
"""Parse a request (internal).
The request should be stored in self.raw_requestline; the results
are in self.command, self.path, self.request_version and
self.headers.
Return value is 1 for success, 0 for failure; on failure, an
error is sent back.
"""
self.request_version = version = "HTTP/0.9" # Default
requestline = self.raw_requestline
if requestline[-2:] == '\r\n':
requestline = requestline[:-2]
elif requestline[-1:] == '\n':
requestline = requestline[:-1]
self.requestline = requestline
words = requestline.split()
if len(words) == 3:
[command, path, version] = words
if version[:5] != 'HTTP/':
self.send_error(400, "Bad request version (%s)" % `version`)
return 0
elif len(words) == 2:
[command, path] = words
if command != 'GET':
self.send_error(400,
"Bad HTTP/0.9 request type (%s)" % `command`)
return 0
else:
self.send_error(400, "Bad request syntax (%s)" % `requestline`)
return 0
self.command, self.path, self.request_version = command, path, version
self.headers = self.MessageClass(self.rfile, 0)
return 1
def handle(self):
"""Handle a single HTTP request.
You normally don't need to override this method; see the class
__doc__ string for information on how to handle specific HTTP
commands such as GET and POST.
"""
self.raw_requestline = self.rfile.readline()
if not self.parse_request(): # An error code has been sent, just exit
return
mname = 'do_' + self.command
if not hasattr(self, mname):
self.send_error(501, "Unsupported method (%s)" % `self.command`)
return
method = getattr(self, mname)
method()
def send_error(self, code, message=None):
"""Send and log an error reply.
Arguments are the error code, and a detailed message.
The detailed message defaults to the short entry matching the
response code.
This sends an error response (so it must be called before any
output has been generated), logs the error, and finally sends
a piece of HTML explaining the error to the user.
"""
try:
short, long = self.responses[code]
except KeyError:
short, long = '???', '???'
if not message:
message = short
explain = long
self.log_error("code %d, message %s", code, message)
self.send_response(code, message)
self.end_headers()
self.wfile.write(self.error_message_format %
{'code': code,
'message': message,
'explain': explain})
error_message_format = DEFAULT_ERROR_MESSAGE
def send_response(self, code, message=None):
"""Send the response header and log the response code.
Also send two standard headers with the server software
version and the current date.
"""
self.log_request(code)
if message is None:
if self.responses.has_key(code):
message = self.responses[code][0]
else:
message = ''
if self.request_version != 'HTTP/0.9':
self.wfile.write("%s %s %s\r\n" %
(self.protocol_version, str(code), message))
self.send_header('Server', self.version_string())
self.send_header('Date', self.date_time_string())
def send_header(self, keyword, value):
"""Send a MIME header."""
if self.request_version != 'HTTP/0.9':
self.wfile.write("%s: %s\r\n" % (keyword, value))
def end_headers(self):
"""Send the blank line ending the MIME headers."""
if self.request_version != 'HTTP/0.9':
self.wfile.write("\r\n")
def log_request(self, code='-', size='-'):
"""Log an accepted request.
This is called by send_response().
"""
self.log_message('"%s" %s %s',
self.requestline, str(code), str(size))
def log_error(self, *args):
"""Log an error.
This is called when a request cannot be fulfilled. By
default it passes the message on to log_message().
Arguments are the same as for log_message().
XXX This should go to the separate error log.
"""
apply(self.log_message, args)
def log_message(self, format, *args):
"""Log an arbitrary message.
This is used by all other logging functions. Override
it if you have specific logging wishes.
The first argument, FORMAT, is a format string for the
message to be logged. If the format string contains
any % escapes requiring parameters, they should be
specified as subsequent arguments (it's just like
printf!).
The client host and current date/time are prefixed to
every message.
"""
sys.stderr.write("%s - - [%s] %s\n" %
(self.address_string(),
self.log_date_time_string(),
format%args))
def version_string(self):
"""Return the server software version string."""
return self.server_version + ' ' + self.sys_version
def date_time_string(self):
"""Return the current date and time formatted for a message header."""
now = time.time()
year, month, day, hh, mm, ss, wd, y, z = time.gmtime(now)
s = "%s, %02d %3s %4d %02d:%02d:%02d GMT" % (
self.weekdayname[wd],
day, self.monthname[month], year,
hh, mm, ss)
return s
def log_date_time_string(self):
"""Return the current time formatted for logging."""
now = time.time()
year, month, day, hh, mm, ss, x, y, z = time.localtime(now)
s = "%02d/%3s/%04d %02d:%02d:%02d" % (
day, self.monthname[month], year, hh, mm, ss)
return s
weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
monthname = [None,
'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
def address_string(self):
"""Return the client address formatted for logging.
This version looks up the full hostname using gethostbyaddr(),
and tries to find a name that contains at least one dot.
"""
host, port = self.client_address
return socket.getfqdn(host)
# Essentially static class variables
# The version of the HTTP protocol we support.
# Don't override unless you know what you're doing (hint: incoming
# requests are required to have exactly this version string).
protocol_version = "HTTP/1.0"
# The Message-like class used to parse headers
MessageClass = mimetools.Message
# Table mapping response codes to messages; entries have the
# form {code: (shortmessage, longmessage)}.
# See http://www.w3.org/hypertext/WWW/Protocols/HTTP/HTRESP.html
responses = {
200: ('OK', 'Request fulfilled, document follows'),
201: ('Created', 'Document created, URL follows'),
202: ('Accepted',
'Request accepted, processing continues off-line'),
203: ('Partial information', 'Request fulfilled from cache'),
204: ('No response', 'Request fulfilled, nothing follows'),
301: ('Moved', 'Object moved permanently -- see URI list'),
302: ('Found', 'Object moved temporarily -- see URI list'),
303: ('Method', 'Object moved -- see Method and URL list'),
304: ('Not modified',
'Document has not changed since given time'),
400: ('Bad request',
'Bad request syntax or unsupported method'),
401: ('Unauthorized',
'No permission -- see authorization schemes'),
402: ('Payment required',
'No payment -- see charging schemes'),
403: ('Forbidden',
'Request forbidden -- authorization will not help'),
404: ('Not found', 'Nothing matches the given URI'),
500: ('Internal error', 'Server got itself in trouble'),
501: ('Not implemented',
'Server does not support this operation'),
502: ('Service temporarily overloaded',
'The server cannot process the request due to a high load'),
503: ('Gateway timeout',
'The gateway server did not receive a timely response'),
}
def test(HandlerClass = BaseHTTPRequestHandler,
ServerClass = HTTPServer):
"""Test the HTTP request handler class.
This runs an HTTP server on port 8000 (or the first command line
argument).
"""
if sys.argv[1:]:
port = int(sys.argv[1])
else:
port = 8000
server_address = ('', port)
httpd = ServerClass(server_address, HandlerClass)
print "Serving HTTP on port", port, "..."
httpd.serve_forever()
if __name__ == '__main__':
test()

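The docstring of the removed BaseHTTPServer.py describes how handle() parses the request line and dispatches to a do_<COMMAND> method such as do_GET. The same pattern survives in Python 3's http.server, the descendant of this module; a minimal sketch, with the handler name and port chosen arbitrarily:

# Minimal sketch of the do_<COMMAND> dispatch described above, using the
# modern http.server module (the Python 3 descendant of BaseHTTPServer).
from http.server import BaseHTTPRequestHandler, HTTPServer

class HelloHandler(BaseHTTPRequestHandler):
    def do_GET(self):
        # A GET request lands here because the method name is do_GET.
        body = b"hello\n"
        self.send_response(200)                         # status line
        self.send_header("Content-Type", "text/plain")
        self.send_header("Content-Length", str(len(body)))
        self.end_headers()                              # blank line ends the headers
        self.wfile.write(body)                          # then the payload

if __name__ == "__main__":
    HTTPServer(("", 8000), HelloHandler).serve_forever()

Requesting http://localhost:8000/ then returns the body via exactly the response-line, headers, blank line, data sequence the docstring spells out.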
+ 0  - 304   lib/jython/Lib/CGIHTTPServer.py

@@ -1,305 +0,0 @@
"""CGI-savvy HTTP Server.
This module builds on SimpleHTTPServer by implementing GET and POST
requests to cgi-bin scripts.
If the os.fork() function is not present (e.g. on Windows),
os.popen2() is used as a fallback, with slightly altered semantics; if
that function is not present either (e.g. on Macintosh), only Python
scripts are supported, and they are executed by the current process.
In all cases, the implementation is intentionally naive -- all
requests are executed synchronously.
SECURITY WARNING: DON'T USE THIS CODE UNLESS YOU ARE INSIDE A FIREWALL
"""
__version__ = "0.4"
__all__ = ["CGIHTTPRequestHandler"]
import os
import sys
import urllib
import BaseHTTPServer
import SimpleHTTPServer
class CGIHTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
"""Complete HTTP server with GET, HEAD and POST commands.
GET and HEAD also support running CGI scripts.
The POST command is *only* implemented for CGI scripts.
"""
# Determine platform specifics
have_fork = hasattr(os, 'fork')
have_popen2 = hasattr(os, 'popen2')
# Make rfile unbuffered -- we need to read one line and then pass
# the rest to a subprocess, so we can't use buffered input.
rbufsize = 0
def do_POST(self):
"""Serve a POST request.
This is only implemented for CGI scripts.
"""
if self.is_cgi():
self.run_cgi()
else:
self.send_error(501, "Can only POST to CGI scripts")
def send_head(self):
"""Version of send_head that support CGI scripts"""
if self.is_cgi():
return self.run_cgi()
else:
return SimpleHTTPServer.SimpleHTTPRequestHandler.send_head(self)
def is_cgi(self):
"""Test whether self.path corresponds to a CGI script.
Return a tuple (dir, rest) if self.path requires running a
CGI script, None if not. Note that rest begins with a
slash if it is not empty.
The default implementation tests whether the path
begins with one of the strings in the list
self.cgi_directories (and the next character is a '/'
or the end of the string).
"""
path = self.path
for x in self.cgi_directories:
i = len(x)
if path[:i] == x and (not path[i:] or path[i] == '/'):
self.cgi_info = path[:i], path[i+1:]
return 1
return 0
cgi_directories = ['/cgi-bin', '/htbin']
def is_executable(self, path):
"""Test whether argument path is an executable file."""
return executable(path)
def is_python(self, path):
"""Test whether argument path is a Python script."""
head, tail = os.path.splitext(path)
return tail.lower() in (".py", ".pyw")
def run_cgi(self):
"""Execute a CGI script."""
dir, rest = self.cgi_info
i = rest.rfind('?')
if i >= 0:
rest, query = rest[:i], rest[i+1:]
else:
query = ''
i = rest.find('/')
if i >= 0:
script, rest = rest[:i], rest[i:]
else:
script, rest = rest, ''
scriptname = dir + '/' + script
scriptfile = self.translate_path(scriptname)
if not os.path.exists(scriptfile):
self.send_error(404, "No such CGI script (%s)" % `scriptname`)
return
if not os.path.isfile(scriptfile):
self.send_error(403, "CGI script is not a plain file (%s)" %
`scriptname`)
return
ispy = self.is_python(scriptname)
if not ispy:
if not (self.have_fork or self.have_popen2):
self.send_error(403, "CGI script is not a Python script (%s)" %
`scriptname`)
return
if not self.is_executable(scriptfile):
self.send_error(403, "CGI script is not executable (%s)" %
`scriptname`)
return
# Reference: http://hoohoo.ncsa.uiuc.edu/cgi/env.html
# XXX Much of the following could be prepared ahead of time!
env = {}
env['SERVER_SOFTWARE'] = self.version_string()
env['SERVER_NAME'] = self.server.server_name
env['GATEWAY_INTERFACE'] = 'CGI/1.1'
env['SERVER_PROTOCOL'] = self.protocol_version
env['SERVER_PORT'] = str(self.server.server_port)
env['REQUEST_METHOD'] = self.command
uqrest = urllib.unquote(rest)
env['PATH_INFO'] = uqrest
env['PATH_TRANSLATED'] = self.translate_path(uqrest)
env['SCRIPT_NAME'] = scriptname
if query:
env['QUERY_STRING'] = query
host = self.address_string()
if host != self.client_address[0]:
env['REMOTE_HOST'] = host
env['REMOTE_ADDR'] = self.client_address[0]
# XXX AUTH_TYPE
# XXX REMOTE_USER
# XXX REMOTE_IDENT
if self.headers.typeheader is None:
env['CONTENT_TYPE'] = self.headers.type
else:
env['CONTENT_TYPE'] = self.headers.typeheader
length = self.headers.getheader('content-length')
if length:
env['CONTENT_LENGTH'] = length
accept = []
for line in self.headers.getallmatchingheaders('accept'):
if line[:1] in "\t\n\r ":
accept.append(line.strip())
else:
accept = accept + line[7:].split(',')
env['HTTP_ACCEPT'] = ','.join(accept)
ua = self.headers.getheader('user-agent')
if ua:
env['HTTP_USER_AGENT'] = ua
co = filter(None, self.headers.getheaders('cookie'))
if co:
env['HTTP_COOKIE'] = ', '.join(co)
# XXX Other HTTP_* headers
if not self.have_fork:
# Since we're setting the env in the parent, provide empty
# values to override previously set values
for k in ('QUERY_STRING', 'REMOTE_HOST', 'CONTENT_LENGTH',
'HTTP_USER_AGENT', 'HTTP_COOKIE'):
env.setdefault(k, "")
self.send_response(200, "Script output follows")
decoded_query = query.replace('+', ' ')
if self.have_fork:
# Unix -- fork as we should
args = [script]
if '=' not in decoded_query:
args.append(decoded_query)
nobody = nobody_uid()
self.wfile.flush() # Always flush before forking
pid = os.fork()
if pid != 0:
# Parent
pid, sts = os.waitpid(pid, 0)
if sts:
self.log_error("CGI script exit status %#x", sts)
return
# Child
try:
try:
os.setuid(nobody)
except os.error:
pass
os.dup2(self.rfile.fileno(), 0)
os.dup2(self.wfile.fileno(), 1)
os.execve(scriptfile, args, env)
except:
self.server.handle_error(self.request, self.client_address)
os._exit(127)
elif self.have_popen2:
# Windows -- use popen2 to create a subprocess
import shutil
os.environ.update(env)
cmdline = scriptfile
if self.is_python(scriptfile):
interp = sys.executable
if interp.lower().endswith("w.exe"):
# On Windows, use python.exe, not pythonw.exe
interp = interp[:-5] + interp[-4:]
cmdline = "%s %s" % (interp, cmdline)
if '=' not in query and '"' not in query:
cmdline = '%s "%s"' % (cmdline, query)
self.log_error("command: %s", cmdline)
try:
nbytes = int(length)
except:
nbytes = 0
fi, fo = os.popen2(cmdline)
if self.command.lower() == "post" and nbytes > 0:
data = self.rfile.read(nbytes)
fi.write(data)
fi.close()
shutil.copyfileobj(fo, self.wfile)
sts = fo.close()
if sts:
self.log_error("CGI script exit status %#x", sts)
else:
self.log_error("CGI script exited OK")
else:
# Other O.S. -- execute script in this process
os.environ.update(env)
save_argv = sys.argv
save_stdin = sys.stdin
save_stdout = sys.stdout
save_stderr = sys.stderr
try:
try:
sys.argv = [scriptfile]
if '=' not in decoded_query:
sys.argv.append(decoded_query)
sys.stdout = self.wfile
sys.stdin = self.rfile
execfile(scriptfile, {"__name__": "__main__"})
finally:
sys.argv = save_argv
sys.stdin = save_stdin
sys.stdout = save_stdout
sys.stderr = save_stderr
except SystemExit, sts:
self.log_error("CGI script exit status %s", str(sts))
else:
self.log_error("CGI script exited OK")
nobody = None
def nobody_uid():
"""Internal routine to get nobody's uid"""
global nobody
if nobody:
return nobody
try:
import pwd
except ImportError:
return -1
try:
nobody = pwd.getpwnam('nobody')[2]
except KeyError:
nobody = 1 + max(map(lambda x: x[2], pwd.getpwall()))
return nobody
def executable(path):
"""Test for executable file."""
try:
st = os.stat(path)
except os.error:
return 0
return st[0] & 0111 != 0
def test(HandlerClass = CGIHTTPRequestHandler,
ServerClass = BaseHTTPServer.HTTPServer):
SimpleHTTPServer.test(HandlerClass, ServerClass)
if __name__ == '__main__':
test()

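The is_cgi() method removed above decides whether a request path falls under one of the configured CGI directories: the prefix must be followed by '/' or the end of the path. The same test, restated as a self-contained sketch (directory list and sample paths are illustrative only):

# The CGI-directory test from the removed CGIHTTPServer.py, restated in isolation.
CGI_DIRECTORIES = ["/cgi-bin", "/htbin"]

def is_cgi_path(path, cgi_directories=CGI_DIRECTORIES):
    """Return (dir, rest) if path points into a CGI directory, else None."""
    for prefix in cgi_directories:
        if path.startswith(prefix) and (len(path) == len(prefix) or path[len(prefix)] == "/"):
            return prefix, path[len(prefix) + 1:]
    return None

assert is_cgi_path("/cgi-bin/test.py?x=1") == ("/cgi-bin", "test.py?x=1")
assert is_cgi_path("/static/index.html") is None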
+ 0  - 480   lib/jython/Lib/ConfigParser.py

@@ -1,480 +0,0 @@
"""Configuration file parser.
A setup file consists of sections, lead by a "[section]" header,
and followed by "name: value" entries, with continuations and such in
the style of RFC 822.
The option values can contain format strings which refer to other values in
the same section, or values in a special [DEFAULT] section.
For example:
something: %(dir)s/whatever
would resolve the "%(dir)s" to the value of dir. All reference
expansions are done late, on demand.
Intrinsic defaults can be specified by passing them into the
ConfigParser constructor as a dictionary.
class:
ConfigParser -- responsible for parsing a list of
configuration files, and managing the parsed database.
methods:
__init__(defaults=None)
create the parser and specify a dictionary of intrinsic defaults. The
keys must be strings, the values must be appropriate for %()s string
interpolation. Note that `__name__' is always an intrinsic default;
its value is the section's name.
sections()
return all the configuration section names, sans DEFAULT
has_section(section)
return whether the given section exists
has_option(section, option)
return whether the given option exists in the given section
options(section)
return list of configuration options for the named section
has_option(section, option)
return whether the given section has the given option
read(filenames)
read and parse the list of named configuration files, given by
name. A single filename is also allowed. Non-existing files
are ignored.
readfp(fp, filename=None)
read and parse one configuration file, given as a file object.
The filename defaults to fp.name; it is only used in error
messages (if fp has no `name' attribute, the string `<???>' is used).
get(section, option, raw=0, vars=None)
return a string value for the named option. All % interpolations are
expanded in the return values, based on the defaults passed into the
constructor and the DEFAULT section. Additional substitutions may be
provided using the `vars' argument, which must be a dictionary whose
contents override any pre-existing defaults.
getint(section, options)
like get(), but convert value to an integer
getfloat(section, options)
like get(), but convert value to a float
getboolean(section, options)
like get(), but convert value to a boolean (currently defined as 0 or
1, only)
remove_section(section)
remove the given file section and all its options
remove_option(section, option)
remove the given option from the given section
set(section, option, value)
set the given option
write(fp)
write the configuration state in .ini format
"""
import sys
import string
import re
__all__ = ["NoSectionError","DuplicateSectionError","NoOptionError",
"InterpolationError","InterpolationDepthError","ParsingError",
"MissingSectionHeaderError","ConfigParser",
"MAX_INTERPOLATION_DEPTH"]
DEFAULTSECT = "DEFAULT"
MAX_INTERPOLATION_DEPTH = 10
# exception classes
class Error(Exception):
def __init__(self, msg=''):
self._msg = msg
Exception.__init__(self, msg)
def __repr__(self):
return self._msg
__str__ = __repr__
class NoSectionError(Error):
def __init__(self, section):
Error.__init__(self, 'No section: %s' % section)
self.section = section
class DuplicateSectionError(Error):
def __init__(self, section):
Error.__init__(self, "Section %s already exists" % section)
self.section = section
class NoOptionError(Error):
def __init__(self, option, section):
Error.__init__(self, "No option `%s' in section: %s" %
(option, section))
self.option = option
self.section = section
class InterpolationError(Error):
def __init__(self, reference, option, section, rawval):
Error.__init__(self,
"Bad value substitution:\n"
"\tsection: [%s]\n"
"\toption : %s\n"
"\tkey : %s\n"
"\trawval : %s\n"
% (section, option, reference, rawval))
self.reference = reference
self.option = option
self.section = section
class InterpolationDepthError(Error):
def __init__(self, option, section, rawval):
Error.__init__(self,
"Value interpolation too deeply recursive:\n"
"\tsection: [%s]\n"
"\toption : %s\n"
"\trawval : %s\n"
% (section, option, rawval))
self.option = option
self.section = section
class ParsingError(Error):
def __init__(self, filename):
Error.__init__(self, 'File contains parsing errors: %s' % filename)
self.filename = filename
self.errors = []
def append(self, lineno, line):
self.errors.append((lineno, line))
self._msg = self._msg + '\n\t[line %2d]: %s' % (lineno, line)
class MissingSectionHeaderError(ParsingError):
def __init__(self, filename, lineno, line):
Error.__init__(
self,
'File contains no section headers.\nfile: %s, line: %d\n%s' %
(filename, lineno, line))
self.filename = filename
self.lineno = lineno
self.line = line
class ConfigParser:
def __init__(self, defaults=None):
self.__sections = {}
if defaults is None:
self.__defaults = {}
else:
self.__defaults = defaults
def defaults(self):
return self.__defaults
def sections(self):
"""Return a list of section names, excluding [DEFAULT]"""
# self.__sections will never have [DEFAULT] in it
return self.__sections.keys()
def add_section(self, section):
"""Create a new section in the configuration.
Raise DuplicateSectionError if a section by the specified name
already exists.
"""
if self.__sections.has_key(section):
raise DuplicateSectionError(section)
self.__sections[section] = {}
def has_section(self, section):
"""Indicate whether the named section is present in the configuration.
The DEFAULT section is not acknowledged.
"""
return section in self.sections()
def options(self, section):
"""Return a list of option names for the given section name."""
try:
opts = self.__sections[section].copy()
except KeyError:
raise NoSectionError(section)
opts.update(self.__defaults)
if opts.has_key('__name__'):
del opts['__name__']
return opts.keys()
def has_option(self, section, option):
"""Return whether the given section has the given option."""
return option in self.options(section)
def read(self, filenames):
"""Read and parse a filename or a list of filenames.
Files that cannot be opened are silently ignored; this is
designed so that you can specify a list of potential
configuration file locations (e.g. current directory, user's
home directory, systemwide directory), and all existing
configuration files in the list will be read. A single
filename may also be given.
"""
if type(filenames) in [type(''), type(u'')]:
filenames = [filenames]
for filename in filenames:
try:
fp = open(filename)
except IOError:
continue
self.__read(fp, filename)
fp.close()
def readfp(self, fp, filename=None):
"""Like read() but the argument must be a file-like object.
The `fp' argument must have a `readline' method. Optional
second argument is the `filename', which if not given, is
taken from fp.name. If fp has no `name' attribute, `<???>' is
used.
"""
if filename is None:
try:
filename = fp.name
except AttributeError:
filename = '<???>'
self.__read(fp, filename)
def get(self, section, option, raw=0, vars=None):
"""Get an option value for a given section.
All % interpolations are expanded in the return values, based on the
defaults passed into the constructor, unless the optional argument
`raw' is true. Additional substitutions may be provided using the
`vars' argument, which must be a dictionary whose contents overrides
any pre-existing defaults.
The section DEFAULT is special.
"""
try:
sectdict = self.__sections[section].copy()
except KeyError:
if section == DEFAULTSECT:
sectdict = {}
else:
raise NoSectionError(section)
d = self.__defaults.copy()
d.update(sectdict)
# Update with the entry specific variables
if vars:
d.update(vars)
option = self.optionxform(option)
try:
rawval = d[option]
except KeyError:
raise NoOptionError(option, section)
if raw:
return rawval
# do the string interpolation
value = rawval # Make it a pretty variable name
depth = 0
while depth < 10: # Loop through this until it's done
depth = depth + 1
if value.find("%(") >= 0:
try:
value = value % d
except KeyError, key:
raise InterpolationError(key, option, section, rawval)
else:
break
if value.find("%(") >= 0:
raise InterpolationDepthError(option, section, rawval)
return value
def __get(self, section, conv, option):
return conv(self.get(section, option))
def getint(self, section, option):
return self.__get(section, string.atoi, option)
def getfloat(self, section, option):
return self.__get(section, string.atof, option)
def getboolean(self, section, option):
v = self.get(section, option)
val = int(v)
if val not in (0, 1):
raise ValueError, 'Not a boolean: %s' % v
return val
def optionxform(self, optionstr):
return optionstr.lower()
def has_option(self, section, option):
"""Check for the existence of a given option in a given section."""
if not section or section == "DEFAULT":
return self.__defaults.has_key(option)
elif not self.has_section(section):
return 0
else:
option = self.optionxform(option)
return self.__sections[section].has_key(option)
def set(self, section, option, value):
"""Set an option."""
if not section or section == "DEFAULT":
sectdict = self.__defaults
else:
try:
sectdict = self.__sections[section]
except KeyError:
raise NoSectionError(section)
option = self.optionxform(option)
sectdict[option] = value
def write(self, fp):
"""Write an .ini-format representation of the configuration state."""
if self.__defaults:
fp.write("[DEFAULT]\n")
for (key, value) in self.__defaults.items():
fp.write("%s = %s\n" % (key, value))
fp.write("\n")
for section in self.sections():
fp.write("[" + section + "]\n")
sectdict = self.__sections[section]
for (key, value) in sectdict.items():
if key == "__name__":
continue
fp.write("%s = %s\n" % (key, value))
fp.write("\n")
def remove_option(self, section, option):
"""Remove an option."""
if not section or section == "DEFAULT":
sectdict = self.__defaults
else:
try:
sectdict = self.__sections[section]
except KeyError:
raise NoSectionError(section)
option = self.optionxform(option)
existed = sectdict.has_key(option)
if existed:
del sectdict[option]
return existed
def remove_section(self, section):
"""Remove a file section."""
if self.__sections.has_key(section):
del self.__sections[section]
return 1
else:
return 0
#
# Regular expressions for parsing section headers and options. Note a
# slight semantic change from the previous version, because of the use
# of \w, _ is allowed in section header names.
SECTCRE = re.compile(
r'\[' # [
r'(?P<header>[^]]+)' # very permissive!
r'\]' # ]
)
OPTCRE = re.compile(
r'(?P<option>[]\-[\w_.*,(){}]+)' # a lot of stuff found by IvL
r'[ \t]*(?P<vi>[:=])[ \t]*' # any number of space/tab,
# followed by separator
# (either : or =), followed
# by any # space/tab
r'(?P<value>.*)$' # everything up to eol
)
def __read(self, fp, fpname):
"""Parse a sectioned setup file.
The sections in setup file contains a title line at the top,
indicated by a name in square brackets (`[]'), plus key/value
options lines, indicated by `name: value' format lines.
Continuation are represented by an embedded newline then
leading whitespace. Blank lines, lines beginning with a '#',
and just about everything else is ignored.
"""
cursect = None # None, or a dictionary
optname = None
lineno = 0
e = None # None, or an exception
while 1:
line = fp.readline()
if not line:
break
lineno = lineno + 1
# comment or blank line?
if line.strip() == '' or line[0] in '#;':
continue
if line.split()[0].lower() == 'rem' \
and line[0] in "rR": # no leading whitespace
continue
# continuation line?
if line[0] in ' \t' and cursect is not None and optname:
value = line.strip()
if value:
k = self.optionxform(optname)
cursect[k] = "%s\n%s" % (cursect[k], value)
# a section header or option header?
else:
# is it a section header?
mo = self.SECTCRE.match(line)
if mo:
sectname = mo.group('header')
if self.__sections.has_key(sectname):
cursect = self.__sections[sectname]
elif sectname == DEFAULTSECT:
cursect = self.__defaults
else:
cursect = {'__name__': sectname}
self.__sections[sectname] = cursect
# So sections can't start with a continuation line
optname = None
# no section header in the file?
elif cursect is None:
raise MissingSectionHeaderError(fpname, lineno, `line`)
# an option line?
else:
mo = self.OPTCRE.match(line)
if mo:
optname, vi, optval = mo.group('option', 'vi', 'value')
if vi in ('=', ':') and ';' in optval:
# ';' is a comment delimiter only if it follows
# a spacing character
pos = optval.find(';')
if pos and optval[pos-1] in string.whitespace:
optval = optval[:pos]
optval = optval.strip()
# allow empty values
if optval == '""':
optval = ''
cursect[self.optionxform(optname)] = optval
else:
# a non-fatal parsing error occurred. set up the
# exception but keep going. the exception will be
# raised at the end of the file and will contain a
# list of all bogus lines
if not e:
e = ParsingError(fpname)
e.append(lineno, `line`)
# if any parsing errors occurred, raise an exception
if e:
raise e

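The ConfigParser.py docstring above explains that option values may contain %(name)s references that are expanded on demand against the section and the special [DEFAULT] section. Python 3's configparser, which descends from this module, still behaves this way; a minimal sketch with made-up section and key names:

# Minimal sketch of the %(name)s interpolation described above, using the
# modern configparser module (the Python 3 descendant of ConfigParser.py).
import configparser

cfg = configparser.ConfigParser()   # BasicInterpolation by default
cfg.read_string("""
[DEFAULT]
dir = /tmp

[paths]
something = %(dir)s/whatever
""")

print(cfg.get("paths", "something"))            # -> /tmp/whatever (expanded)
print(cfg.get("paths", "something", raw=True))  # -> %(dir)s/whatever (raw value)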
+ 0  - 734   lib/jython/Lib/Cookie.py

@@ -1,734 +0,0 @@
#!/usr/bin/env python
#
####
# Copyright 2000 by Timothy O'Malley <timo@alum.mit.edu>
#
# All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software
# and its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Timothy O'Malley not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# Timothy O'Malley DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
# SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS, IN NO EVENT SHALL Timothy O'Malley BE LIABLE FOR
# ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
#
####
#
# Id: Cookie.py,v 2.29 2000/08/23 05:28:49 timo Exp
# by Timothy O'Malley <timo@alum.mit.edu>
#
# Cookie.py is a Python module for the handling of HTTP
# cookies as a Python dictionary. See RFC 2109 for more
# information on cookies.
#
# The original idea to treat Cookies as a dictionary came from
# Dave Mitchell (davem@magnet.com) in 1995, when he released the
# first version of nscookie.py.
#
####
r"""
Here's a sample session to show how to use this module.
At the moment, this is the only documentation.
The Basics
----------
Importing is easy..
>>> import Cookie
Most of the time you start by creating a cookie. Cookies come in
three flavors, each with slightly different encoding semantics, but
more on that later.
>>> C = Cookie.SimpleCookie()
>>> C = Cookie.SerialCookie()
>>> C = Cookie.SmartCookie()
[Note: Long-time users of Cookie.py will remember using
Cookie.Cookie() to create an Cookie object. Although deprecated, it
is still supported by the code. See the Backward Compatibility notes
for more information.]
Once you've created your Cookie, you can add values just as if it were
a dictionary.
>>> C = Cookie.SmartCookie()
>>> C["fig"] = "newton"
>>> C["sugar"] = "wafer"
>>> print C
Set-Cookie: sugar=wafer;
Set-Cookie: fig=newton;
Notice that the printable representation of a Cookie is the
appropriate format for a Set-Cookie: header. This is the
default behavior. You can change the header and printed
attributes by using the .output() function
>>> C = Cookie.SmartCookie()
>>> C["rocky"] = "road"
>>> C["rocky"]["path"] = "/cookie"
>>> print C.output(header="Cookie:")
Cookie: rocky=road; Path=/cookie;
>>> print C.output(attrs=[], header="Cookie:")
Cookie: rocky=road;
The load() method of a Cookie extracts cookies from a string. In a
CGI script, you would use this method to extract the cookies from the
HTTP_COOKIE environment variable.
>>> C = Cookie.SmartCookie()
>>> C.load("chips=ahoy; vienna=finger")
>>> print C
Set-Cookie: vienna=finger;
Set-Cookie: chips=ahoy;
The load() method is darn-tootin smart about identifying cookies
within a string. Escaped quotation marks, nested semicolons, and other
such trickeries do not confuse it.
>>> C = Cookie.SmartCookie()
>>> C.load('keebler="E=everybody; L=\\"Loves\\"; fudge=\\012;";')
>>> print C
Set-Cookie: keebler="E=everybody; L=\"Loves\"; fudge=\012;";
Each element of the Cookie also supports all of the RFC 2109
Cookie attributes. Here's an example which sets the Path
attribute.
>>> C = Cookie.SmartCookie()
>>> C["oreo"] = "doublestuff"
>>> C["oreo"]["path"] = "/"
>>> print C
Set-Cookie: oreo=doublestuff; Path=/;
Each dictionary element has a 'value' attribute, which gives you
back the value associated with the key.
>>> C = Cookie.SmartCookie()
>>> C["twix"] = "none for you"
>>> C["twix"].value
'none for you'
A Bit More Advanced
-------------------
As mentioned before, there are three different flavors of Cookie
objects, each with different encoding/decoding semantics. This
section briefly discusses the differences.
SimpleCookie
The SimpleCookie expects that all values should be standard strings.
Just to be sure, SimpleCookie invokes the str() builtin to convert
the value to a string, when the values are set dictionary-style.
>>> C = Cookie.SimpleCookie()
>>> C["number"] = 7
>>> C["string"] = "seven"
>>> C["number"].value
'7'
>>> C["string"].value
'seven'
>>> print C
Set-Cookie: number=7;
Set-Cookie: string=seven;
SerialCookie
The SerialCookie expects that all values should be serialized using
cPickle (or pickle, if cPickle isn't available). As a result of
serializing, SerialCookie can save almost any Python object to a
value, and recover the exact same object when the cookie has been
returned. (SerialCookie can yield some strange-looking cookie
values, however.)
>>> C = Cookie.SerialCookie()
>>> C["number"] = 7
>>> C["string"] = "seven"
>>> C["number"].value
7
>>> C["string"].value
'seven'
>>> print C
Set-Cookie: number="I7\012.";
Set-Cookie: string="S'seven'\012p1\012.";
Be warned, however, if SerialCookie cannot de-serialize a value (because
it isn't a valid pickle'd object), IT WILL RAISE AN EXCEPTION.
SmartCookie
The SmartCookie combines aspects of each of the other two flavors.
When setting a value in a dictionary-fashion, the SmartCookie will
serialize (ala cPickle) the value *if and only if* it isn't a
Python string. String objects are *not* serialized. Similarly,
when the load() method parses out values, it attempts to de-serialize
the value. If it fails, then it falls back to treating the value
as a string.
>>> C = Cookie.SmartCookie()
>>> C["number"] = 7
>>> C["string"] = "seven"
>>> C["number"].value
7
>>> C["string"].value
'seven'
>>> print C
Set-Cookie: number="I7\012.";
Set-Cookie: string=seven;
Backwards Compatibility
-----------------------
In order to keep compatibility with earlier versions of Cookie.py,
it is still possible to use Cookie.Cookie() to create a Cookie. In
fact, this simply returns a SmartCookie.
>>> C = Cookie.Cookie()
>>> print C.__class__.__name__
SmartCookie
Finis.
""" #"
# ^
# |----helps out font-lock
#
# Import our required modules
#
import string, sys
from UserDict import UserDict
try:
from cPickle import dumps, loads
except ImportError:
from pickle import dumps, loads
try:
import re
except ImportError:
raise ImportError, "Cookie.py requires 're' from Python 1.5 or later"
__all__ = ["CookieError","BaseCookie","SimpleCookie","SerialCookie",
"SmartCookie","Cookie"]
#
# Define an exception visible to External modules
#
class CookieError(Exception):
pass
# These quoting routines conform to the RFC2109 specification, which in
# turn references the character definitions from RFC2068. They provide
# a two-way quoting algorithm. Any non-text character is translated
# into a 4 character sequence: a forward-slash followed by the
# three-digit octal equivalent of the character. Any '\' or '"' is
# quoted with a preceding '\' slash.
#
# These are taken from RFC2068 and RFC2109.
# _LegalChars is the list of chars which don't require "'s
# _Translator hash-table for fast quoting
#
_LegalChars = string.letters + string.digits + "!#$%&'*+-.^_`|~"
_Translator = {
'\000' : '\\000', '\001' : '\\001', '\002' : '\\002',
'\003' : '\\003', '\004' : '\\004', '\005' : '\\005',
'\006' : '\\006', '\007' : '\\007', '\010' : '\\010',
'\011' : '\\011', '\012' : '\\012', '\013' : '\\013',
'\014' : '\\014', '\015' : '\\015', '\016' : '\\016',
'\017' : '\\017', '\020' : '\\020', '\021' : '\\021',
'\022' : '\\022', '\023' : '\\023', '\024' : '\\024',
'\025' : '\\025', '\026' : '\\026', '\027' : '\\027',
'\030' : '\\030', '\031' : '\\031', '\032' : '\\032',
'\033' : '\\033', '\034' : '\\034', '\035' : '\\035',
'\036' : '\\036', '\037' : '\\037',
'"' : '\\"', '\\' : '\\\\',
'\177' : '\\177', '\200' : '\\200', '\201' : '\\201',
'\202' : '\\202', '\203' : '\\203', '\204' : '\\204',
'\205' : '\\205', '\206' : '\\206', '\207' : '\\207',
'\210' : '\\210', '\211' : '\\211', '\212' : '\\212',
'\213' : '\\213', '\214' : '\\214', '\215' : '\\215',
'\216' : '\\216', '\217' : '\\217', '\220' : '\\220',
'\221' : '\\221', '\222' : '\\222', '\223' : '\\223',
'\224' : '\\224', '\225' : '\\225', '\226' : '\\226',
'\227' : '\\227', '\230' : '\\230', '\231' : '\\231',
'\232' : '\\232', '\233' : '\\233', '\234' : '\\234',
'\235' : '\\235', '\236' : '\\236', '\237' : '\\237',
'\240' : '\\240', '\241' : '\\241', '\242' : '\\242',
'\243' : '\\243', '\244' : '\\244', '\245' : '\\245',
'\246' : '\\246', '\247' : '\\247', '\250' : '\\250',
'\251' : '\\251', '\252' : '\\252', '\253' : '\\253',
'\254' : '\\254', '\255' : '\\255', '\256' : '\\256',
'\257' : '\\257', '\260' : '\\260', '\261' : '\\261',
'\262' : '\\262', '\263' : '\\263', '\264' : '\\264',
'\265' : '\\265', '\266' : '\\266', '\267' : '\\267',
'\270' : '\\270', '\271' : '\\271', '\272' : '\\272',
'\273' : '\\273', '\274' : '\\274', '\275' : '\\275',
'\276' : '\\276', '\277' : '\\277', '\300' : '\\300',
'\301' : '\\301', '\302' : '\\302', '\303' : '\\303',
'\304' : '\\304', '\305' : '\\305', '\306' : '\\306',
'\307' : '\\307', '\310' : '\\310', '\311' : '\\311',
'\312' : '\\312', '\313' : '\\313', '\314' : '\\314',
'\315' : '\\315', '\316' : '\\316', '\317' : '\\317',
'\320' : '\\320', '\321' : '\\321', '\322' : '\\322',
'\323' : '\\323', '\324' : '\\324', '\325' : '\\325',
'\326' : '\\326', '\327' : '\\327', '\330' : '\\330',
'\331' : '\\331', '\332' : '\\332', '\333' : '\\333',
'\334' : '\\334', '\335' : '\\335', '\336' : '\\336',
'\337' : '\\337', '\340' : '\\340', '\341' : '\\341',
'\342' : '\\342', '\343' : '\\343', '\344' : '\\344',
'\345' : '\\345', '\346' : '\\346', '\347' : '\\347',
'\350' : '\\350', '\351' : '\\351', '\352' : '\\352',
'\353' : '\\353', '\354' : '\\354', '\355' : '\\355',
'\356' : '\\356', '\357' : '\\357', '\360' : '\\360',
'\361' : '\\361', '\362' : '\\362', '\363' : '\\363',
'\364' : '\\364', '\365' : '\\365', '\366' : '\\366',
'\367' : '\\367', '\370' : '\\370', '\371' : '\\371',
'\372' : '\\372', '\373' : '\\373', '\374' : '\\374',
'\375' : '\\375', '\376' : '\\376', '\377' : '\\377'
}
def _quote(str, LegalChars=_LegalChars,
join=string.join, idmap=string._idmap, translate=string.translate):
#
# If the string does not need to be double-quoted,
# then just return the string. Otherwise, surround
# the string in double quotes and precede any special
# character with a backslash.
#
if "" == translate(str, idmap, LegalChars):
return str
else:
return '"' + join( map(_Translator.get, str, str), "" ) + '"'
# end _quote
_OctalPatt = re.compile(r"\\[0-3][0-7][0-7]")
_QuotePatt = re.compile(r"[\\].")
def _unquote(str, join=string.join, atoi=string.atoi):
# If there aren't any doublequotes,
# then there can't be any special characters. See RFC 2109.
if len(str) < 2:
return str
if str[0] != '"' or str[-1] != '"':
return str
# We have to assume that we must decode this string.
# Down to work.
# Remove the "s
str = str[1:-1]
# Check for special sequences. Examples:
# \012 --> \n
# \" --> "
#
i = 0
n = len(str)
res = []
while 0 <= i < n:
Omatch = _OctalPatt.search(str, i)
Qmatch = _QuotePatt.search(str, i)
if not Omatch and not Qmatch: # Neither matched
res.append(str[i:])
break
# else:
j = k = -1
if Omatch: j = Omatch.start(0)
if Qmatch: k = Qmatch.start(0)
if Qmatch and ( not Omatch or k < j ): # QuotePatt matched
res.append(str[i:k])
res.append(str[k+1])
i = k+2
else: # OctalPatt matched
res.append(str[i:j])
res.append( chr( atoi(str[j+1:j+4], 8) ) )
i = j+4
return join(res, "")
# end _unquote
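# Illustrative sketch -- not part of the original Cookie.py; the helper name
# below is hypothetical. It only demonstrates that _quote() leaves a value made
# purely of _LegalChars alone, double-quotes anything else, and that _unquote()
# reverses the transformation (Python 2.x string semantics assumed).
def _quote_roundtrip_example():
    assert _quote("simple-value") == "simple-value"       # only legal chars
    assert _quote("two words") == '"two words"'           # space forces quoting
    original = 'say "hi"\n'
    assert _unquote(_quote(original)) == original          # lossless round trip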
# The _getdate() routine is used to set the expiration time in
# the cookie's HTTP header. By default, _getdate() returns the
# current time in the appropriate "expires" format for a
# Set-Cookie header. The one optional argument is an offset from
# now, in seconds. For example, an offset of -3600 means "one hour ago".
# The offset may be a floating point number.
#
_weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
_monthname = [None,
'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
def _getdate(future=0, weekdayname=_weekdayname, monthname=_monthname):
from time import gmtime, time
now = time()
year, month, day, hh, mm, ss, wd, y, z = gmtime(now + future)
return "%s, %02d-%3s-%4d %02d:%02d:%02d GMT" % \
(weekdayname[wd], day, monthname[month], year, hh, mm, ss)
#
# A class to hold ONE key,value pair.
# In a cookie, each such pair may have several attributes,
# so this class is used to keep the attributes associated
# with the appropriate key,value pair.
# This class also includes a coded_value attribute, which
# is used to hold the network representation of the
# value. This is most useful when Python objects are
# pickled for network transit.
#
class Morsel(UserDict):
# RFC 2109 lists these attributes as reserved:
# path comment domain
# max-age secure version
#
# For historical reasons, these attributes are also reserved:
# expires
#
# This dictionary provides a mapping from the lowercase
# variant on the left to the appropriate traditional
# formatting on the right.
_reserved = { "expires" : "expires",
"path" : "Path",
"comment" : "Comment",
"domain" : "Domain",
"max-age" : "Max-Age",
"secure" : "secure",
"version" : "Version",
}
_reserved_keys = _reserved.keys()
def __init__(self):
# Set defaults
self.key = self.value = self.coded_value = None
UserDict.__init__(self)
# Set default attributes
for K in self._reserved_keys:
UserDict.__setitem__(self, K, "")
# end __init__
def __setitem__(self, K, V):
K = string.lower(K)
if not K in self._reserved_keys:
raise CookieError("Invalid Attribute %s" % K)
UserDict.__setitem__(self, K, V)
# end __setitem__
def isReservedKey(self, K):
return string.lower(K) in self._reserved_keys
# end isReservedKey
def set(self, key, val, coded_val,
LegalChars=_LegalChars,
idmap=string._idmap, translate=string.translate ):
# First we verify that the key isn't a reserved word
# Second we make sure it only contains legal characters
if string.lower(key) in self._reserved_keys:
raise CookieError("Attempt to set a reserved key: %s" % key)
if "" != translate(key, idmap, LegalChars):
raise CookieError("Illegal key value: %s" % key)
# It's a good key, so save it.
self.key = key
self.value = val
self.coded_value = coded_val
# end set
def output(self, attrs=None, header = "Set-Cookie:"):
return "%s %s" % ( header, self.OutputString(attrs) )
__str__ = output
def __repr__(self):
return '<%s: %s=%s>' % (self.__class__.__name__,
self.key, repr(self.value) )
def js_output(self, attrs=None):
# Print javascript
return """
<SCRIPT LANGUAGE="JavaScript">
<!-- begin hiding
document.cookie = \"%s\"
// end hiding -->
</script>
""" % ( self.OutputString(attrs), )
# end js_output()
def OutputString(self, attrs=None):
# Build up our result
#
result = []
RA = result.append
# First, the key=value pair
RA("%s=%s;" % (self.key, self.coded_value))
# Now add any defined attributes
if attrs is None:
attrs = self._reserved_keys
for K,V in self.items():
if V == "": continue
if K not in attrs: continue
if K == "expires" and type(V) == type(1):
RA("%s=%s;" % (self._reserved[K], _getdate(V)))
elif K == "max-age" and type(V) == type(1):
RA("%s=%d;" % (self._reserved[K], V))
elif K == "secure":
RA("%s;" % self._reserved[K])
else:
RA("%s=%s;" % (self._reserved[K], V))
# Return the result
return string.join(result, " ")
# end OutputString
# end Morsel class
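# Illustrative sketch -- not part of the original Cookie.py; the helper name is
# hypothetical. A Morsel holds one key/value pair plus its attributes, and
# OutputString() renders it as the body of a Set-Cookie header; an integer
# "expires" value is formatted via _getdate() above, an integer "max-age" via
# "%d", as OutputString() shows.
def _morsel_example():
    M = Morsel()
    M.set("sessionid", "abc123", "abc123")    # key, value, coded_value
    M["path"] = "/"
    M["max-age"] = 3600
    header = M.output()   # e.g. 'Set-Cookie: sessionid=abc123; Path=/; Max-Age=3600;'
    assert header.startswith("Set-Cookie: sessionid=abc123;")
    return header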
#
# Pattern for finding cookie
#
# This used to be strict parsing based on the RFC2109 and RFC2068
# specifications. I have since discovered that MSIE 3.0x doesn't
# follow the character rules outlined in those specs. As a
# result, the parsing rules here are less strict.
#
_LegalCharsPatt = r"[\w\d!#%&'~_`><@,:/\$\*\+\-\.\^\|\)\(\?\}\{\=]"
_CookiePattern = re.compile(
r"(?x)" # This is a Verbose pattern
r"(?P<key>" # Start of group 'key'
""+ _LegalCharsPatt +"+?" # Any word of at least one letter, nongreedy
r")" # End of group 'key'
r"\s*=\s*" # Equal Sign
r"(?P<val>" # Start of group 'val'
r'"(?:[^\\"]|\\.)*"' # Any doublequoted string
r"|" # or
""+ _LegalCharsPatt +"*" # Any word or empty string
r")" # End of group 'val'
r"\s*;?" # Probably ending in a semi-colon
)
# At long last, here is the cookie class.
# Using this class is almost just like using a dictionary.
# See this module's docstring for example usage.
#
class BaseCookie(UserDict):
# A container class for a set of Morsels
#
def value_decode(self, val):
"""real_value, coded_value = value_decode(STRING)
Called prior to setting a cookie's value from the network
representation. The VALUE is the value read from HTTP
header.
Override this function to modify the behavior of cookies.
"""
return val, val
# end value_decode
def value_encode(self, val):
"""real_value, coded_value = value_encode(VALUE)
Called prior to setting a cookie's value from the dictionary
representation. The VALUE is the value being assigned.
Override this function to modify the behavior of cookies.
"""
strval = str(val)
return strval, strval
# end value_encode
def __init__(self, input=None):
UserDict.__init__(self)
if input: self.load(input)
# end __init__
def __set(self, key, real_value, coded_value):
"""Private method for setting a cookie's value"""
M = self.get(key, Morsel())
M.set(key, real_value, coded_value)
UserDict.__setitem__(self, key, M)
# end __set
def __setitem__(self, key, value):
"""Dictionary style assignment."""
rval, cval = self.value_encode(value)
self.__set(key, rval, cval)
# end __setitem__
def output(self, attrs=None, header="Set-Cookie:", sep="\n"):
"""Return a string suitable for HTTP."""
result = []
for K,V in self.items():
result.append( V.output(attrs, header) )
return string.join(result, sep)
# end output
__str__ = output
def __repr__(self):
L = []
for K,V in self.items():
L.append( '%s=%s' % (K,repr(V.value) ) )
return '<%s: %s>' % (self.__class__.__name__, string.join(L))
def js_output(self, attrs=None):
"""Return a string suitable for JavaScript."""
result = []
for K,V in self.items():
result.append( V.js_output(attrs) )
return string.join(result, "")
# end js_output
def load(self, rawdata):
"""Load cookies from a string (presumably HTTP_COOKIE) or
from a dictionary. Loading cookies from a dictionary 'd'
is equivalent to calling:
map(Cookie.__setitem__, d.keys(), d.values())
"""
if type(rawdata) == type(""):
self.__ParseString(rawdata)
else:
self.update(rawdata)
return
# end load()
def __ParseString(self, str, patt=_CookiePattern):
i = 0 # Our starting point
n = len(str) # Length of string
M = None # current morsel
while 0 <= i < n:
# Start looking for a cookie
match = patt.search(str, i)
if not match: break # No more cookies
K,V = match.group("key"), match.group("val")
i = match.end(0)
# Parse the key, value in case it's metainfo
if K[0] == "$":
# We ignore attributes which pertain to the cookie
# mechanism as a whole. See RFC 2109.
# (Does anyone care?)
if M:
M[ K[1:] ] = V
elif string.lower(K) in Morsel._reserved_keys:
if M:
M[ K ] = _unquote(V)
else:
rval, cval = self.value_decode(V)
self.__set(K, rval, cval)
M = self[K]
# end __ParseString
# end BaseCookie class
class SimpleCookie(BaseCookie):
"""SimpleCookie
SimpleCookie supports strings as cookie values. When setting
the value using the dictionary assignment notation, SimpleCookie
calls the builtin str() to convert the value to a string. Values
received from HTTP are kept as strings.
"""
def value_decode(self, val):
return _unquote( val ), val
def value_encode(self, val):
strval = str(val)
return strval, _quote( strval )
# end SimpleCookie
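# Illustrative sketch -- not part of the original Cookie.py; the helper name is
# hypothetical. SimpleCookie in both directions: building a Set-Cookie header
# and parsing an incoming HTTP_COOKIE string.
def _simple_cookie_example():
    C = SimpleCookie()
    C["vienna"] = "finger"                # value_encode() applies str() + _quote()
    C["vienna"]["path"] = "/"
    header = C.output()                   # -> 'Set-Cookie: vienna=finger; Path=/;'
    D = SimpleCookie()
    D.load("keebler=E=mc2; chips=ahoy")   # a typical Cookie: header value
    assert D["chips"].value == "ahoy"
    assert D["keebler"].value == "E=mc2"
    return header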
class SerialCookie(BaseCookie):
"""SerialCookie
SerialCookie supports arbitrary objects as cookie values. All
values are serialized (using cPickle) before being sent to the
client. All incoming values are assumed to be valid Pickle
representations. IF AN INCOMING VALUE IS NOT IN A VALID PICKLE
FORMAT, THEN AN EXCEPTION WILL BE RAISED.
Note: Large cookie values add overhead because they must be
retransmitted on every HTTP transaction.
Note: HTTP has a 2k limit on the size of a cookie. This class
does not check for this limit, so be careful!!!
"""
def value_decode(self, val):
# This could raise an exception!
return loads( _unquote(val) ), val
def value_encode(self, val):
return val, _quote( dumps(val) )
# end SerialCookie
class SmartCookie(BaseCookie):
"""SmartCookie
SmartCookie supports arbitrary objects as cookie values. If the
object is a string, then it is quoted. If the object is not a
string, however, then SmartCookie will use cPickle to serialize
the object into a string representation.
Note: Large cookie values add overhead because they must be
retransmitted on every HTTP transaction.
Note: HTTP has a 2k limit on the size of a cookie. This class
does not check for this limit, so be careful!!!
"""
def value_decode(self, val):
strval = _unquote(val)
try:
return loads(strval), val
except:
return strval, val
def value_encode(self, val):
if type(val) == type(""):
return val, _quote(val)
else:
return val, _quote( dumps(val) )
# end SmartCookie
###########################################################
# Backwards Compatibility: Don't break any existing code!
# We provide Cookie() as an alias for SmartCookie()
Cookie = SmartCookie
#
###########################################################
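# Illustrative sketch -- not part of the original Cookie.py; the helper name is
# hypothetical. SmartCookie (a.k.a. Cookie) quotes plain strings as-is but
# pickles any other object, so non-string values survive the
# value_encode()/value_decode() round trip.
def _smart_cookie_example():
    C = Cookie()
    C["string"] = "seven"
    C["number"] = 7                        # not a string -> pickled, then quoted
    assert C["string"].value == "seven"
    assert C["number"].value == 7
    decoded, coded = C.value_decode(C["number"].coded_value)
    assert decoded == 7                    # unpickled back to the original object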
def _test():
import doctest, Cookie
return doctest.testmod(Cookie)
if __name__ == "__main__":
_test()
#Local Variables:
#tab-width: 4
#end:

+ 0
- 232
lib/jython/Lib/LICENSE View File

@@ -1,232 +0,0 @@
A. HISTORY OF THE SOFTWARE
==========================
Python was created in the early 1990s by Guido van Rossum at Stichting
Mathematisch Centrum (CWI) in the Netherlands as a successor of a
language called ABC. Guido is Python's principal author, although it
includes many contributions from others. The last version released
from CWI was Python 1.2. In 1995, Guido continued his work on Python
at the Corporation for National Research Initiatives (CNRI) in Reston,
Virginia where he released several versions of the software. Python
1.6 was the last of the versions released by CNRI. In 2000, Guido and
the Python core development team moved to BeOpen.com to form the
BeOpen PythonLabs team. Python 2.0 was the first and only release
from BeOpen.com.
Following the release of Python 1.6, and after Guido van Rossum left
CNRI to work with commercial software developers, it became clear that
the ability to use Python with software available under the GNU Public
License (GPL) was very desirable. CNRI and the Free Software
Foundation (FSF) interacted to develop enabling wording changes to the
Python license. Python 1.6.1 is essentially the same as Python 1.6,
with a few minor bug fixes, and with a different license that enables
later versions to be GPL-compatible. Python 2.1 is a derivative work
of Python 1.6.1, as well as of Python 2.0.
After Python 2.0 was released by BeOpen.com, Guido van Rossum and the
other PythonLabs developers joined Digital Creations. All
intellectual property added from this point on, starting with Python
2.1 and its alpha and beta releases, is owned by the Python Software
Foundation (PSF), a non-profit modeled after the Apache Software
Foundation. See http://www.python.org/psf/ for more information about
the PSF.
Thanks to the many outside volunteers who have worked under Guido's
direction to make these releases possible.
B. TERMS AND CONDITIONS FOR ACCESSING OR OTHERWISE USING PYTHON
===============================================================
PSF LICENSE AGREEMENT
---------------------
1. This LICENSE AGREEMENT is between the Python Software Foundation
("PSF"), and the Individual or Organization ("Licensee") accessing and
otherwise using Python 2.1.1 software in source or binary form and its
associated documentation.
2. Subject to the terms and conditions of this License Agreement, PSF
hereby grants Licensee a nonexclusive, royalty-free, world-wide
license to reproduce, analyze, test, perform and/or display publicly,
prepare derivative works, distribute, and otherwise use Python 2.1.1
alone or in any derivative version, provided, however, that PSF's
License Agreement and PSF's notice of copyright, i.e., "Copyright (c)
2001 Python Software Foundation; All Rights Reserved" are retained in
Python 2.1.1 alone or in any derivative version prepared by Licensee.
3. In the event Licensee prepares a derivative work that is based on
or incorporates Python 2.1.1 or any part thereof, and wants to make
the derivative work available to others as provided herein, then
Licensee hereby agrees to include in any such work a brief summary of
the changes made to Python 2.1.1.
4. PSF is making Python 2.1.1 available to Licensee on an "AS IS"
basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON 2.1.1 WILL NOT
INFRINGE ANY THIRD PARTY RIGHTS.
5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
2.1.1 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON 2.1.1,
OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
6. This License Agreement will automatically terminate upon a material
breach of its terms and conditions.
7. Nothing in this License Agreement shall be deemed to create any
relationship of agency, partnership, or joint venture between PSF and
Licensee. This License Agreement does not grant permission to use PSF
trademarks or trade name in a trademark sense to endorse or promote
products or services of Licensee, or any third party.
8. By copying, installing or otherwise using Python 2.1.1, Licensee
agrees to be bound by the terms and conditions of this License
Agreement.
BEOPEN.COM TERMS AND CONDITIONS FOR PYTHON 2.0
----------------------------------------------
BEOPEN PYTHON OPEN SOURCE LICENSE AGREEMENT VERSION 1
1. This LICENSE AGREEMENT is between BeOpen.com ("BeOpen"), having an
office at 160 Saratoga Avenue, Santa Clara, CA 95051, and the
Individual or Organization ("Licensee") accessing and otherwise using
this software in source or binary form and its associated
documentation ("the Software").
2. Subject to the terms and conditions of this BeOpen Python License
Agreement, BeOpen hereby grants Licensee a non-exclusive,
royalty-free, world-wide license to reproduce, analyze, test, perform
and/or display publicly, prepare derivative works, distribute, and
otherwise use the Software alone or in any derivative version,
provided, however, that the BeOpen Python License is retained in the
Software, alone or in any derivative version prepared by Licensee.
3. BeOpen is making the Software available to Licensee on an "AS IS"
basis. BEOPEN MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, BEOPEN MAKES NO AND
DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF THE SOFTWARE WILL NOT
INFRINGE ANY THIRD PARTY RIGHTS.
4. BEOPEN SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF THE
SOFTWARE FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS
AS A RESULT OF USING, MODIFYING OR DISTRIBUTING THE SOFTWARE, OR ANY
DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
5. This License Agreement will automatically terminate upon a material
breach of its terms and conditions.
6. This License Agreement shall be governed by and interpreted in all
respects by the law of the State of California, excluding conflict of
law provisions. Nothing in this License Agreement shall be deemed to
create any relationship of agency, partnership, or joint venture
between BeOpen and Licensee. This License Agreement does not grant
permission to use BeOpen trademarks or trade names in a trademark
sense to endorse or promote products or services of Licensee, or any
third party. As an exception, the "BeOpen Python" logos available at
http://www.pythonlabs.com/logos.html may be used according to the
permissions granted on that web page.
7. By copying, installing or otherwise using the software, Licensee
agrees to be bound by the terms and conditions of this License
Agreement.
CNRI OPEN SOURCE GPL-COMPATIBLE LICENSE AGREEMENT
-------------------------------------------------
1. This LICENSE AGREEMENT is between the Corporation for National
Research Initiatives, having an office at 1895 Preston White Drive,
Reston, VA 20191 ("CNRI"), and the Individual or Organization
("Licensee") accessing and otherwise using Python 1.6.1 software in
source or binary form and its associated documentation.
2. Subject to the terms and conditions of this License Agreement, CNRI
hereby grants Licensee a nonexclusive, royalty-free, world-wide
license to reproduce, analyze, test, perform and/or display publicly,
prepare derivative works, distribute, and otherwise use Python 1.6.1
alone or in any derivative version, provided, however, that CNRI's
License Agreement and CNRI's notice of copyright, i.e., "Copyright (c)
1995-2001 Corporation for National Research Initiatives; All Rights
Reserved" are retained in Python 1.6.1 alone or in any derivative
version prepared by Licensee. Alternately, in lieu of CNRI's License
Agreement, Licensee may substitute the following text (omitting the
quotes): "Python 1.6.1 is made available subject to the terms and
conditions in CNRI's License Agreement. This Agreement together with
Python 1.6.1 may be located on the Internet using the following
unique, persistent identifier (known as a handle): 1895.22/1013. This
Agreement may also be obtained from a proxy server on the Internet
using the following URL: http://hdl.handle.net/1895.22/1013".
3. In the event Licensee prepares a derivative work that is based on
or incorporates Python 1.6.1 or any part thereof, and wants to make
the derivative work available to others as provided herein, then
Licensee hereby agrees to include in any such work a brief summary of
the changes made to Python 1.6.1.
4. CNRI is making Python 1.6.1 available to Licensee on an "AS IS"
basis. CNRI MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, CNRI MAKES NO AND
DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON 1.6.1 WILL NOT
INFRINGE ANY THIRD PARTY RIGHTS.
5. CNRI SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
1.6.1 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON 1.6.1,
OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
6. This License Agreement will automatically terminate upon a material
breach of its terms and conditions.
7. This License Agreement shall be governed by the federal
intellectual property law of the United States, including without
limitation the federal copyright law, and, to the extent such
U.S. federal law does not apply, by the law of the Commonwealth of
Virginia, excluding Virginia's conflict of law provisions.
Notwithstanding the foregoing, with regard to derivative works based
on Python 1.6.1 that incorporate non-separable material that was
previously distributed under the GNU General Public License (GPL), the
law of the Commonwealth of Virginia shall govern this License
Agreement only as to issues arising under or with respect to
Paragraphs 4, 5, and 7 of this License Agreement. Nothing in this
License Agreement shall be deemed to create any relationship of
agency, partnership, or joint venture between CNRI and Licensee. This
License Agreement does not grant permission to use CNRI trademarks or
trade name in a trademark sense to endorse or promote products or
services of Licensee, or any third party.
8. By clicking on the "ACCEPT" button where indicated, or by copying,
installing or otherwise using Python 1.6.1, Licensee agrees to be
bound by the terms and conditions of this License Agreement.
ACCEPT
CWI PERMISSIONS STATEMENT AND DISCLAIMER
----------------------------------------
Copyright (c) 1991 - 1995, Stichting Mathematisch Centrum Amsterdam,
The Netherlands. All rights reserved.
Permission to use, copy, modify, and distribute this software and its
documentation for any purpose and without fee is hereby granted,
provided that the above copyright notice appear in all copies and that
both that copyright notice and this permission notice appear in
supporting documentation, and that the name of Stichting Mathematisch
Centrum or CWI not be used in advertising or publicity pertaining to
distribution of the software without specific, written prior
permission.
STICHTING MATHEMATISCH CENTRUM DISCLAIMS ALL WARRANTIES WITH REGARD TO
THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS, IN NO EVENT SHALL STICHTING MATHEMATISCH CENTRUM BE LIABLE
FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

+ 0
- 128
lib/jython/Lib/MimeWriter.py View File

@@ -1,128 +0,0 @@
"""Generic MIME writer.
Classes:
MimeWriter - the only thing here.
"""
import mimetools
__all__ = ["MimeWriter"]
class MimeWriter:
"""Generic MIME writer.
Methods:
__init__()
addheader()
flushheaders()
startbody()
startmultipartbody()
nextpart()
lastpart()
A MIME writer is much more primitive than a MIME parser. It
doesn't seek around on the output file, and it doesn't use large
amounts of buffer space, so you have to write the parts in the
order they should occur on the output file. It does buffer the
headers you add, allowing you to rearrange their order.
General usage is:
f = <open the output file>
w = MimeWriter(f)
...call w.addheader(key, value) 0 or more times...
followed by either:
f = w.startbody(content_type)
...call f.write(data) for body data...
or:
w.startmultipartbody(subtype)
for each part:
subwriter = w.nextpart()
...use the subwriter's methods to create the subpart...
w.lastpart()
The subwriter is another MimeWriter instance, and should be
treated in the same way as the toplevel MimeWriter. This way,
writing recursive body parts is easy.
Warning: don't forget to call lastpart()!
XXX There should be more state so calls made in the wrong order
are detected.
Some special cases:
- startbody() just returns the file passed to the constructor;
but don't use this knowledge, as it may be changed.
- startmultipartbody() actually returns a file as well;
this can be used to write the initial 'if you can read this your
mailer is not MIME-aware' message.
- If you call flushheaders(), the headers accumulated so far are
written out (and forgotten); this is useful if you don't need a
body part at all, e.g. for a subpart of type message/rfc822
that's (mis)used to store some header-like information.
- Passing a keyword argument 'prefix=<flag>' to addheader(),
start*body() affects where the header is inserted; 0 means
append at the end, 1 means insert at the start; default is
append for addheader(), but insert for start*body(), which use
it to determine where the Content-Type header goes.
"""
def __init__(self, fp):
self._fp = fp
self._headers = []
def addheader(self, key, value, prefix=0):
lines = value.split("\n")
while lines and not lines[-1]: del lines[-1]
while lines and not lines[0]: del lines[0]
for i in range(1, len(lines)):
lines[i] = " " + lines[i].strip()
value = "\n".join(lines) + "\n"
line = key + ": " + value
if prefix:
self._headers.insert(0, line)
else:
self._headers.append(line)
def flushheaders(self):
self._fp.writelines(self._headers)
self._headers = []
def startbody(self, ctype, plist=[], prefix=1):
for name, value in plist:
ctype = ctype + ';\n %s=\"%s\"' % (name, value)
self.addheader("Content-Type", ctype, prefix=prefix)
self.flushheaders()
self._fp.write("\n")
return self._fp
def startmultipartbody(self, subtype, boundary=None, plist=[], prefix=1):
self._boundary = boundary or mimetools.choose_boundary()
return self.startbody("multipart/" + subtype,
[("boundary", self._boundary)] + plist,
prefix=prefix)
def nextpart(self):
self._fp.write("\n--" + self._boundary + "\n")
return self.__class__(self._fp)
def lastpart(self):
self._fp.write("\n--" + self._boundary + "--\n")
if __name__ == '__main__':
import test.test_MimeWriter
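# Illustrative sketch -- not part of the original MimeWriter.py; the helper name
# is hypothetical. It follows the usage pattern from the class docstring, but
# writes a two-part multipart/mixed message into a StringIO buffer instead of a
# real file.
def _mimewriter_example():
    import StringIO
    fp = StringIO.StringIO()
    w = MimeWriter(fp)
    w.addheader("MIME-Version", "1.0")
    w.startmultipartbody("mixed", boundary="BOUNDARY")
    part = w.nextpart()
    body = part.startbody("text/plain")
    body.write("first part\n")
    part = w.nextpart()
    body = part.startbody("text/plain")
    body.write("second part\n")
    w.lastpart()                           # closes the outer multipart body
    return fp.getvalue()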

+ 0
- 132
lib/jython/Lib/Queue.py View File

@@ -1,132 +0,0 @@
"""A multi-producer, multi-consumer queue."""
class Empty(Exception):
"Exception raised by Queue.get(block=0)/get_nowait()."
pass
class Full(Exception):
"Exception raised by Queue.put(block=0)/put_nowait()."
pass
class Queue:
def __init__(self, maxsize=0):
"""Initialize a queue object with a given maximum size.
If maxsize is <= 0, the queue size is infinite.
"""
import thread
self._init(maxsize)
self.mutex = thread.allocate_lock()
self.esema = thread.allocate_lock()
self.esema.acquire()
self.fsema = thread.allocate_lock()
def qsize(self):
"""Return the approximate size of the queue (not reliable!)."""
self.mutex.acquire()
n = self._qsize()
self.mutex.release()
return n
def empty(self):
"""Return 1 if the queue is empty, 0 otherwise (not reliable!)."""
self.mutex.acquire()
n = self._empty()
self.mutex.release()
return n
def full(self):
"""Return 1 if the queue is full, 0 otherwise (not reliable!)."""
self.mutex.acquire()
n = self._full()
self.mutex.release()
return n
def put(self, item, block=1):
"""Put an item into the queue.
If optional arg 'block' is 1 (the default), block if
necessary until a free slot is available. Otherwise (block
is 0), put an item on the queue if a free slot is immediately
available, else raise the Full exception.
"""
if block:
self.fsema.acquire()
elif not self.fsema.acquire(0):
raise Full
self.mutex.acquire()
was_empty = self._empty()
self._put(item)
if was_empty:
self.esema.release()
if not self._full():
self.fsema.release()
self.mutex.release()
def put_nowait(self, item):
"""Put an item into the queue without blocking.
Only enqueue the item if a free slot is immediately available.
Otherwise raise the Full exception.
"""
return self.put(item, 0)
def get(self, block=1):
"""Remove and return an item from the queue.
If optional arg 'block' is 1 (the default), block if
necessary until an item is available. Otherwise (block is 0),
return an item if one is immediately available, else raise the
Empty exception.
"""
if block:
self.esema.acquire()
elif not self.esema.acquire(0):
raise Empty
self.mutex.acquire()
was_full = self._full()
item = self._get()
if was_full:
self.fsema.release()
if not self._empty():
self.esema.release()
self.mutex.release()
return item
def get_nowait(self):
"""Remove and return an item from the queue without blocking.
Only get an item if one is immediately available. Otherwise
raise the Empty exception.
"""
return self.get(0)
# Override these methods to implement other queue organizations
# (e.g. stack or priority queue).
# These will only be called with appropriate locks held
# Initialize the queue representation
def _init(self, maxsize):
self.maxsize = maxsize
self.queue = []
def _qsize(self):
return len(self.queue)
# Check whether the queue is empty
def _empty(self):
return not self.queue
# Check whether the queue is full
def _full(self):
return self.maxsize > 0 and len(self.queue) == self.maxsize
# Put a new item in the queue
def _put(self, item):
self.queue.append(item)
# Get an item from the queue
def _get(self):
item = self.queue[0]
del self.queue[0]
return item
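# Illustrative sketch -- not part of the original Queue.py; the helper name is
# hypothetical. The *_nowait() calls raise Empty/Full instead of waiting,
# whereas put()/get() with block=1 (the default) block on the internal locks
# until space or an item becomes available.
def _queue_example():
    q = Queue(maxsize=2)
    q.put("a")
    q.put_nowait("b")
    try:
        q.put_nowait("c")          # queue already holds maxsize items
    except Full:
        pass
    assert q.get() == "a"          # FIFO order
    assert q.get_nowait() == "b"
    try:
        q.get_nowait()             # nothing left
    except Empty:
        pass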

+ 0
- 198
lib/jython/Lib/SimpleHTTPServer.py View File

@@ -1,198 +0,0 @@
"""Simple HTTP Server.
This module builds on BaseHTTPServer by implementing the standard GET
and HEAD requests in a fairly straightforward manner.
"""
__version__ = "0.6"
__all__ = ["SimpleHTTPRequestHandler"]
import os
import posixpath
import BaseHTTPServer
import urllib
import cgi
import shutil
import mimetypes
from StringIO import StringIO
class SimpleHTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
"""Simple HTTP request handler with GET and HEAD commands.
This serves files from the current directory and any of its
subdirectories. It assumes that all files are plain text files
unless they have the extension ".html" in which case it assumes
they are HTML files.
The GET and HEAD requests are identical except that the HEAD
request omits the actual contents of the file.
"""
server_version = "SimpleHTTP/" + __version__
def do_GET(self):
"""Serve a GET request."""
f = self.send_head()
if f:
self.copyfile(f, self.wfile)
f.close()
def do_HEAD(self):
"""Serve a HEAD request."""
f = self.send_head()
if f:
f.close()
def send_head(self):
"""Common code for GET and HEAD commands.
This sends the response code and MIME headers.
Return value is either a file object (which has to be copied
to the outputfile by the caller unless the command was HEAD,
and must be closed by the caller under all circumstances), or
None, in which case the caller has nothing further to do.
"""
path = self.translate_path(self.path)
f = None
if os.path.isdir(path):
for index in "index.html", "index.htm":
index = os.path.join(path, index)
if os.path.exists(index):
path = index
break
else:
return self.list_directory(path)
ctype = self.guess_type(path)
if ctype.startswith('text/'):
mode = 'r'
else:
mode = 'rb'
try:
f = open(path, mode)
except IOError:
self.send_error(404, "File not found")
return None
self.send_response(200)
self.send_header("Content-type", ctype)
self.end_headers()
return f
def list_directory(self, path):
"""Helper to produce a directory listing (absent index.html).
Return value is either a file object, or None (indicating an
error). In either case, the headers are sent, making the
interface the same as for send_head().
"""
try:
list = os.listdir(path)
except os.error:
self.send_error(404, "No permission to list directory")
return None
list.sort(lambda a, b: cmp(a.lower(), b.lower()))
f = StringIO()
f.write("<title>Directory listing for %s</title>\n" % self.path)
f.write("<h2>Directory listing for %s</h2>\n" % self.path)
f.write("<hr>\n<ul>\n")
for name in list:
fullname = os.path.join(path, name)
displayname = linkname = name = cgi.escape(name)
# Append / for directories or @ for symbolic links
if os.path.isdir(fullname):
displayname = name + "/"
linkname = name + "/"
if os.path.islink(fullname):
displayname = name + "@"
# Note: a symlink to a directory is displayed with "@" but its hyperlink still ends in "/"
f.write('<li><a href="%s">%s</a>\n' % (linkname, displayname))
f.write("</ul>\n<hr>\n")
f.seek(0)
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
return f
def translate_path(self, path):
"""Translate a /-separated PATH to the local filename syntax.
Components that mean special things to the local file system
(e.g. drive or directory names) are ignored. (XXX They should
probably be diagnosed.)
"""
path = posixpath.normpath(urllib.unquote(path))
words = path.split('/')
words = filter(None, words)
path = os.getcwd()
for word in words:
drive, word = os.path.splitdrive(word)
head, word = os.path.split(word)
if word in (os.curdir, os.pardir): continue
path = os.path.join(path, word)
return path
def copyfile(self, source, outputfile):
"""Copy all data between two file objects.
The SOURCE argument is a file object open for reading
(or anything with a read() method) and the DESTINATION
argument is a file object open for writing (or
anything with a write() method).
The only reason for overriding this would be to change
the block size or perhaps to replace newlines by CRLF
-- note, however, that the default server uses this
to copy binary data as well.
"""
shutil.copyfileobj(source, outputfile)
def guess_type(self, path):
"""Guess the type of a file.
Argument is a PATH (a filename).
Return value is a string of the form type/subtype,
usable for a MIME Content-type header.
The default implementation looks the file's extension
up in the table self.extensions_map, using text/plain
as a default; however it would be permissible (if
slow) to look inside the data to make a better guess.
"""
base, ext = posixpath.splitext(path)
if self.extensions_map.has_key(ext):
return self.extensions_map[ext]
ext = ext.lower()
if self.extensions_map.has_key(ext):
return self.extensions_map[ext]
else:
return self.extensions_map['']
extensions_map = mimetypes.types_map.copy()
extensions_map.update({
'': 'application/octet-stream', # Default
'.py': 'text/plain',
'.c': 'text/plain',
'.h': 'text/plain',
})
def test(HandlerClass = SimpleHTTPRequestHandler,
ServerClass = BaseHTTPServer.HTTPServer):
BaseHTTPServer.test(HandlerClass, ServerClass)
if __name__ == '__main__':
test()
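# Illustrative sketch -- not part of the original SimpleHTTPServer.py; the
# helper name and port number are arbitrary. Instead of going through
# BaseHTTPServer.test(), the handler can be wired into an HTTPServer directly
# to serve the current directory.
def _serve_current_directory(port=8000):
    httpd = BaseHTTPServer.HTTPServer(('', port), SimpleHTTPRequestHandler)
    print "Serving HTTP on port", port, "..."
    httpd.serve_forever()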

+ 0
- 566
lib/jython/Lib/SocketServer.py View File

@@ -1,566 +0,0 @@
"""Generic socket server classes.
This module tries to capture the various aspects of defining a server:
For socket-based servers:
- address family:
- AF_INET: IP (Internet Protocol) sockets (default)
- AF_UNIX: Unix domain sockets
- others, e.g. AF_DECNET, are conceivable (see <socket.h>)
- socket type:
- SOCK_STREAM (reliable stream, e.g. TCP)
- SOCK_DGRAM (datagrams, e.g. UDP)
For request-based servers (including socket-based):
- client address verification before further looking at the request
(This is actually a hook for any processing that needs to look
at the request before anything else, e.g. logging)
- how to handle multiple requests:
- synchronous (one request is handled at a time)
- forking (each request is handled by a new process)
- threading (each request is handled by a new thread)
The classes in this module favor the server type that is simplest to
write: a synchronous TCP/IP server. This is bad class design, but
it saves some typing. (There's also the issue that a deep class hierarchy
slows down method lookups.)
There are five classes in an inheritance diagram, four of which represent
synchronous servers of four types:
+------------+
| BaseServer |
+------------+
|
v
+-----------+ +------------------+
| TCPServer |------->| UnixStreamServer |
+-----------+ +------------------+
|
v
+-----------+ +--------------------+
| UDPServer |------->| UnixDatagramServer |
+-----------+ +--------------------+
Note that UnixDatagramServer derives from UDPServer, not from
UnixStreamServer -- the only difference between an IP and a Unix
stream server is the address family, which is simply repeated in both
unix server classes.
Forking and threading versions of each type of server can be created
using the ForkingServer and ThreadingServer mix-in classes. For
instance, a threading UDP server class is created as follows:
class ThreadingUDPServer(ThreadingMixIn, UDPServer): pass
The Mix-in class must come first, since it overrides a method defined
in UDPServer!
To implement a service, you must derive a class from
BaseRequestHandler and redefine its handle() method. You can then run
various versions of the service by combining one of the server classes
with your request handler class.
The request handler class must be different for datagram or stream
services. This can be hidden by using the mix-in request handler
classes StreamRequestHandler or DatagramRequestHandler.
Of course, you still have to use your head!
For instance, it makes no sense to use a forking server if the service
contains state in memory that can be modified by requests (since the
modifications in the child process would never reach the initial state
kept in the parent process and passed to each child). In this case,
you can use a threading server, but you will probably have to use
locks to prevent two requests that come in nearly simultaneously from applying
conflicting changes to the server state.
On the other hand, if you are building e.g. an HTTP server, where all
data is stored externally (e.g. in the file system), a synchronous
class will essentially render the service "deaf" while one request is
being handled -- which may be for a very long time if a client is slow
to read all the data it has requested. Here a threading or forking
server is appropriate.
In some cases, it may be appropriate to process part of a request
synchronously, but to finish processing in a forked child depending on
the request data. This can be implemented by using a synchronous
server and doing an explicit fork in the request handler class
handle() method.
Another approach to handling multiple simultaneous requests in an
environment that supports neither threads nor fork (or where these are
too expensive or inappropriate for the service) is to maintain an
explicit table of partially finished requests and to use select() to
decide which request to work on next (or whether to handle a new
incoming request). This is particularly important for stream services
where each client can potentially be connected for a long time (if
threads or subprocesses cannot be used).
Future work:
- Standard classes for Sun RPC (which uses either UDP or TCP)
- Standard mix-in classes to implement various authentication
and encryption schemes
- Standard framework for select-based multiplexing
XXX Open problems:
- What to do with out-of-band data?
BaseServer:
- split generic "request" functionality out into BaseServer class.
Copyright (C) 2000 Luke Kenneth Casson Leighton <lkcl@samba.org>
example: read entries from a SQL database (requires overriding
get_request() to return a table entry from the database).
entry is processed by a RequestHandlerClass.
"""
# Author of the BaseServer patch: Luke Kenneth Casson Leighton
# XXX Warning!
# There is a test suite for this module, but it cannot be run by the
# standard regression test.
# To run it manually, run Lib/test/test_socketserver.py.
__version__ = "0.4"
import socket
import sys
import os
__all__ = ["TCPServer","UDPServer","ForkingUDPServer","ForkingTCPServer",
"ThreadingUDPServer","ThreadingTCPServer","BaseRequestHandler",
"StreamRequestHandler","DatagramRequestHandler",
"ThreadingMixIn", "ForkingMixIn"]
if hasattr(socket, "AF_UNIX"):
__all__.extend(["UnixStreamServer","UnixDatagramServer",
"ThreadingUnixStreamServer",
"ThreadingUnixDatagramServer"])
class BaseServer:
"""Base class for server classes.
Methods for the caller:
- __init__(server_address, RequestHandlerClass)
- serve_forever()
- handle_request() # if you do not use serve_forever()
- fileno() -> int # for select()
Methods that may be overridden:
- server_bind()
- server_activate()
- get_request() -> request, client_address
- verify_request(request, client_address)
- server_close()
- process_request(request, client_address)
- close_request(request)
- handle_error()
Methods for derived classes:
- finish_request(request, client_address)
Class variables that may be overridden by derived classes or
instances:
- address_family
- socket_type
- reuse_address
Instance variables:
- RequestHandlerClass
- socket
"""
def __init__(self, server_address, RequestHandlerClass):
"""Constructor. May be extended, do not override."""
self.server_address = server_address
self.RequestHandlerClass = RequestHandlerClass
def server_activate(self):
"""Called by constructor to activate the server.
May be overridden.
"""
pass
def serve_forever(self):
"""Handle one request at a time until doomsday."""
while 1:
self.handle_request()
# The distinction between handling, getting, processing and
# finishing a request is fairly arbitrary. Remember:
#
# - handle_request() is the top-level call. It calls
# get_request(), verify_request() and process_request()
# - get_request() is different for stream or datagram sockets
# - process_request() is the place that may fork a new process
# or create a new thread to finish the request
# - finish_request() instantiates the request handler class;
# this constructor will handle the request all by itself
def handle_request(self):
"""Handle one request, possibly blocking."""
try:
request, client_address = self.get_request()
except socket.error:
return
if self.verify_request(request, client_address):
try:
self.process_request(request, client_address)
except:
self.handle_error(request, client_address)
self.close_request(request)
def verify_request(self, request, client_address):
"""Verify the request. May be overridden.
Return true if we should proceed with this request.
"""
return 1
def process_request(self, request, client_address):
"""Call finish_request.
Overridden by ForkingMixIn and ThreadingMixIn.
"""
self.finish_request(request, client_address)
self.close_request(request)
def server_close(self):
"""Called to clean-up the server.
May be overridden.
"""
pass
def finish_request(self, request, client_address):
"""Finish one request by instantiating RequestHandlerClass."""
self.RequestHandlerClass(request, client_address, self)
def close_request(self, request):
"""Called to clean up an individual request."""
pass
def handle_error(self, request, client_address):
"""Handle an error gracefully. May be overridden.
The default is to print a traceback and continue.
"""
print '-'*40
print 'Exception happened during processing of request from',
print client_address
import traceback
traceback.print_exc() # XXX But this goes to stderr!
print '-'*40
class TCPServer(BaseServer):
"""Base class for various socket-based server classes.
Defaults to synchronous IP stream (i.e., TCP).
Methods for the caller:
- __init__(server_address, RequestHandlerClass)
- serve_forever()
- handle_request() # if you don't use serve_forever()
- fileno() -> int # for select()
Methods that may be overridden:
- server_bind()
- server_activate()
- get_request() -> request, client_address
- verify_request(request, client_address)
- process_request(request, client_address)
- close_request(request)
- handle_error()
Methods for derived classes:
- finish_request(request, client_address)
Class variables that may be overridden by derived classes or
instances:
- address_family
- socket_type
- request_queue_size (only for stream sockets)
- reuse_address
Instance variables:
- server_address
- RequestHandlerClass
- socket
"""
address_family = socket.AF_INET
socket_type = socket.SOCK_STREAM
request_queue_size = 5
allow_reuse_address = 0
def __init__(self, server_address, RequestHandlerClass):
"""Constructor. May be extended, do not override."""
BaseServer.__init__(self, server_address, RequestHandlerClass)
self.socket = socket.socket(self.address_family,
self.socket_type)
self.server_bind()
self.server_activate()
def server_bind(self):
"""Called by constructor to bind the socket.
May be overridden.
"""
if self.allow_reuse_address:
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.bind(self.server_address)
def server_activate(self):
"""Called by constructor to activate the server.
May be overridden.
"""
self.socket.listen(self.request_queue_size)
def server_close(self):
"""Called to clean-up the server.
May be overridden.
"""
self.socket.close()
def fileno(self):
"""Return socket file number.
Interface required by select().
"""
return self.socket.fileno()
def get_request(self):
"""Get the request and client address from the socket.
May be overridden.
"""
return self.socket.accept()
def close_request(self, request):
"""Called to clean up an individual request."""
request.close()
class UDPServer(TCPServer):
"""UDP server class."""
allow_reuse_address = 0
socket_type = socket.SOCK_DGRAM
max_packet_size = 8192
def get_request(self):
data, client_addr = self.socket.recvfrom(self.max_packet_size)
return (data, self.socket), client_addr
def server_activate(self):
# No need to call listen() for UDP.
pass
def close_request(self, request):
# No need to close anything.
pass
class ForkingMixIn:
"""Mix-in class to handle each request in a new process."""
active_children = None
max_children = 40
def collect_children(self):
"""Internal routine to wait for died children."""
while self.active_children:
if len(self.active_children) < self.max_children:
options = os.WNOHANG
else:
# If the maximum number of children are already
# running, block while waiting for a child to exit
options = 0
try:
pid, status = os.waitpid(0, options)
except os.error:
pid = None
if not pid: break
self.active_children.remove(pid)
def process_request(self, request, client_address):
"""Fork a new subprocess to process the request."""
self.collect_children()
pid = os.fork()
if pid:
# Parent process
if self.active_children is None:
self.active_children = []
self.active_children.append(pid)
self.close_request(request)
return
else:
# Child process.
# This must never return, hence os._exit()!
try:
self.finish_request(request, client_address)
os._exit(0)
except:
try:
self.handle_error(request, client_address)
finally:
os._exit(1)
class ThreadingMixIn:
"""Mix-in class to handle each request in a new thread."""
def process_request(self, request, client_address):
"""Start a new thread to process the request."""
import threading
t = threading.Thread(target = self.finish_request,
args = (request, client_address))
t.start()
class ForkingUDPServer(ForkingMixIn, UDPServer): pass
class ForkingTCPServer(ForkingMixIn, TCPServer): pass
class ThreadingUDPServer(ThreadingMixIn, UDPServer): pass
class ThreadingTCPServer(ThreadingMixIn, TCPServer): pass
if hasattr(socket, 'AF_UNIX'):
class UnixStreamServer(TCPServer):
address_family = socket.AF_UNIX
class UnixDatagramServer(UDPServer):
address_family = socket.AF_UNIX
class ThreadingUnixStreamServer(ThreadingMixIn, UnixStreamServer): pass
class ThreadingUnixDatagramServer(ThreadingMixIn, UnixDatagramServer): pass
class BaseRequestHandler:
"""Base class for request handler classes.
This class is instantiated for each request to be handled. The
constructor sets the instance variables request, client_address
and server, and then calls the handle() method. To implement a
specific service, all you need to do is to derive a class which
defines a handle() method.
The handle() method can find the request as self.request, the
client address as self.client_address, and the server (in case it
needs access to per-server information) as self.server. Since a
separate instance is created for each request, the handle() method
can define other arbitrary instance variables.
"""
def __init__(self, request, client_address, server):
self.request = request
self.client_address = client_address
self.server = server
try:
self.setup()
self.handle()
self.finish()
finally:
sys.exc_traceback = None # Help garbage collection
def setup(self):
pass
def __del__(self):
pass
def handle(self):
pass
def finish(self):
pass
# The following two classes make it possible to use the same service
# class for stream or datagram servers.
# Each class sets up these instance variables:
# - rfile: a file object from which the request is read
# - wfile: a file object to which the reply is written
# When the handle() method returns, wfile is flushed properly
class StreamRequestHandler(BaseRequestHandler):
"""Define self.rfile and self.wfile for stream sockets."""
# Default buffer sizes for rfile, wfile.
# We default rfile to buffered because otherwise it could be
# really slow for large data (a getc() call per byte); we make
# wfile unbuffered because (a) often after a write() we want to
# read and we need to flush the line; (b) big writes to unbuffered
# files are typically optimized by stdio even when big reads
# aren't.
rbufsize = -1
wbufsize = 0
def setup(self):
self.connection = self.request
self.rfile = self.connection.makefile('rb', self.rbufsize)
self.wfile = self.connection.makefile('wb', self.wbufsize)
def finish(self):
self.wfile.flush()
self.wfile.close()
self.rfile.close()
class DatagramRequestHandler(BaseRequestHandler):
# XXX Regrettably, I cannot get this working on Linux;
# s.recvfrom() doesn't return a meaningful client address.
"""Define self.rfile and self.wfile for datagram sockets."""
def setup(self):
import StringIO
self.packet, self.socket = self.request
self.rfile = StringIO.StringIO(self.packet)
self.wfile = StringIO.StringIO(self.packet)
def finish(self):
self.socket.sendto(self.wfile.getvalue(), self.client_address)
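# Illustrative sketch -- not part of the original SocketServer.py; class and
# function names are hypothetical. This is the pattern described in the module
# docstring: derive a handler from StreamRequestHandler and combine it with a
# server class (here the threading TCP variant) to get a simple line-echo
# service.
class _EchoHandler(StreamRequestHandler):
    def handle(self):
        # rfile/wfile are set up by StreamRequestHandler.setup()
        line = self.rfile.readline()
        while line:
            self.wfile.write(line)
            line = self.rfile.readline()
def _run_echo_server(port=7777):
    server = ThreadingTCPServer(('', port), _EchoHandler)
    server.serve_forever()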

+ 0
- 208
lib/jython/Lib/StringIO.py View File

@@ -1,208 +0,0 @@
"""File-like objects that read from or write to a string buffer.
This implements (nearly) all stdio methods.
f = StringIO() # ready for writing
f = StringIO(buf) # ready for reading
f.close() # explicitly release resources held
flag = f.isatty() # always false
pos = f.tell() # get current position
f.seek(pos) # set current position
f.seek(pos, mode) # mode 0: absolute; 1: relative; 2: relative to EOF
buf = f.read() # read until EOF
buf = f.read(n) # read up to n bytes
buf = f.readline() # read until end of line ('\n') or EOF
list = f.readlines()# list of f.readline() results until EOF
f.truncate([size]) # truncate file to at most size (default: current pos)
f.write(buf) # write at current position
f.writelines(list) # for line in list: f.write(line)
f.getvalue() # return whole file's contents as a string
Notes:
- Using a real file is often faster (but less convenient).
- There's also a much faster implementation in C, called cStringIO, but
it's not subclassable.
- fileno() is left unimplemented so that code which uses it triggers
an exception early.
- Seeking far beyond EOF and then writing will insert real null
bytes that occupy space in the buffer.
- There's a simple test set (see end of this file).
"""
try:
from errno import EINVAL
except ImportError:
EINVAL = 22
__all__ = ["StringIO"]
class StringIO:
def __init__(self, buf = ''):
self.buf = buf
self.len = len(buf)
self.buflist = []
self.pos = 0
self.closed = 0
self.softspace = 0
def close(self):
if not self.closed:
self.closed = 1
del self.buf, self.pos
def isatty(self):
if self.closed:
raise ValueError, "I/O operation on closed file"
return 0
def seek(self, pos, mode = 0):
if self.closed:
raise ValueError, "I/O operation on closed file"
if self.buflist:
self.buf += ''.join(self.buflist)
self.buflist = []
if mode == 1:
pos += self.pos
elif mode == 2:
pos += self.len
self.pos = max(0, pos)
def tell(self):
if self.closed:
raise ValueError, "I/O operation on closed file"
return self.pos
def read(self, n = -1):
if self.closed:
raise ValueError, "I/O operation on closed file"
if self.buflist:
self.buf += ''.join(self.buflist)
self.buflist = []
if n < 0:
newpos = self.len
else:
newpos = min(self.pos+n, self.len)
r = self.buf[self.pos:newpos]
self.pos = newpos
return r
def readline(self, length=None):
if self.closed:
raise ValueError, "I/O operation on closed file"
if self.buflist:
self.buf += ''.join(self.buflist)
self.buflist = []
i = self.buf.find('\n', self.pos)
if i < 0:
newpos = self.len
else:
newpos = i+1
if length is not None:
if self.pos + length < newpos:
newpos = self.pos + length
r = self.buf[self.pos:newpos]
self.pos = newpos
return r
def readlines(self, sizehint = 0):
total = 0
lines = []
line = self.readline()
while line:
lines.append(line)
total += len(line)
if 0 < sizehint <= total:
break
line = self.readline()
return lines
def truncate(self, size=None):
if self.closed:
raise ValueError, "I/O operation on closed file"
if size is None:
size = self.pos
elif size < 0:
raise IOError(EINVAL, "Negative size not allowed")
elif size < self.pos:
self.pos = size
self.buf = self.getvalue()[:size]
def write(self, s):
if self.closed:
raise ValueError, "I/O operation on closed file"
if not s: return
if self.pos > self.len:
self.buflist.append('\0'*(self.pos - self.len))
self.len = self.pos
newpos = self.pos + len(s)
if self.pos < self.len:
if self.buflist:
self.buf += ''.join(self.buflist)
self.buflist = []
self.buflist = [self.buf[:self.pos], s, self.buf[newpos:]]
self.buf = ''
if newpos > self.len:
self.len = newpos
else:
self.buflist.append(s)
self.len = newpos
self.pos = newpos
def writelines(self, list):
self.write(''.join(list))
def flush(self):
if self.closed:
raise ValueError, "I/O operation on closed file"
def getvalue(self):
if self.buflist:
self.buf += ''.join(self.buflist)
self.buflist = []
return self.buf
# A little test suite
def test():
import sys
if sys.argv[1:]:
file = sys.argv[1]
else:
file = '/etc/passwd'
lines = open(file, 'r').readlines()
text = open(file, 'r').read()
f = StringIO()
for line in lines[:-2]:
f.write(line)
f.writelines(lines[-2:])
if f.getvalue() != text:
raise RuntimeError, 'write failed'
length = f.tell()
print 'File length =', length
f.seek(len(lines[0]))
f.write(lines[1])
f.seek(0)
print 'First line =', `f.readline()`
here = f.tell()
line = f.readline()
print 'Second line =', `line`
f.seek(-len(line), 1)
line2 = f.read(len(line))
if line != line2:
raise RuntimeError, 'bad result after seek back'
f.seek(len(line2), 1)
list = f.readlines()
line = list[-1]
f.seek(f.tell() - len(line))
line2 = f.read()
if line != line2:
raise RuntimeError, 'bad result after seek back from EOF'
print 'Read', len(list), 'more lines'
print 'File length =', f.tell()
if f.tell() != length:
raise RuntimeError, 'bad length'
f.close()
if __name__ == '__main__':
test()
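# Illustrative sketch -- not part of the original StringIO.py; the helper name
# is hypothetical. It shows the file-like behaviour documented above: reading,
# seeking (mode 2 = relative to EOF) and appending all operate on the in-memory
# buffer.
def _stringio_example():
    f = StringIO("hello world\n")
    assert f.readline() == "hello world\n"
    f.seek(0)
    assert f.read(5) == "hello"
    f.seek(0, 2)                   # seek to EOF
    f.write("goodbye\n")
    assert f.getvalue() == "hello world\ngoodbye\n"
    f.close()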

+ 0
- 42
lib/jython/Lib/UserDict.py View File

@@ -1,42 +0,0 @@
"""A more or less complete user-defined wrapper around dictionary objects."""
class UserDict:
def __init__(self, dict=None):
self.data = {}
if dict is not None: self.update(dict)
def __repr__(self): return repr(self.data)
def __cmp__(self, dict):
if isinstance(dict, UserDict):
return cmp(self.data, dict.data)
else:
return cmp(self.data, dict)
def __len__(self): return len(self.data)
def __getitem__(self, key): return self.data[key]
def __setitem__(self, key, item): self.data[key] = item
def __delitem__(self, key): del self.data[key]
def clear(self): self.data.clear()
def copy(self):
if self.__class__ is UserDict:
return UserDict(self.data)
import copy
return copy.copy(self)
def keys(self): return self.data.keys()
def items(self): return self.data.items()
def values(self): return self.data.values()
def has_key(self, key): return self.data.has_key(key)
def update(self, dict):
if isinstance(dict, UserDict):
self.data.update(dict.data)
elif isinstance(dict, type(self.data)):
self.data.update(dict)
else:
for k, v in dict.items():
self.data[k] = v
def get(self, key, failobj=None):
return self.data.get(key, failobj)
def setdefault(self, key, failobj=None):
if not self.data.has_key(key):
self.data[key] = failobj
return self.data[key]
def popitem(self):
return self.data.popitem()
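# Illustrative sketch -- not part of the original UserDict.py; the class and
# helper names are hypothetical. UserDict exists to be subclassed: overriding
# __setitem__ is enough to change storage behaviour while the other dictionary
# methods keep working through self.data.
class _LowerCaseDict(UserDict):
    """A dictionary that normalises all keys to lower case on assignment."""
    def __setitem__(self, key, item):
        self.data[key.lower()] = item
def _userdict_example():
    d = _LowerCaseDict()
    d["Content-Type"] = "text/plain"
    assert d.keys() == ["content-type"]
    assert d.get("content-type") == "text/plain"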

+ 0
- 85
lib/jython/Lib/UserList.py View File

@@ -1,85 +0,0 @@
"""A more or less complete user-defined wrapper around list objects."""
class UserList:
def __init__(self, initlist=None):
self.data = []
if initlist is not None:
# XXX should this accept an arbitrary sequence?
if type(initlist) == type(self.data):
self.data[:] = initlist
elif isinstance(initlist, UserList):
self.data[:] = initlist.data[:]
else:
self.data = list(initlist)
def __repr__(self): return repr(self.data)
def __lt__(self, other): return self.data < self.__cast(other)
def __le__(self, other): return self.data <= self.__cast(other)
def __eq__(self, other): return self.data == self.__cast(other)
def __ne__(self, other): return self.data != self.__cast(other)
def __gt__(self, other): return self.data > self.__cast(other)
def __ge__(self, other): return self.data >= self.__cast(other)
def __cast(self, other):
if isinstance(other, UserList): return other.data
else: return other
def __cmp__(self, other):
raise RuntimeError, "UserList.__cmp__() is obsolete"
def __contains__(self, item): return item in self.data
def __len__(self): return len(self.data)
def __getitem__(self, i): return self.data[i]
def __setitem__(self, i, item): self.data[i] = item
def __delitem__(self, i): del self.data[i]
def __getslice__(self, i, j):
i = max(i, 0); j = max(j, 0)
return self.__class__(self.data[i:j])
def __setslice__(self, i, j, other):
i = max(i, 0); j = max(j, 0)
if isinstance(other, UserList):
self.data[i:j] = other.data
elif isinstance(other, type(self.data)):
self.data[i:j] = other
else:
self.data[i:j] = list(other)
def __delslice__(self, i, j):
i = max(i, 0); j = max(j, 0)
del self.data[i:j]
def __add__(self, other):
if isinstance(other, UserList):
return self.__class__(self.data + other.data)
elif isinstance(other, type(self.data)):
return self.__class__(self.data + other)
else:
return self.__class__(self.data + list(other))
def __radd__(self, other):
if isinstance(other, UserList):
return self.__class__(other.data + self.data)
elif isinstance(other, type(self.data)):
return self.__class__(other + self.data)
else:
return self.__class__(list(other) + self.data)
def __iadd__(self, other):
if isinstance(other, UserList):
self.data += other.data
elif isinstance(other, type(self.data)):
self.data += other
else:
self.data += list(other)
return self
def __mul__(self, n):
return self.__class__(self.data*n)
__rmul__ = __mul__
def __imul__(self, n):
self.data *= n
return self
def append(self, item): self.data.append(item)
def insert(self, i, item): self.data.insert(i, item)
def pop(self, i=-1): return self.data.pop(i)
def remove(self, item): self.data.remove(item)
def count(self, item): return self.data.count(item)
def index(self, item): return self.data.index(item)
def reverse(self): self.data.reverse()
def sort(self, *args): apply(self.data.sort, args)
def extend(self, other):
if isinstance(other, UserList):
self.data.extend(other.data)
else:
self.data.extend(other)
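
Likewise, a minimal sketch of subclassing the UserList wrapper deleted above (Python 2 style; the Stack class is illustrative):

from UserList import UserList

class Stack(UserList):
    """Illustrative subclass: a small LIFO built on top of UserList."""
    def push(self, item):
        self.append(item)

s = Stack([1, 2])
s.push(3)
print len(s), s[-1]                     # -> 3 3
print s.pop()                           # -> 3
print s + [4]                           # __add__ returns another Stack: [1, 2, 4]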

+ 0
- 173
lib/jython/Lib/UserString.py View File

@@ -1,173 +0,0 @@
#!/usr/bin/env python
## vim:ts=4:et:nowrap
"""A user-defined wrapper around string objects
Note: string objects have grown methods in Python 1.6
This module requires Python 1.6 or later.
"""
from types import StringType, UnicodeType
import sys
__all__ = ["UserString","MutableString"]
class UserString:
def __init__(self, seq):
if isinstance(seq, StringType) or isinstance(seq, UnicodeType):
self.data = seq
elif isinstance(seq, UserString):
self.data = seq.data[:]
else:
self.data = str(seq)
def __str__(self): return str(self.data)
def __repr__(self): return repr(self.data)
def __int__(self): return int(self.data)
def __long__(self): return long(self.data)
def __float__(self): return float(self.data)
def __complex__(self): return complex(self.data)
def __hash__(self): return hash(self.data)
def __cmp__(self, string):
if isinstance(string, UserString):
return cmp(self.data, string.data)
else:
return cmp(self.data, string)
def __contains__(self, char):
return char in self.data
def __len__(self): return len(self.data)
def __getitem__(self, index): return self.__class__(self.data[index])
def __getslice__(self, start, end):
start = max(start, 0); end = max(end, 0)
return self.__class__(self.data[start:end])
def __add__(self, other):
if isinstance(other, UserString):
return self.__class__(self.data + other.data)
elif isinstance(other, StringType) or isinstance(other, UnicodeType):
return self.__class__(self.data + other)
else:
return self.__class__(self.data + str(other))
def __radd__(self, other):
if isinstance(other, StringType) or isinstance(other, UnicodeType):
return self.__class__(other + self.data)
else:
return self.__class__(str(other) + self.data)
def __iadd__(self, other):
if isinstance(other, UserString):
self.data += other.data
elif isinstance(other, StringType) or isinstance(other, UnicodeType):
self.data += other
else:
self.data += str(other)
return self
def __mul__(self, n):
return self.__class__(self.data*n)
__rmul__ = __mul__
def __imul__(self, n):
self.data *= n
return self
# the following methods are defined in alphabetical order:
def capitalize(self): return self.__class__(self.data.capitalize())
def center(self, width): return self.__class__(self.data.center(width))
def count(self, sub, start=0, end=sys.maxint):
return self.data.count(sub, start, end)
def encode(self, encoding=None, errors=None): # XXX improve this?
if encoding:
if errors:
return self.__class__(self.data.encode(encoding, errors))
else:
return self.__class__(self.data.encode(encoding))
else:
return self.__class__(self.data.encode())
def endswith(self, suffix, start=0, end=sys.maxint):
return self.data.endswith(suffix, start, end)
def expandtabs(self, tabsize=8):
return self.__class__(self.data.expandtabs(tabsize))
def find(self, sub, start=0, end=sys.maxint):
return self.data.find(sub, start, end)
def index(self, sub, start=0, end=sys.maxint):
return self.data.index(sub, start, end)
def isalpha(self): return self.data.isalpha()
def isalnum(self): return self.data.isalnum()
def isdecimal(self): return self.data.isdecimal()
def isdigit(self): return self.data.isdigit()
def islower(self): return self.data.islower()
def isnumeric(self): return self.data.isnumeric()
def isspace(self): return self.data.isspace()
def istitle(self): return self.data.istitle()
def isupper(self): return self.data.isupper()
def join(self, seq): return self.data.join(seq)
def ljust(self, width): return self.__class__(self.data.ljust(width))
def lower(self): return self.__class__(self.data.lower())
def lstrip(self): return self.__class__(self.data.lstrip())
def replace(self, old, new, maxsplit=-1):
return self.__class__(self.data.replace(old, new, maxsplit))
def rfind(self, sub, start=0, end=sys.maxint):
return self.data.rfind(sub, start, end)
def rindex(self, sub, start=0, end=sys.maxint):
return self.data.rindex(sub, start, end)
def rjust(self, width): return self.__class__(self.data.rjust(width))
def rstrip(self): return self.__class__(self.data.rstrip())
def split(self, sep=None, maxsplit=-1):
return self.data.split(sep, maxsplit)
def splitlines(self, keepends=0): return self.data.splitlines(keepends)
def startswith(self, prefix, start=0, end=sys.maxint):
return self.data.startswith(prefix, start, end)
def strip(self): return self.__class__(self.data.strip())
def swapcase(self): return self.__class__(self.data.swapcase())
def title(self): return self.__class__(self.data.title())
def translate(self, *args):
return self.__class__(self.data.translate(*args))
def upper(self): return self.__class__(self.data.upper())
class MutableString(UserString):
"""mutable string objects
Python strings are immutable objects. This has the advantage that
strings may be used as dictionary keys. If this property isn't needed
and you insist on changing string values in place instead, you may cheat
and use MutableString.
But the purpose of this class is an educational one: to prevent
people from inventing their own mutable string class derived
from UserString and then forget to remove (override) the
__hash__ method inherited from UserString. This would lead to
errors that would be very hard to track down.
A faster and better solution is to rewrite your program using lists."""
def __init__(self, string=""):
self.data = string
def __hash__(self):
raise TypeError, "unhashable type (it is mutable)"
def __setitem__(self, index, sub):
if index < 0 or index >= len(self.data): raise IndexError
self.data = self.data[:index] + sub + self.data[index+1:]
def __delitem__(self, index):
if index < 0 or index >= len(self.data): raise IndexError
self.data = self.data[:index] + self.data[index+1:]
def __setslice__(self, start, end, sub):
start = max(start, 0); end = max(end, 0)
if isinstance(sub, UserString):
self.data = self.data[:start]+sub.data+self.data[end:]
elif isinstance(sub, StringType) or isinstance(sub, UnicodeType):
self.data = self.data[:start]+sub+self.data[end:]
else:
self.data = self.data[:start]+str(sub)+self.data[end:]
def __delslice__(self, start, end):
start = max(start, 0); end = max(end, 0)
self.data = self.data[:start] + self.data[end:]
def immutable(self):
return UserString(self.data)
if __name__ == "__main__":
# execute the regression test to stdout, if called as a script:
import os
called_in_dir, called_as = os.path.split(sys.argv[0])
called_in_dir = os.path.abspath(called_in_dir)
called_as, py = os.path.splitext(called_as)
sys.path.append(os.path.join(called_in_dir, 'test'))
if '-q' in sys.argv:
import test_support
test_support.verbose = 0
__import__('test_' + called_as.lower())
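
A minimal sketch of the UserString/MutableString classes deleted above in action (Python 2 style; the sample values are illustrative):

from UserString import UserString, MutableString

u = UserString('Jython')
print u.upper(), len(u), u[0]           # -> JYTHON 6 J

m = MutableString('spam')
m[0] = 'S'                              # in-place edit, unlike a real string
print m                                 # -> Spam
try:
    hash(m)                             # MutableString deliberately refuses to hash
except TypeError, e:
    print 'not hashable:', e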

+ 0
- 69
lib/jython/Lib/__future__.py View File

@@ -1,69 +0,0 @@
"""Record of phased-in incompatible language changes.
Each line is of the form:
FeatureName = "_Feature(" OptionalRelease "," MandatoryRelease ")"
where, normally, OptionalRelease < MandatoryRelease, and both are 5-tuples
of the same form as sys.version_info:
(PY_MAJOR_VERSION, # the 2 in 2.1.0a3; an int
PY_MINOR_VERSION, # the 1; an int
PY_MICRO_VERSION, # the 0; an int
PY_RELEASE_LEVEL, # "alpha", "beta", "candidate" or "final"; string
PY_RELEASE_SERIAL # the 3; an int
)
OptionalRelease records the first release in which
from __future__ import FeatureName
was accepted.
In the case of MandatoryReleases that have not yet occurred,
MandatoryRelease predicts the release in which the feature will become part
of the language.
Else MandatoryRelease records when the feature became part of the language;
in releases at or after that, modules no longer need
from __future__ import FeatureName
to use the feature in question, but may continue to use such imports.
MandatoryRelease may also be None, meaning that a planned feature got
dropped.
Instances of class _Feature have two corresponding methods,
.getOptionalRelease() and .getMandatoryRelease().
No feature line is ever to be deleted from this file.
"""
class _Feature:
def __init__(self, optionalRelease, mandatoryRelease):
self.optional = optionalRelease
self.mandatory = mandatoryRelease
def getOptionalRelease(self):
"""Return first release in which this feature was recognized.
This is a 5-tuple, of the same form as sys.version_info.
"""
return self.optional
def getMandatoryRelease(self):
"""Return release in which this feature will become mandatory.
This is a 5-tuple, of the same form as sys.version_info, or, if
the feature was dropped, is None.
"""
return self.mandatory
def __repr__(self):
return "Feature(" + `self.getOptionalRelease()` + ", " + \
`self.getMandatoryRelease()` + ")"
nested_scopes = _Feature((2, 1, 0, "beta", 1), (2, 2, 0, "final", 0))
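
A minimal sketch of querying the __future__ module deleted above (Python 2 style; the printed tuples correspond to the nested_scopes entry shown):

import __future__

# When did nested_scopes become optional, and when does it turn mandatory?
feature = __future__.nested_scopes
print feature.getOptionalRelease()      # -> (2, 1, 0, 'beta', 1)
print feature.getMandatoryRelease()     # -> (2, 2, 0, 'final', 0)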

+ 0
- 86
lib/jython/Lib/anydbm.py View File

@@ -1,86 +0,0 @@
"""Generic interface to all dbm clones.
Instead of
import dbm
d = dbm.open(file, 'w', 0666)
use
import anydbm
d = anydbm.open(file, 'w')
The returned object is a dbhash, gdbm, dbm or dumbdbm object,
dependent on the type of database being opened (determined by whichdb
module) in the case of an existing dbm. If the dbm does not exist and
the create or new flag ('c' or 'n') was specified, the dbm type will
be determined by the availability of the modules (tested in the above
order).
It has the following interface (key and data are strings):
d[key] = data # store data at key (may override data at
# existing key)
data = d[key] # retrieve data at key (raise KeyError if no
# such key)
del d[key] # delete data stored at key (raises KeyError
# if no such key)
flag = d.has_key(key) # true if the key exists
list = d.keys() # return a list of all existing keys (slow!)
Future versions may change the order in which implementations are
tested for existence, add interfaces to other dbm-like
implementations.
The open function has an optional second argument. This can be 'r',
for read-only access, 'w', for read-write access of an existing
database, 'c' for read-write access to a new or existing database, and
'n' for read-write access to a new database. The default is 'r'.
Note: 'r' and 'w' fail if the database doesn't exist; 'c' creates it
only if it doesn't exist; and 'n' always creates a new database.
"""
try:
class error(Exception):
pass
except:
error = "anydbm.error"
_names = ['dbhash', 'gdbm', 'dbm', 'dumbdbm']
_errors = [error]
_defaultmod = None
for _name in _names:
try:
_mod = __import__(_name)
except ImportError:
continue
if not _defaultmod:
_defaultmod = _mod
_errors.append(_mod.error)
if not _defaultmod:
raise ImportError, "no dbm clone found; tried %s" % _names
error = tuple(_errors)
def open(file, flag = 'r', mode = 0666):
# guess the type of an existing database
from whichdb import whichdb
result=whichdb(file)
if result is None:
# db doesn't exist
if 'c' in flag or 'n' in flag:
# file doesn't exist and the new
# flag was used so use default type
mod = _defaultmod
else:
raise error, "need 'c' or 'n' flag to open new db"
elif result == "":
# db type cannot be determined
raise error, "db type could not be determined"
else:
mod = __import__(result)
return mod.open(file, flag, mode)
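
A minimal sketch of the anydbm interface deleted above (Python 2 style; the database path is illustrative, and the backend actually used depends on which dbm clone is importable):

import anydbm

db = anydbm.open('/tmp/demo.db', 'c')   # 'c': create the db if it is missing
db['greeting'] = 'hello'                # keys and values are strings
print db.has_key('greeting'), db['greeting']
del db['greeting']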

+ 0
- 54
lib/jython/Lib/atexit.py View File

@@ -1,54 +0,0 @@
"""
atexit.py - allow programmer to define multiple exit functions to be executed
upon normal program termination.
One public function, register, is defined.
"""
__all__ = ["register"]
_exithandlers = []
def _run_exitfuncs():
"""run any registered exit functions
_exithandlers is traversed in reverse order so functions are executed
last in, first out.
"""
while _exithandlers:
func, targs, kargs = _exithandlers.pop()
apply(func, targs, kargs)
def register(func, *targs, **kargs):
"""register a function to be executed upon normal program termination
func - function to be called at exit
targs - optional arguments to pass to func
kargs - optional keyword arguments to pass to func
"""
_exithandlers.append((func, targs, kargs))
import sys
try:
x = sys.exitfunc
except AttributeError:
sys.exitfunc = _run_exitfuncs
else:
# if x isn't our own exit func executive, assume it's another
# registered exit function - append it to our list...
if x != _run_exitfuncs:
register(x)
del sys
if __name__ == "__main__":
def x1():
print "running x1"
def x2(n):
print "running x2(%s)" % `n`
def x3(n, kwd=None):
print "running x3(%s, kwd=%s)" % (`n`, `kwd`)
register(x1)
register(x2, 12)
register(x3, 5, "bar")
register(x3, "no kwd args")

+ 0
- 81
lib/jython/Lib/base64.py View File

@@ -1,81 +0,0 @@
#! /usr/bin/env python
"""Conversions to/from base64 transport encoding as per RFC-1521."""
# Modified 04-Oct-95 by Jack to use binascii module
import binascii
__all__ = ["encode","decode","encodestring","decodestring"]
MAXLINESIZE = 76 # Excluding the CRLF
MAXBINSIZE = (MAXLINESIZE/4)*3
def encode(input, output):
"""Encode a file."""
while 1:
s = input.read(MAXBINSIZE)
if not s: break
while len(s) < MAXBINSIZE:
ns = input.read(MAXBINSIZE-len(s))
if not ns: break
s = s + ns
line = binascii.b2a_base64(s)
output.write(line)
def decode(input, output):
"""Decode a file."""
while 1:
line = input.readline()
if not line: break
s = binascii.a2b_base64(line)
output.write(s)
def encodestring(s):
"""Encode a string."""
import StringIO
f = StringIO.StringIO(s)
g = StringIO.StringIO()
encode(f, g)
return g.getvalue()
def decodestring(s):
"""Decode a string."""
import StringIO
f = StringIO.StringIO(s)
g = StringIO.StringIO()
decode(f, g)
return g.getvalue()
def test():
"""Small test program"""
import sys, getopt
try:
opts, args = getopt.getopt(sys.argv[1:], 'deut')
except getopt.error, msg:
sys.stdout = sys.stderr
print msg
print """usage: %s [-d|-e|-u|-t] [file|-]
-d, -u: decode
-e: encode (default)
-t: encode and decode string 'Aladdin:open sesame'"""%sys.argv[0]
sys.exit(2)
func = encode
for o, a in opts:
if o == '-e': func = encode
if o == '-d': func = decode
if o == '-u': func = decode
if o == '-t': test1(); return
if args and args[0] != '-':
func(open(args[0], 'rb'), sys.stdout)
else:
func(sys.stdin, sys.stdout)
def test1():
s0 = "Aladdin:open sesame"
s1 = encodestring(s0)
s2 = decodestring(s1)
print s0, `s1`, s2
if __name__ == '__main__':
test()
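
A minimal sketch of the string helpers in the base64 module deleted above (Python 2 style; the sample string is the module's own test value):

import base64

encoded = base64.encodestring('Aladdin:open sesame')
print encoded.strip()                   # -> QWxhZGRpbjpvcGVuIHNlc2FtZQ==
print base64.decodestring(encoded)      # -> Aladdin:open sesame
# encode()/decode() do the same for file objects, e.g. opened with 'rb'.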

+ 0
- 565
lib/jython/Lib/bdb.py View File

@@ -1,565 +0,0 @@
"""Debugger basics"""
import sys
import os
import types
__all__ = ["BdbQuit","Bdb","Breakpoint"]
BdbQuit = 'bdb.BdbQuit' # Exception to give up completely
class Bdb:
"""Generic Python debugger base class.
This class takes care of details of the trace facility;
a derived class should implement user interaction.
The standard debugger class (pdb.Pdb) is an example.
"""
def __init__(self):
self.breaks = {}
self.fncache = {}
def canonic(self, filename):
canonic = self.fncache.get(filename)
if not canonic:
canonic = os.path.abspath(filename)
self.fncache[filename] = canonic
return canonic
def reset(self):
import linecache
linecache.checkcache()
self.botframe = None
self.stopframe = None
self.returnframe = None
self.quitting = 0
def trace_dispatch(self, frame, event, arg):
if self.quitting:
return # None
if event == 'line':
return self.dispatch_line(frame)
if event == 'call':
return self.dispatch_call(frame, arg)
if event == 'return':
return self.dispatch_return(frame, arg)
if event == 'exception':
return self.dispatch_exception(frame, arg)
print 'bdb.Bdb.dispatch: unknown debugging event:', `event`
return self.trace_dispatch
def dispatch_line(self, frame):
if self.stop_here(frame) or self.break_here(frame):
self.user_line(frame)
if self.quitting: raise BdbQuit
return self.trace_dispatch
def dispatch_call(self, frame, arg):
# XXX 'arg' is no longer used
if self.botframe is None:
# First call of dispatch since reset()
self.botframe = frame
return self.trace_dispatch
if not (self.stop_here(frame) or self.break_anywhere(frame)):
# No need to trace this function
return # None
self.user_call(frame, arg)
if self.quitting: raise BdbQuit
return self.trace_dispatch
def dispatch_return(self, frame, arg):
if self.stop_here(frame) or frame == self.returnframe:
self.user_return(frame, arg)
if self.quitting: raise BdbQuit
def dispatch_exception(self, frame, arg):
if self.stop_here(frame):
self.user_exception(frame, arg)
if self.quitting: raise BdbQuit
return self.trace_dispatch
# Normally derived classes don't override the following
# methods, but they may if they want to redefine the
# definition of stopping and breakpoints.
def stop_here(self, frame):
if self.stopframe is None:
return 1
if frame is self.stopframe:
return 1
while frame is not None and frame is not self.stopframe:
if frame is self.botframe:
return 1
frame = frame.f_back
return 0
def break_here(self, frame):
filename = self.canonic(frame.f_code.co_filename)
if not self.breaks.has_key(filename):
return 0
lineno = frame.f_lineno
if not lineno in self.breaks[filename]:
return 0
# flag says ok to delete temp. bp
(bp, flag) = effective(filename, lineno, frame)
if bp:
self.currentbp = bp.number
if (flag and bp.temporary):
self.do_clear(str(bp.number))
return 1
else:
return 0
def do_clear(self, arg):
raise NotImplementedError, "subclass of bdb must implement do_clear()"
def break_anywhere(self, frame):
return self.breaks.has_key(
self.canonic(frame.f_code.co_filename))
# Derived classes should override the user_* methods
# to gain control.
def user_call(self, frame, argument_list):
"""This method is called when there is the remote possibility
that we ever need to stop in this function."""
pass
def user_line(self, frame):
"""This method is called when we stop or break at this line."""
pass
def user_return(self, frame, return_value):
"""This method is called when a return trap is set here."""
pass
def user_exception(self, frame, (exc_type, exc_value, exc_traceback)):
"""This method is called if an exception occurs,
but only if we are to stop at or just below this level."""
pass
# Derived classes and clients can call the following methods
# to affect the stepping state.
def set_step(self):
"""Stop after one line of code."""
self.stopframe = None
self.returnframe = None
self.quitting = 0
def set_next(self, frame):
"""Stop on the next line in or below the given frame."""
self.stopframe = frame
self.returnframe = None
self.quitting = 0
def set_return(self, frame):
"""Stop when returning from the given frame."""
self.stopframe = frame.f_back
self.returnframe = frame
self.quitting = 0
def set_trace(self):
"""Start debugging from here."""
try:
1 + ''
except:
frame = sys.exc_info()[2].tb_frame.f_back
self.reset()
while frame:
frame.f_trace = self.trace_dispatch
self.botframe = frame
frame = frame.f_back
self.set_step()
sys.settrace(self.trace_dispatch)
def set_continue(self):
# Don't stop except at breakpoints or when finished
self.stopframe = self.botframe
self.returnframe = None
self.quitting = 0
if not self.breaks:
# no breakpoints; run without debugger overhead
sys.settrace(None)
try:
1 + '' # raise an exception
except:
frame = sys.exc_info()[2].tb_frame.f_back
while frame and frame is not self.botframe:
del frame.f_trace
frame = frame.f_back
def set_quit(self):
self.stopframe = self.botframe
self.returnframe = None
self.quitting = 1
sys.settrace(None)
# Derived classes and clients can call the following methods
# to manipulate breakpoints. These methods return an
# error message if something went wrong, None if all is well.
# Set_break prints out the breakpoint line and file:lineno.
# Call self.get_*break*() to see the breakpoints or better
# for bp in Breakpoint.bpbynumber: if bp: bp.bpprint().
def set_break(self, filename, lineno, temporary=0, cond = None):
filename = self.canonic(filename)
import linecache # Import as late as possible
line = linecache.getline(filename, lineno)
if not line:
return 'Line %s:%d does not exist' % (filename,
lineno)
if not self.breaks.has_key(filename):
self.breaks[filename] = []
list = self.breaks[filename]
if not lineno in list:
list.append(lineno)
bp = Breakpoint(filename, lineno, temporary, cond)
def clear_break(self, filename, lineno):
filename = self.canonic(filename)
if not self.breaks.has_key(filename):
return 'There are no breakpoints in %s' % filename
if lineno not in self.breaks[filename]:
return 'There is no breakpoint at %s:%d' % (filename,
lineno)
# If there's only one bp in the list for that file,line
# pair, then remove the breaks entry
for bp in Breakpoint.bplist[filename, lineno][:]:
bp.deleteMe()
if not Breakpoint.bplist.has_key((filename, lineno)):
self.breaks[filename].remove(lineno)
if not self.breaks[filename]:
del self.breaks[filename]
def clear_bpbynumber(self, arg):
try:
number = int(arg)
except:
return 'Non-numeric breakpoint number (%s)' % arg
try:
bp = Breakpoint.bpbynumber[number]
except IndexError:
return 'Breakpoint number (%d) out of range' % number
if not bp:
return 'Breakpoint (%d) already deleted' % number
self.clear_break(bp.file, bp.line)
def clear_all_file_breaks(self, filename):
filename = self.canonic(filename)
if not self.breaks.has_key(filename):
return 'There are no breakpoints in %s' % filename
for line in self.breaks[filename]:
blist = Breakpoint.bplist[filename, line]
for bp in blist:
bp.deleteMe()
del self.breaks[filename]
def clear_all_breaks(self):
if not self.breaks:
return 'There are no breakpoints'
for bp in Breakpoint.bpbynumber:
if bp:
bp.deleteMe()
self.breaks = {}
def get_break(self, filename, lineno):
filename = self.canonic(filename)
return self.breaks.has_key(filename) and \
lineno in self.breaks[filename]
def get_breaks(self, filename, lineno):
filename = self.canonic(filename)
return self.breaks.has_key(filename) and \
lineno in self.breaks[filename] and \
Breakpoint.bplist[filename, lineno] or []
def get_file_breaks(self, filename):
filename = self.canonic(filename)
if self.breaks.has_key(filename):
return self.breaks[filename]
else:
return []
def get_all_breaks(self):
return self.breaks
# Derived classes and clients can call the following method
# to get a data structure representing a stack trace.
def get_stack(self, f, t):
stack = []
if t and t.tb_frame is f:
t = t.tb_next
while f is not None:
stack.append((f, f.f_lineno))
if f is self.botframe:
break
f = f.f_back
stack.reverse()
i = max(0, len(stack) - 1)
while t is not None:
stack.append((t.tb_frame, t.tb_lineno))
t = t.tb_next
return stack, i
#
def format_stack_entry(self, frame_lineno, lprefix=': '):
import linecache, repr
frame, lineno = frame_lineno
filename = self.canonic(frame.f_code.co_filename)
s = filename + '(' + `lineno` + ')'
if frame.f_code.co_name:
s = s + frame.f_code.co_name
else:
s = s + "<lambda>"
if frame.f_locals.has_key('__args__'):
args = frame.f_locals['__args__']
else:
args = None
if args:
s = s + repr.repr(args)
else:
s = s + '()'
if frame.f_locals.has_key('__return__'):
rv = frame.f_locals['__return__']
s = s + '->'
s = s + repr.repr(rv)
line = linecache.getline(filename, lineno)
if line: s = s + lprefix + line.strip()
return s
# The following two methods can be called by clients to use
# a debugger to debug a statement, given as a string.
def run(self, cmd, globals=None, locals=None):
if globals is None:
import __main__
globals = __main__.__dict__
if locals is None:
locals = globals
self.reset()
sys.settrace(self.trace_dispatch)
if not isinstance(cmd, types.CodeType):
cmd = cmd+'\n'
try:
try:
exec cmd in globals, locals
except BdbQuit:
pass
finally:
self.quitting = 1
sys.settrace(None)
def runeval(self, expr, globals=None, locals=None):
if globals is None:
import __main__
globals = __main__.__dict__
if locals is None:
locals = globals
self.reset()
sys.settrace(self.trace_dispatch)
if not isinstance(expr, types.CodeType):
expr = expr+'\n'
try:
try:
return eval(expr, globals, locals)
except BdbQuit:
pass
finally:
self.quitting = 1
sys.settrace(None)
def runctx(self, cmd, globals, locals):
# B/W compatibility
self.run(cmd, globals, locals)
# This method is more useful to debug a single function call.
def runcall(self, func, *args):
self.reset()
sys.settrace(self.trace_dispatch)
res = None
try:
try:
res = apply(func, args)
except BdbQuit:
pass
finally:
self.quitting = 1
sys.settrace(None)
return res
def set_trace():
Bdb().set_trace()
class Breakpoint:
"""Breakpoint class
Implements temporary breakpoints, ignore counts, disabling and
(re)-enabling, and conditionals.
Breakpoints are indexed by number through bpbynumber and by
the file,line tuple using bplist. The former points to a
single instance of class Breakpoint. The latter points to a
list of such instances since there may be more than one
breakpoint per line.
"""
# XXX Keeping state in the class is a mistake -- this means
# you cannot have more than one active Bdb instance.
next = 1 # Next bp to be assigned
bplist = {} # indexed by (file, lineno) tuple
bpbynumber = [None] # Each entry is None or an instance of Bpt
# index 0 is unused, except for marking an
# effective break .... see effective()
def __init__(self, file, line, temporary=0, cond = None):
self.file = file # This better be in canonical form!
self.line = line
self.temporary = temporary
self.cond = cond
self.enabled = 1
self.ignore = 0
self.hits = 0
self.number = Breakpoint.next
Breakpoint.next = Breakpoint.next + 1
# Build the two lists
self.bpbynumber.append(self)
if self.bplist.has_key((file, line)):
self.bplist[file, line].append(self)
else:
self.bplist[file, line] = [self]
def deleteMe(self):
index = (self.file, self.line)
self.bpbynumber[self.number] = None # No longer in list
self.bplist[index].remove(self)
if not self.bplist[index]:
# No more bp for this f:l combo
del self.bplist[index]
def enable(self):
self.enabled = 1
def disable(self):
self.enabled = 0
def bpprint(self):
if self.temporary:
disp = 'del '
else:
disp = 'keep '
if self.enabled:
disp = disp + 'yes'
else:
disp = disp + 'no '
print '%-4dbreakpoint %s at %s:%d' % (self.number, disp,
self.file, self.line)
if self.cond:
print '\tstop only if %s' % (self.cond,)
if self.ignore:
print '\tignore next %d hits' % (self.ignore)
if (self.hits):
if (self.hits > 1): ss = 's'
else: ss = ''
print ('\tbreakpoint already hit %d time%s' %
(self.hits, ss))
# -----------end of Breakpoint class----------
# Determines if there is an effective (active) breakpoint at this
# line of code. Returns breakpoint number or 0 if none
def effective(file, line, frame):
"""Determine which breakpoint for this file:line is to be acted upon.
Called only if we know there is a bpt at this
location. Returns breakpoint that was triggered and a flag
that indicates if it is ok to delete a temporary bp.
"""
possibles = Breakpoint.bplist[file,line]
for i in range(0, len(possibles)):
b = possibles[i]
if b.enabled == 0:
continue
# Count every hit when bp is enabled
b.hits = b.hits + 1
if not b.cond:
# If unconditional, and ignoring,
# go on to next, else break
if b.ignore > 0:
b.ignore = b.ignore -1
continue
else:
# breakpoint and marker that's ok
# to delete if temporary
return (b,1)
else:
# Conditional bp.
# Ignore count applies only to those bpt hits where the
# condition evaluates to true.
try:
val = eval(b.cond, frame.f_globals,
frame.f_locals)
if val:
if b.ignore > 0:
b.ignore = b.ignore -1
# continue
else:
return (b,1)
# else:
# continue
except:
# if eval fails, most conservative
# thing is to stop on breakpoint
# regardless of ignore count.
# Don't delete temporary,
# as another hint to user.
return (b,0)
return (None, None)
# -------------------- testing --------------------
class Tdb(Bdb):
def user_call(self, frame, args):
name = frame.f_code.co_name
if not name: name = '???'
print '+++ call', name, args
def user_line(self, frame):
import linecache
name = frame.f_code.co_name
if not name: name = '???'
fn = self.canonic(frame.f_code.co_filename)
line = linecache.getline(fn, frame.f_lineno)
print '+++', fn, frame.f_lineno, name, ':', line.strip()
def user_return(self, frame, retval):
print '+++ return', retval
def user_exception(self, frame, exc_stuff):
print '+++ exception', exc_stuff
self.set_continue()
def foo(n):
print 'foo(', n, ')'
x = bar(n*10)
print 'bar returned', x
def bar(a):
print 'bar(', a, ')'
return a/2
def test():
t = Tdb()
t.run('import bdb; bdb.foo(10)')
# end
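
A minimal sketch of driving the bdb module deleted above through its bundled sample tracer (Python 2 style; the traced function is illustrative):

import bdb

def triple(n):
    return n * 3

# bdb.Tdb (defined in the module above) prints one line per call/line/return
# event; runcall() traces a single function call and returns its result.
tracer = bdb.Tdb()
print tracer.runcall(triple, 14)        # trace output, then 42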

+ 0
- 531
lib/jython/Lib/binhex.py View File

@@ -1,531 +0,0 @@
"""Macintosh binhex compression/decompression.
easy interface:
binhex(inputfilename, outputfilename)
hexbin(inputfilename, outputfilename)
"""
#
# Jack Jansen, CWI, August 1995.
#
# The module is supposed to be as compatible as possible. Especially the
# easy interface should work "as expected" on any platform.
# XXXX Note: currently, textfiles appear in mac-form on all platforms.
# We seem to lack a simple character-translate in python.
# (we should probably use ISO-Latin-1 on all but the mac platform).
# XXXX The simple routines are too simple: they expect to hold the complete
# files in-core. Should be fixed.
# XXXX It would be nice to handle AppleDouble format on unix
# (for servers serving macs).
# XXXX I don't understand what happens when you get 0x90 times the same byte on
# input. The resulting code (xx 90 90) would appear to be interpreted as an
# escaped *value* of 0x90. All coders I've seen appear to ignore this nicety...
#
import sys
import os
import struct
import binascii
__all__ = ["binhex","hexbin","Error"]
class Error(Exception):
pass
# States (what have we written)
[_DID_HEADER, _DID_DATA, _DID_RSRC] = range(3)
# Various constants
REASONABLY_LARGE=32768 # Minimal amount we pass the rle-coder
LINELEN=64
RUNCHAR=chr(0x90) # run-length introducer
#
# This code is no longer byte-order dependent
#
# Workarounds for non-mac machines.
if os.name == 'mac':
import macfs
import MacOS
try:
openrf = MacOS.openrf
except AttributeError:
# Backward compatibility
openrf = open
def FInfo():
return macfs.FInfo()
def getfileinfo(name):
finfo = macfs.FSSpec(name).GetFInfo()
dir, file = os.path.split(name)
# XXXX Get resource/data sizes
fp = open(name, 'rb')
fp.seek(0, 2)
dlen = fp.tell()
fp = openrf(name, '*rb')
fp.seek(0, 2)
rlen = fp.tell()
return file, finfo, dlen, rlen
def openrsrc(name, *mode):
if not mode:
mode = '*rb'
else:
mode = '*' + mode[0]
return openrf(name, mode)
else:
#
# Glue code for non-macintosh usage
#
class FInfo:
def __init__(self):
self.Type = '????'
self.Creator = '????'
self.Flags = 0
def getfileinfo(name):
finfo = FInfo()
# Quick check for textfile
fp = open(name)
data = open(name).read(256)
for c in data:
if not c.isspace() and (c<' ' or ord(c) > 0177):
break
else:
finfo.Type = 'TEXT'
fp.seek(0, 2)
dsize = fp.tell()
fp.close()
dir, file = os.path.split(name)
file = file.replace(':', '-', 1)
return file, finfo, dsize, 0
class openrsrc:
def __init__(self, *args):
pass
def read(self, *args):
return ''
def write(self, *args):
pass
def close(self):
pass
class _Hqxcoderengine:
"""Write data to the coder in 3-byte chunks"""
def __init__(self, ofp):
self.ofp = ofp
self.data = ''
self.hqxdata = ''
self.linelen = LINELEN-1
def write(self, data):
self.data = self.data + data
datalen = len(self.data)
todo = (datalen/3)*3
data = self.data[:todo]
self.data = self.data[todo:]
if not data:
return
self.hqxdata = self.hqxdata + binascii.b2a_hqx(data)
self._flush(0)
def _flush(self, force):
first = 0
while first <= len(self.hqxdata)-self.linelen:
last = first + self.linelen
self.ofp.write(self.hqxdata[first:last]+'\n')
self.linelen = LINELEN
first = last
self.hqxdata = self.hqxdata[first:]
if force:
self.ofp.write(self.hqxdata + ':\n')
def close(self):
if self.data:
self.hqxdata = \
self.hqxdata + binascii.b2a_hqx(self.data)
self._flush(1)
self.ofp.close()
del self.ofp
class _Rlecoderengine:
"""Write data to the RLE-coder in suitably large chunks"""
def __init__(self, ofp):
self.ofp = ofp
self.data = ''
def write(self, data):
self.data = self.data + data
if len(self.data) < REASONABLY_LARGE:
return
rledata = binascii.rlecode_hqx(self.data)
self.ofp.write(rledata)
self.data = ''
def close(self):
if self.data:
rledata = binascii.rlecode_hqx(self.data)
self.ofp.write(rledata)
self.ofp.close()
del self.ofp
class BinHex:
def __init__(self, (name, finfo, dlen, rlen), ofp):
if type(ofp) == type(''):
ofname = ofp
ofp = open(ofname, 'w')
if os.name == 'mac':
fss = macfs.FSSpec(ofname)
fss.SetCreatorType('BnHq', 'TEXT')
ofp.write('(This file must be converted with BinHex 4.0)\n\n:')
hqxer = _Hqxcoderengine(ofp)
self.ofp = _Rlecoderengine(hqxer)
self.crc = 0
if finfo is None:
finfo = FInfo()
self.dlen = dlen
self.rlen = rlen
self._writeinfo(name, finfo)
self.state = _DID_HEADER
def _writeinfo(self, name, finfo):
name = name
nl = len(name)
if nl > 63:
raise Error, 'Filename too long'
d = chr(nl) + name + '\0'
d2 = finfo.Type + finfo.Creator
# Force all structs to be packed with big-endian
d3 = struct.pack('>h', finfo.Flags)
d4 = struct.pack('>ii', self.dlen, self.rlen)
info = d + d2 + d3 + d4
self._write(info)
self._writecrc()
def _write(self, data):
self.crc = binascii.crc_hqx(data, self.crc)
self.ofp.write(data)
def _writecrc(self):
# XXXX Should this be here??
# self.crc = binascii.crc_hqx('\0\0', self.crc)
self.ofp.write(struct.pack('>h', self.crc))
self.crc = 0
def write(self, data):
if self.state != _DID_HEADER:
raise Error, 'Writing data at the wrong time'
self.dlen = self.dlen - len(data)
self._write(data)
def close_data(self):
if self.dlen != 0:
raise Error, 'Incorrect data size, diff='+`self.rlen`
self._writecrc()
self.state = _DID_DATA
def write_rsrc(self, data):
if self.state < _DID_DATA:
self.close_data()
if self.state != _DID_DATA:
raise Error, 'Writing resource data at the wrong time'
self.rlen = self.rlen - len(data)
self._write(data)
def close(self):
if self.state < _DID_DATA:
self.close_data()
if self.state != _DID_DATA:
raise Error, 'Close at the wrong time'
if self.rlen != 0:
raise Error, \
"Incorrect resource-datasize, diff="+`self.rlen`
self._writecrc()
self.ofp.close()
self.state = None
del self.ofp
def binhex(inp, out):
"""(infilename, outfilename) - Create binhex-encoded copy of a file"""
finfo = getfileinfo(inp)
ofp = BinHex(finfo, out)
ifp = open(inp, 'rb')
# XXXX Do textfile translation on non-mac systems
while 1:
d = ifp.read(128000)
if not d: break
ofp.write(d)
ofp.close_data()
ifp.close()
ifp = openrsrc(inp, 'rb')
while 1:
d = ifp.read(128000)
if not d: break
ofp.write_rsrc(d)
ofp.close()
ifp.close()
class _Hqxdecoderengine:
"""Read data via the decoder in 4-byte chunks"""
def __init__(self, ifp):
self.ifp = ifp
self.eof = 0
def read(self, totalwtd):
"""Read at least wtd bytes (or until EOF)"""
decdata = ''
wtd = totalwtd
#
# The loop here is convoluted, since we don't really know how
# much to decode: there may be newlines in the incoming data.
while wtd > 0:
if self.eof: return decdata
wtd = ((wtd+2)/3)*4
data = self.ifp.read(wtd)
#
# Next problem: there may not be a complete number of
# bytes in what we pass to a2b. Solve by yet another
# loop.
#
while 1:
try:
decdatacur, self.eof = \
binascii.a2b_hqx(data)
break
except binascii.Incomplete:
pass
newdata = self.ifp.read(1)
if not newdata:
raise Error, \
'Premature EOF on binhex file'
data = data + newdata
decdata = decdata + decdatacur
wtd = totalwtd - len(decdata)
if not decdata and not self.eof:
raise Error, 'Premature EOF on binhex file'
return decdata
def close(self):
self.ifp.close()
class _Rledecoderengine:
"""Read data via the RLE-coder"""
def __init__(self, ifp):
self.ifp = ifp
self.pre_buffer = ''
self.post_buffer = ''
self.eof = 0
def read(self, wtd):
if wtd > len(self.post_buffer):
self._fill(wtd-len(self.post_buffer))
rv = self.post_buffer[:wtd]
self.post_buffer = self.post_buffer[wtd:]
return rv
def _fill(self, wtd):
self.pre_buffer = self.pre_buffer + self.ifp.read(wtd+4)
if self.ifp.eof:
self.post_buffer = self.post_buffer + \
binascii.rledecode_hqx(self.pre_buffer)
self.pre_buffer = ''
return
#
# Obfuscated code ahead. We have to take care that we don't
# end up with an orphaned RUNCHAR later on. So, we keep a couple
# of bytes in the buffer, depending on what the end of
# the buffer looks like:
# '\220\0\220' - Keep 3 bytes: repeated \220 (escaped as \220\0)
# '?\220' - Keep 2 bytes: repeated something-else
# '\220\0' - Escaped \220: Keep 2 bytes.
# '?\220?' - Complete repeat sequence: decode all
# otherwise: keep 1 byte.
#
mark = len(self.pre_buffer)
if self.pre_buffer[-3:] == RUNCHAR + '\0' + RUNCHAR:
mark = mark - 3
elif self.pre_buffer[-1] == RUNCHAR:
mark = mark - 2
elif self.pre_buffer[-2:] == RUNCHAR + '\0':
mark = mark - 2
elif self.pre_buffer[-2] == RUNCHAR:
pass # Decode all
else:
mark = mark - 1
self.post_buffer = self.post_buffer + \
binascii.rledecode_hqx(self.pre_buffer[:mark])
self.pre_buffer = self.pre_buffer[mark:]
def close(self):
self.ifp.close()
class HexBin:
def __init__(self, ifp):
if type(ifp) == type(''):
ifp = open(ifp)
#
# Find initial colon.
#
while 1:
ch = ifp.read(1)
if not ch:
raise Error, "No binhex data found"
# Cater for \r\n terminated lines (which show up as \n\r, hence
# all lines start with \r)
if ch == '\r':
continue
if ch == ':':
break
if ch != '\n':
dummy = ifp.readline()
hqxifp = _Hqxdecoderengine(ifp)
self.ifp = _Rledecoderengine(hqxifp)
self.crc = 0
self._readheader()
def _read(self, len):
data = self.ifp.read(len)
self.crc = binascii.crc_hqx(data, self.crc)
return data
def _checkcrc(self):
filecrc = struct.unpack('>h', self.ifp.read(2))[0] & 0xffff
#self.crc = binascii.crc_hqx('\0\0', self.crc)
# XXXX Is this needed??
self.crc = self.crc & 0xffff
if filecrc != self.crc:
raise Error, 'CRC error, computed %x, read %x' \
%(self.crc, filecrc)
self.crc = 0
def _readheader(self):
len = self._read(1)
fname = self._read(ord(len))
rest = self._read(1+4+4+2+4+4)
self._checkcrc()
type = rest[1:5]
creator = rest[5:9]
flags = struct.unpack('>h', rest[9:11])[0]
self.dlen = struct.unpack('>l', rest[11:15])[0]
self.rlen = struct.unpack('>l', rest[15:19])[0]
self.FName = fname
self.FInfo = FInfo()
self.FInfo.Creator = creator
self.FInfo.Type = type
self.FInfo.Flags = flags
self.state = _DID_HEADER
def read(self, *n):
if self.state != _DID_HEADER:
raise Error, 'Read data at wrong time'
if n:
n = n[0]
n = min(n, self.dlen)
else:
n = self.dlen
rv = ''
while len(rv) < n:
rv = rv + self._read(n-len(rv))
self.dlen = self.dlen - n
return rv
def close_data(self):
if self.state != _DID_HEADER:
raise Error, 'close_data at wrong time'
if self.dlen:
dummy = self._read(self.dlen)
self._checkcrc()
self.state = _DID_DATA
def read_rsrc(self, *n):
if self.state == _DID_HEADER:
self.close_data()
if self.state != _DID_DATA:
raise Error, 'Read resource data at wrong time'
if n:
n = n[0]
n = min(n, self.rlen)
else:
n = self.rlen
self.rlen = self.rlen - n
return self._read(n)
def close(self):
if self.rlen:
dummy = self.read_rsrc(self.rlen)
self._checkcrc()
self.state = _DID_RSRC
self.ifp.close()
def hexbin(inp, out):
"""(infilename, outfilename) - Decode binhexed file"""
ifp = HexBin(inp)
finfo = ifp.FInfo
if not out:
out = ifp.FName
if os.name == 'mac':
ofss = macfs.FSSpec(out)
out = ofss.as_pathname()
ofp = open(out, 'wb')
# XXXX Do translation on non-mac systems
while 1:
d = ifp.read(128000)
if not d: break
ofp.write(d)
ofp.close()
ifp.close_data()
d = ifp.read_rsrc(128000)
if d:
ofp = openrsrc(out, 'wb')
ofp.write(d)
while 1:
d = ifp.read_rsrc(128000)
if not d: break
ofp.write(d)
ofp.close()
if os.name == 'mac':
nfinfo = ofss.GetFInfo()
nfinfo.Creator = finfo.Creator
nfinfo.Type = finfo.Type
nfinfo.Flags = finfo.Flags
ofss.SetFInfo(nfinfo)
ifp.close()
def _test():
if os.name == 'mac':
fss, ok = macfs.PromptGetFile('File to convert:')
if not ok:
sys.exit(0)
fname = fss.as_pathname()
else:
fname = sys.argv[1]
binhex(fname, fname+'.hqx')
hexbin(fname+'.hqx', fname+'.viahqx')
#hexbin(fname, fname+'.unpacked')
sys.exit(1)
if __name__ == '__main__':
_test()
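
A minimal sketch of the easy interface of the binhex module deleted above (Python 2 style; the file names are illustrative, and on non-Macintosh platforms the resource fork is simply empty):

import binhex

binhex.binhex('report.txt', 'report.txt.hqx')        # encode to BinHex 4.0
binhex.hexbin('report.txt.hqx', 'report.copy.txt')   # decode it again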

+ 0
- 78
lib/jython/Lib/bisect.py View File

@@ -1,78 +0,0 @@
"""Bisection algorithms."""
def insort_right(a, x, lo=0, hi=None):
"""Insert item x in list a, and keep it sorted assuming a is sorted.
If x is already in a, insert it to the right of the rightmost x.
Optional args lo (default 0) and hi (default len(a)) bound the
slice of a to be searched.
"""
if hi is None:
hi = len(a)
while lo < hi:
mid = (lo+hi)/2
if x < a[mid]: hi = mid
else: lo = mid+1
a.insert(lo, x)
insort = insort_right # backward compatibility
def bisect_right(a, x, lo=0, hi=None):
"""Return the index where to insert item x in list a, assuming a is sorted.
The return value i is such that all e in a[:i] have e <= x, and all e in
a[i:] have e > x. So if x already appears in the list, i points just
beyond the rightmost x already there.
Optional args lo (default 0) and hi (default len(a)) bound the
slice of a to be searched.
"""
if hi is None:
hi = len(a)
while lo < hi:
mid = (lo+hi)/2
if x < a[mid]: hi = mid
else: lo = mid+1
return lo
bisect = bisect_right # backward compatibility
def insort_left(a, x, lo=0, hi=None):
"""Insert item x in list a, and keep it sorted assuming a is sorted.
If x is already in a, insert it to the left of the leftmost x.
Optional args lo (default 0) and hi (default len(a)) bound the
slice of a to be searched.
"""
if hi is None:
hi = len(a)
while lo < hi:
mid = (lo+hi)/2
if a[mid] < x: lo = mid+1
else: hi = mid
a.insert(lo, x)
def bisect_left(a, x, lo=0, hi=None):
"""Return the index where to insert item x in list a, assuming a is sorted.
The return value i is such that all e in a[:i] have e < x, and all e in
a[i:] have e >= x. So if x already appears in the list, i points just
before the leftmost x already there.
Optional args lo (default 0) and hi (default len(a)) bound the
slice of a to be searched.
"""
if hi is None:
hi = len(a)
while lo < hi:
mid = (lo+hi)/2
if a[mid] < x: lo = mid+1
else: hi = mid
return lo
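
A minimal sketch of the bisect module deleted above (Python 2 style; the sample list is illustrative):

import bisect

scores = [10, 20, 20, 30]
print bisect.bisect_right(scores, 20)   # -> 3, just past the rightmost 20
print bisect.bisect_left(scores, 20)    # -> 1, just before the leftmost 20
bisect.insort(scores, 25)               # insert while keeping the list sorted
print scores                            # -> [10, 20, 20, 25, 30]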

+ 0
- 209
lib/jython/Lib/calendar.py View File

@@ -1,209 +0,0 @@
"""Calendar printing functions
Note when comparing these calendars to the ones printed by cal(1): By
default, these calendars have Monday as the first day of the week, and
Sunday as the last (the European convention). Use setfirstweekday() to
set the first day of the week (0=Monday, 6=Sunday)."""
# Revision 2: uses functions from built-in time module
# Import functions and variables from time module
from time import localtime, mktime
__all__ = ["error","setfirstweekday","firstweekday","isleap",
"leapdays","weekday","monthrange","monthcalendar",
"prmonth","month","prcal","calendar","timegm"]
# Exception raised for bad input (with string parameter for details)
error = ValueError
# Constants for months referenced later
January = 1
February = 2
# Number of days per month (except for February in leap years)
mdays = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
# Full and abbreviated names of weekdays
day_name = ['Monday', 'Tuesday', 'Wednesday', 'Thursday',
'Friday', 'Saturday', 'Sunday']
day_abbr = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
# Full and abbreviated names of months (1-based arrays!!!)
month_name = ['', 'January', 'February', 'March', 'April',
'May', 'June', 'July', 'August',
'September', 'October', 'November', 'December']
month_abbr = [' ', 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
# Constants for weekdays
(MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY) = range(7)
_firstweekday = 0 # 0 = Monday, 6 = Sunday
def firstweekday():
return _firstweekday
def setfirstweekday(weekday):
"""Set weekday (Monday=0, Sunday=6) to start each week."""
global _firstweekday
if not MONDAY <= weekday <= SUNDAY:
raise ValueError, \
'bad weekday number; must be 0 (Monday) to 6 (Sunday)'
_firstweekday = weekday
def isleap(year):
"""Return 1 for leap years, 0 for non-leap years."""
return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
def leapdays(y1, y2):
"""Return number of leap years in range [y1, y2).
Assume y1 <= y2."""
y1 -= 1
y2 -= 1
return (y2/4 - y1/4) - (y2/100 - y1/100) + (y2/400 - y1/400)
def weekday(year, month, day):
"""Return weekday (0-6 ~ Mon-Sun) for year (1970-...), month (1-12),
day (1-31)."""
secs = mktime((year, month, day, 0, 0, 0, 0, 0, 0))
tuple = localtime(secs)
return tuple[6]
def monthrange(year, month):
"""Return weekday (0-6 ~ Mon-Sun) and number of days (28-31) for
year, month."""
if not 1 <= month <= 12:
raise ValueError, 'bad month number'
day1 = weekday(year, month, 1)
ndays = mdays[month] + (month == February and isleap(year))
return day1, ndays
def monthcalendar(year, month):
"""Return a matrix representing a month's calendar.
Each row represents a week; days outside this month are zero."""
day1, ndays = monthrange(year, month)
rows = []
r7 = range(7)
day = (_firstweekday - day1 + 6) % 7 - 5 # for leading 0's in first week
while day <= ndays:
row = [0, 0, 0, 0, 0, 0, 0]
for i in r7:
if 1 <= day <= ndays: row[i] = day
day = day + 1
rows.append(row)
return rows
def _center(str, width):
"""Center a string in a field."""
n = width - len(str)
if n <= 0:
return str
return ' '*((n+1)/2) + str + ' '*((n)/2)
def prweek(theweek, width):
"""Print a single week (no newline)."""
print week(theweek, width),
def week(theweek, width):
"""Returns a single week in a string (no newline)."""
days = []
for day in theweek:
if day == 0:
s = ''
else:
s = '%2i' % day # right-align single-digit days
days.append(_center(s, width))
return ' '.join(days)
def weekheader(width):
"""Return a header for a week."""
if width >= 9:
names = day_name
else:
names = day_abbr
days = []
for i in range(_firstweekday, _firstweekday + 7):
days.append(_center(names[i%7][:width], width))
return ' '.join(days)
def prmonth(theyear, themonth, w=0, l=0):
"""Print a month's calendar."""
print month(theyear, themonth, w, l),
def month(theyear, themonth, w=0, l=0):
"""Return a month's calendar string (multi-line)."""
w = max(2, w)
l = max(1, l)
s = (_center(month_name[themonth] + ' ' + `theyear`,
7 * (w + 1) - 1).rstrip() +
'\n' * l + weekheader(w).rstrip() + '\n' * l)
for aweek in monthcalendar(theyear, themonth):
s = s + week(aweek, w).rstrip() + '\n' * l
return s[:-l] + '\n'
# Spacing of month columns for 3-column year calendar
_colwidth = 7*3 - 1 # Amount printed by prweek()
_spacing = 6 # Number of spaces between columns
def format3c(a, b, c, colwidth=_colwidth, spacing=_spacing):
"""Prints 3-column formatting for year calendars"""
print format3cstring(a, b, c, colwidth, spacing)
def format3cstring(a, b, c, colwidth=_colwidth, spacing=_spacing):
"""Returns a string formatted from 3 strings, centered within 3 columns."""
return (_center(a, colwidth) + ' ' * spacing + _center(b, colwidth) +
' ' * spacing + _center(c, colwidth))
def prcal(year, w=0, l=0, c=_spacing):
"""Print a year's calendar."""
print calendar(year, w, l, c),
def calendar(year, w=0, l=0, c=_spacing):
"""Returns a year's calendar as a multi-line string."""
w = max(2, w)
l = max(1, l)
c = max(2, c)
colwidth = (w + 1) * 7 - 1
s = _center(`year`, colwidth * 3 + c * 2).rstrip() + '\n' * l
header = weekheader(w)
header = format3cstring(header, header, header, colwidth, c).rstrip()
for q in range(January, January+12, 3):
s = (s + '\n' * l +
format3cstring(month_name[q], month_name[q+1], month_name[q+2],
colwidth, c).rstrip() +
'\n' * l + header + '\n' * l)
data = []
height = 0
for amonth in range(q, q + 3):
cal = monthcalendar(year, amonth)
if len(cal) > height:
height = len(cal)
data.append(cal)
for i in range(height):
weeks = []
for cal in data:
if i >= len(cal):
weeks.append('')
else:
weeks.append(week(cal[i], w))
s = s + format3cstring(weeks[0], weeks[1], weeks[2],
colwidth, c).rstrip() + '\n' * l
return s[:-l] + '\n'
EPOCH = 1970
def timegm(tuple):
"""Unrelated but handy function to calculate Unix timestamp from GMT."""
year, month, day, hour, minute, second = tuple[:6]
assert year >= EPOCH
assert 1 <= month <= 12
days = 365*(year-EPOCH) + leapdays(EPOCH, year)
for i in range(1, month):
days = days + mdays[i]
if month > 2 and isleap(year):
days = days + 1
days = days + day - 1
hours = days*24 + hour
minutes = hours*60 + minute
seconds = minutes*60 + second
return seconds
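
A minimal sketch of the calendar module deleted above (Python 2 style; the year and month are illustrative):

import calendar

calendar.setfirstweekday(calendar.SUNDAY)     # weeks now start on Sunday
print calendar.isleap(2000)                   # -> true: 2000 is a leap year
print calendar.leapdays(1970, 2000)           # -> 7 leap years in [1970, 2000)
for week in calendar.monthcalendar(2000, 2):
    print week                                # day numbers, 0 pads other months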

+ 0
- 1000
lib/jython/Lib/cgi.py
File diff suppressed because it is too large
View File


+ 0
- 198
lib/jython/Lib/cmd.py View File

@@ -1,198 +0,0 @@
"""A generic class to build line-oriented command interpreters.
Interpreters constructed with this class obey the following conventions:
1. End of file on input is processed as the command 'EOF'.
2. A command is parsed out of each line by collecting the prefix composed
of characters in the identchars member.
3. A command `foo' is dispatched to a method 'do_foo()'; the do_ method
is passed a single argument consisting of the remainder of the line.
4. Typing an empty line repeats the last command. (Actually, it calls the
method `emptyline', which may be overridden in a subclass.)
5. There is a predefined `help' method. Given an argument `topic', it
calls the command `help_topic'. With no arguments, it lists all topics
with defined help_ functions, broken into up to three topics; documented
commands, miscellaneous help topics, and undocumented commands.
6. The command '?' is a synonym for `help'. The command '!' is a synonym
for `shell', if a do_shell method exists.
The `default' method may be overridden to intercept commands for which there
is no do_ method.
The data member `self.ruler' sets the character used to draw separator lines
in the help messages. If empty, no ruler line is drawn. It defaults to "=".
If the value of `self.intro' is nonempty when the cmdloop method is called,
it is printed out on interpreter startup. This value may be overridden
via an optional argument to the cmdloop() method.
The data members `self.doc_header', `self.misc_header', and
`self.undoc_header' set the headers used for the help function's
listings of documented functions, miscellaneous topics, and undocumented
functions respectively.
These interpreters use raw_input; thus, if the readline module is loaded,
they automatically support Emacs-like command history and editing features.
"""
import string, sys
__all__ = ["Cmd"]
PROMPT = '(Cmd) '
IDENTCHARS = string.letters + string.digits + '_'
class Cmd:
prompt = PROMPT
identchars = IDENTCHARS
ruler = '='
lastcmd = ''
cmdqueue = []
intro = None
doc_leader = ""
doc_header = "Documented commands (type help <topic>):"
misc_header = "Miscellaneous help topics:"
undoc_header = "Undocumented commands:"
nohelp = "*** No help on %s"
use_rawinput = 1
def __init__(self): pass
def cmdloop(self, intro=None):
self.preloop()
if intro is not None:
self.intro = intro
if self.intro:
print self.intro
stop = None
while not stop:
if self.cmdqueue:
line = self.cmdqueue[0]
del self.cmdqueue[0]
else:
if self.use_rawinput:
try:
line = raw_input(self.prompt)
except EOFError:
line = 'EOF'
else:
sys.stdout.write(self.prompt)
line = sys.stdin.readline()
if not len(line):
line = 'EOF'
else:
line = line[:-1] # chop \n
line = self.precmd(line)
stop = self.onecmd(line)
stop = self.postcmd(stop, line)
self.postloop()
def precmd(self, line):
return line
def postcmd(self, stop, line):
return stop
def preloop(self):
pass
def postloop(self):
pass
def onecmd(self, line):
line = line.strip()
if not line:
return self.emptyline()
elif line[0] == '?':
line = 'help ' + line[1:]
elif line[0] == '!':
if hasattr(self, 'do_shell'):
line = 'shell ' + line[1:]
else:
return self.default(line)
self.lastcmd = line
i, n = 0, len(line)
while i < n and line[i] in self.identchars: i = i+1
cmd, arg = line[:i], line[i:].strip()
if cmd == '':
return self.default(line)
else:
try:
func = getattr(self, 'do_' + cmd)
except AttributeError:
return self.default(line)
return func(arg)
def emptyline(self):
if self.lastcmd:
return self.onecmd(self.lastcmd)
def default(self, line):
print '*** Unknown syntax:', line
def do_help(self, arg):
if arg:
# XXX check arg syntax
try:
func = getattr(self, 'help_' + arg)
except:
try:
doc=getattr(self, 'do_' + arg).__doc__
if doc:
print doc
return
except:
pass
print self.nohelp % (arg,)
return
func()
else:
# Inheritance says we have to look in class and
# base classes; order is not important.
names = []
classes = [self.__class__]
while classes:
aclass = classes[0]
if aclass.__bases__:
classes = classes + list(aclass.__bases__)
names = names + dir(aclass)
del classes[0]
cmds_doc = []
cmds_undoc = []
help = {}
for name in names:
if name[:5] == 'help_':
help[name[5:]]=1
names.sort()
# There can be duplicates if routines overridden
prevname = ''
for name in names:
if name[:3] == 'do_':
if name == prevname:
continue
prevname = name
cmd=name[3:]
if help.has_key(cmd):
cmds_doc.append(cmd)
del help[cmd]
elif getattr(self, name).__doc__:
cmds_doc.append(cmd)
else:
cmds_undoc.append(cmd)
print self.doc_leader
self.print_topics(self.doc_header, cmds_doc, 15,80)
self.print_topics(self.misc_header, help.keys(),15,80)
self.print_topics(self.undoc_header, cmds_undoc, 15,80)
def print_topics(self, header, cmds, cmdlen, maxcol):
if cmds:
print header
if self.ruler:
print self.ruler * len(header)
(cmds_per_line,junk)=divmod(maxcol,cmdlen)
col=cmds_per_line
for cmd in cmds:
if col==0: print
print (("%-"+`cmdlen`+"s") % cmd),
col = (col+1) % cmds_per_line
print "\n"

+ 0
- 308
lib/jython/Lib/code.py View File

@@ -1,308 +0,0 @@
"""Utilities needed to emulate Python's interactive interpreter.
"""
# Inspired by similar code by Jeff Epler and Fredrik Lundh.
import sys
import traceback
from codeop import compile_command
__all__ = ["InteractiveInterpreter","InteractiveConsole","interact",
"compile_command"]
def softspace(file, newvalue):
oldvalue = 0
try:
oldvalue = file.softspace
except AttributeError:
pass
try:
file.softspace = newvalue
except TypeError: # "attribute-less object" or "read-only attributes"
pass
return oldvalue
class InteractiveInterpreter:
"""Base class for InteractiveConsole.
This class deals with parsing and interpreter state (the user's
namespace); it doesn't deal with input buffering or prompting or
input file naming (the filename is always passed in explicitly).
"""
def __init__(self, locals=None):
"""Constructor.
The optional 'locals' argument specifies the dictionary in
which code will be executed; it defaults to a newly created
dictionary with key "__name__" set to "__console__" and key
"__doc__" set to None.
"""
if locals is None:
locals = {"__name__": "__console__", "__doc__": None}
self.locals = locals
def runsource(self, source, filename="<input>", symbol="single"):
"""Compile and run some source in the interpreter.
Arguments are as for compile_command().
One of several things can happen:
1) The input is incorrect; compile_command() raised an
exception (SyntaxError or OverflowError). A syntax traceback
will be printed by calling the showsyntaxerror() method.
2) The input is incomplete, and more input is required;
compile_command() returned None. Nothing happens.
3) The input is complete; compile_command() returned a code
object. The code is executed by calling self.runcode() (which
also handles run-time exceptions, except for SystemExit).
The return value is 1 in case 2, 0 in the other cases (unless
an exception is raised). The return value can be used to
decide whether to use sys.ps1 or sys.ps2 to prompt the next
line.
"""
try:
code = compile_command(source, filename, symbol)
except (OverflowError, SyntaxError, ValueError):
# Case 1
self.showsyntaxerror(filename)
return 0
if code is None:
# Case 2
return 1
# Case 3
self.runcode(code)
return 0
def runcode(self, code):
"""Execute a code object.
When an exception occurs, self.showtraceback() is called to
display a traceback. All exceptions are caught except
SystemExit, which is reraised.
A note about KeyboardInterrupt: this exception may occur
elsewhere in this code, and may not always be caught. The
caller should be prepared to deal with it.
"""
try:
exec code in self.locals
except SystemExit:
raise
except:
self.showtraceback()
else:
if softspace(sys.stdout, 0):
print
def showsyntaxerror(self, filename=None):
"""Display the syntax error that just occurred.
This doesn't display a stack trace because there isn't one.
If a filename is given, it is stuffed in the exception instead
of what was there before (because Python's parser always uses
"<string>" when reading from a string).
The output is written by self.write(), below.
"""
type, value, sys.last_traceback = sys.exc_info()
sys.last_type = type
sys.last_value = value
if filename and type is SyntaxError:
# Work hard to stuff the correct filename in the exception
try:
msg, (dummy_filename, lineno, offset, line) = value
except:
# Not the format we expect; leave it alone
pass
else:
# Stuff in the right filename
try:
# Assume SyntaxError is a class exception
value = SyntaxError(msg, (filename, lineno, offset, line))
except:
# If that failed, assume SyntaxError is a string
value = msg, (filename, lineno, offset, line)
list = traceback.format_exception_only(type, value)
map(self.write, list)
def showtraceback(self):
"""Display the exception that just occurred.
We remove the first stack item because it is our own code.
The output is written by self.write(), below.
"""
try:
type, value, tb = sys.exc_info()
sys.last_type = type
sys.last_value = value
sys.last_traceback = tb
tblist = traceback.extract_tb(tb)
del tblist[:1]
list = traceback.format_list(tblist)
if list:
list.insert(0, "Traceback (most recent call last):\n")
list[len(list):] = traceback.format_exception_only(type, value)
finally:
tblist = tb = None
map(self.write, list)
def write(self, data):
"""Write a string.
The base implementation writes to sys.stderr; a subclass may
replace this with a different implementation.
"""
sys.stderr.write(data)
class InteractiveConsole(InteractiveInterpreter):
"""Closely emulate the behavior of the interactive Python interpreter.
This class builds on InteractiveInterpreter and adds prompting
using the familiar sys.ps1 and sys.ps2, and input buffering.
"""
def __init__(self, locals=None, filename="<console>"):
"""Constructor.
The optional locals argument will be passed to the
InteractiveInterpreter base class.
The optional filename argument should specify the (file)name
of the input stream; it will show up in tracebacks.
"""
InteractiveInterpreter.__init__(self, locals)
self.filename = filename
self.resetbuffer()
def resetbuffer(self):
"""Reset the input buffer."""
self.buffer = []
def interact(self, banner=None):
"""Closely emulate the interactive Python console.
The optional banner argument specifies the banner to print
before the first interaction; by default it prints a banner
similar to the one printed by the real Python interpreter,
followed by the current class name in parentheses (so as not
to confuse this with the real interpreter -- since it's so
close!).
"""
try:
sys.ps1
except AttributeError:
sys.ps1 = ">>> "
try:
sys.ps2
except AttributeError:
sys.ps2 = "... "
cprt = 'Type "copyright", "credits" or "license" for more information.'
if banner is None:
self.write("Python %s on %s\n%s\n(%s)\n" %
(sys.version, sys.platform, cprt,
self.__class__.__name__))
else:
self.write("%s\n" % str(banner))
more = 0
while 1:
try:
if more:
prompt = sys.ps2
else:
prompt = sys.ps1
try:
line = self.raw_input(prompt)
except EOFError:
self.write("\n")
break
else:
more = self.push(line)
except KeyboardInterrupt:
self.write("\nKeyboardInterrupt\n")
self.resetbuffer()
more = 0
def push(self, line):
"""Push a line to the interpreter.
The line should not have a trailing newline; it may have
internal newlines. The line is appended to a buffer and the
interpreter's runsource() method is called with the
concatenated contents of the buffer as source. If this
indicates that the command was executed or invalid, the buffer
is reset; otherwise, the command is incomplete, and the buffer
is left as it was after the line was appended. The return
value is 1 if more input is required, 0 if the line was dealt
with in some way (this is the same as runsource()).
"""
self.buffer.append(line)
source = "\n".join(self.buffer)
more = self.runsource(source, self.filename)
if not more:
self.resetbuffer()
return more
def raw_input(self, prompt=""):
"""Write a prompt and read a line.
The returned line does not include the trailing newline.
When the user enters the EOF key sequence, EOFError is raised.
The base implementation uses the built-in function
raw_input(); a subclass may replace this with a different
implementation.
"""
return raw_input(prompt)
def interact(banner=None, readfunc=None, local=None):
"""Closely emulate the interactive Python interpreter.
This is a backwards compatible interface to the InteractiveConsole
class. When readfunc is not specified, it attempts to import the
readline module to enable GNU readline if it is available.
Arguments (all optional, all default to None):
banner -- passed to InteractiveConsole.interact()
readfunc -- if not None, replaces InteractiveConsole.raw_input()
local -- passed to InteractiveInterpreter.__init__()
"""
console = InteractiveConsole(local)
if readfunc is not None:
console.raw_input = readfunc
else:
try:
import readline
except:
pass
console.interact(banner)
if __name__ == '__main__':
interact()
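To make the intent of the deleted code module concrete, here is a short sketch of embedding an interpreter with InteractiveConsole, in the Python 2 style of the source above; the namespace contents are illustrative.

import code

namespace = {'__name__': '__console__', '__doc__': None, 'answer': 42}
console = code.InteractiveConsole(locals=namespace)

# push() returns 1 while more input is needed, 0 once a statement has run.
assert console.push('x = answer * 2') == 0
console.push('print x')            # prints 84 under a Python 2 interpreter

# Or hand the prompt to the user, as the module-level interact() does:
# code.interact(banner='embedded console', local=namespace)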

+ 0
- 570
lib/jython/Lib/codecs.py

@@ -1,570 +0,0 @@
""" codecs -- Python Codec Registry, API and helpers.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""#"
import struct,types,__builtin__
### Registry and builtin stateless codec functions
try:
from _codecs import *
except ImportError,why:
raise SystemError,\
'Failed to load the builtin codecs: %s' % why
__all__ = ["register","lookup","open","EncodedFile","BOM","BOM_BE",
"BOM_LE","BOM32_BE","BOM32_LE","BOM64_BE","BOM64_LE"]
### Constants
#
# Byte Order Mark (BOM) and its possible values (BOM_BE, BOM_LE)
#
BOM = struct.pack('=H',0xFEFF)
#
BOM_BE = BOM32_BE = '\376\377'
# corresponds to Unicode U+FEFF in UTF-16 on big endian
# platforms == ZERO WIDTH NO-BREAK SPACE
BOM_LE = BOM32_LE = '\377\376'
# corresponds to Unicode U+FFFE in UTF-16 on little endian
# platforms == defined as being an illegal Unicode character
#
# 64-bit Byte Order Marks
#
BOM64_BE = '\000\000\376\377'
# corresponds to Unicode U+0000FEFF in UCS-4
BOM64_LE = '\377\376\000\000'
# corresponds to Unicode U+0000FFFE in UCS-4
### Codec base classes (defining the API)
class Codec:
""" Defines the interface for stateless encoders/decoders.
The .encode()/.decode() methods may implement different error
handling schemes by providing the errors argument. These
string values are defined:
'strict' - raise a ValueError error (or a subclass)
'ignore' - ignore the character and continue with the next
'replace' - replace with a suitable replacement character;
Python will use the official U+FFFD REPLACEMENT
CHARACTER for the builtin Unicode codecs.
"""
def encode(self,input,errors='strict'):
""" Encodes the object input and returns a tuple (output
object, length consumed).
errors defines the error handling to apply. It defaults to
'strict' handling.
The method may not store state in the Codec instance. Use
StreamCodec for codecs which have to keep state in order to
make encoding/decoding efficient.
The encoder must be able to handle zero length input and
return an empty object of the output object type in this
situation.
"""
raise NotImplementedError
def decode(self,input,errors='strict'):
""" Decodes the object input and returns a tuple (output
object, length consumed).
input must be an object which provides the bf_getreadbuf
buffer slot. Python strings, buffer objects and memory
mapped files are examples of objects providing this slot.
errors defines the error handling to apply. It defaults to
'strict' handling.
The method may not store state in the Codec instance. Use
StreamCodec for codecs which have to keep state in order to
make encoding/decoding efficient.
The decoder must be able to handle zero length input and
return an empty object of the output object type in this
situation.
"""
raise NotImplementedError
#
# The StreamWriter and StreamReader class provide generic working
# interfaces which can be used to implement new encodings submodules
# very easily. See encodings/utf_8.py for an example on how this is
# done.
#
class StreamWriter(Codec):
def __init__(self,stream,errors='strict'):
""" Creates a StreamWriter instance.
stream must be a file-like object open for writing
(binary) data.
The StreamWriter may implement different error handling
schemes by providing the errors keyword argument. These
parameters are defined:
'strict' - raise a ValueError (or a subclass)
'ignore' - ignore the character and continue with the next
'replace'- replace with a suitable replacement character
"""
self.stream = stream
self.errors = errors
def write(self, object):
""" Writes the object's contents encoded to self.stream.
"""
data, consumed = self.encode(object,self.errors)
self.stream.write(data)
def writelines(self, list):
""" Writes the concatenated list of strings to the stream
using .write().
"""
self.write(''.join(list))
def reset(self):
""" Flushes and resets the codec buffers used for keeping state.
Calling this method should ensure that the data on the
output is put into a clean state, that allows appending
of new fresh data without having to rescan the whole
stream to recover state.
"""
pass
def __getattr__(self,name,
getattr=getattr):
""" Inherit all other methods from the underlying stream.
"""
return getattr(self.stream,name)
###
class StreamReader(Codec):
def __init__(self,stream,errors='strict'):
""" Creates a StreamReader instance.
stream must be a file-like object open for reading
(binary) data.
The StreamReader may implement different error handling
schemes by providing the errors keyword argument. These
parameters are defined:
'strict' - raise a ValueError (or a subclass)
'ignore' - ignore the character and continue with the next
'replace'- replace with a suitable replacement character;
"""
self.stream = stream
self.errors = errors
def read(self, size=-1):
""" Decodes data from the stream self.stream and returns the
resulting object.
size indicates the approximate maximum number of bytes to
read from the stream for decoding purposes. The decoder
can modify this setting as appropriate. The default value
-1 indicates to read and decode as much as possible. size
is intended to prevent having to decode huge files in one
step.
The method should use a greedy read strategy meaning that
it should read as much data as is allowed within the
definition of the encoding and the given size, e.g. if
optional encoding endings or state markers are available
on the stream, these should be read too.
"""
# Unsliced reading:
if size < 0:
return self.decode(self.stream.read(), self.errors)[0]
# Sliced reading:
read = self.stream.read
decode = self.decode
data = read(size)
i = 0
while 1:
try:
object, decodedbytes = decode(data, self.errors)
except ValueError,why:
# This method is slow but should work under pretty much
# all conditions; at most 10 tries are made
i = i + 1
newdata = read(1)
if not newdata or i > 10:
raise
data = data + newdata
else:
return object
def readline(self, size=None):
""" Read one line from the input stream and return the
decoded data.
Note: Unlike the .readlines() method, this method inherits
the line breaking knowledge from the underlying stream's
.readline() method -- there is currently no support for
line breaking using the codec decoder due to lack of line
buffering. Subclasses should however, if possible, try to
implement this method using their own knowledge of line
breaking.
size, if given, is passed as size argument to the stream's
.readline() method.
"""
if size is None:
line = self.stream.readline()
else:
line = self.stream.readline(size)
return self.decode(line,self.errors)[0]
def readlines(self, sizehint=0):
""" Read all lines available on the input stream
and return them as list of lines.
Line breaks are implemented using the codec's decoder
method and are included in the list entries.
sizehint, if given, is passed as size argument to the
stream's .read() method.
"""
if sizehint is None:
data = self.stream.read()
else:
data = self.stream.read(sizehint)
return self.decode(data,self.errors)[0].splitlines(1)
def reset(self):
""" Resets the codec buffers used for keeping state.
Note that no stream repositioning should take place.
This method is primarily intended to be able to recover
from decoding errors.
"""
pass
def __getattr__(self,name,
getattr=getattr):
""" Inherit all other methods from the underlying stream.
"""
return getattr(self.stream,name)
###
class StreamReaderWriter:
""" StreamReaderWriter instances allow wrapping streams which
work in both read and write modes.
The design is such that one can use the factory functions
returned by the codec.lookup() function to construct the
instance.
"""
# Optional attributes set by the file wrappers below
encoding = 'unknown'
def __init__(self,stream,Reader,Writer,errors='strict'):
""" Creates a StreamReaderWriter instance.
stream must be a Stream-like object.
Reader, Writer must be factory functions or classes
providing the StreamReader, StreamWriter interface resp.
Error handling is done in the same way as defined for the
StreamWriter/Readers.
"""
self.stream = stream
self.reader = Reader(stream, errors)
self.writer = Writer(stream, errors)
self.errors = errors
def read(self,size=-1):
return self.reader.read(size)
def readline(self, size=None):
return self.reader.readline(size)
def readlines(self, sizehint=None):
return self.reader.readlines(sizehint)
def write(self,data):
return self.writer.write(data)
def writelines(self,list):
return self.writer.writelines(list)
def reset(self):
self.reader.reset()
self.writer.reset()
def __getattr__(self,name,
getattr=getattr):
""" Inherit all other methods from the underlying stream.
"""
return getattr(self.stream,name)
###
class StreamRecoder:
""" StreamRecoder instances provide a frontend - backend
view of encoding data.
They use the complete set of APIs returned by the
codecs.lookup() function to implement their task.
Data written to the stream is first decoded into an
intermediate format (which is dependent on the given codec
combination) and then written to the stream using an instance
of the provided Writer class.
In the other direction, data is read from the stream using a
Reader instance, and the encoded data is then returned to the caller.
"""
# Optional attributes set by the file wrappers below
data_encoding = 'unknown'
file_encoding = 'unknown'
def __init__(self,stream,encode,decode,Reader,Writer,errors='strict'):
""" Creates a StreamRecoder instance which implements a two-way
conversion: encode and decode work on the frontend (the
input to .read() and output of .write()) while
Reader and Writer work on the backend (reading and
writing to the stream).
You can use these objects to do transparent direct
recodings from e.g. latin-1 to utf-8 and back.
stream must be a file-like object.
encode, decode must adhere to the Codec interface, Reader,
Writer must be factory functions or classes providing the
StreamReader, StreamWriter interface resp.
encode and decode are needed for the frontend translation,
Reader and Writer for the backend translation. Unicode is
used as intermediate encoding.
Error handling is done in the same way as defined for the
StreamWriter/Readers.
"""
self.stream = stream
self.encode = encode
self.decode = decode
self.reader = Reader(stream, errors)
self.writer = Writer(stream, errors)
self.errors = errors
def read(self,size=-1):
data = self.reader.read(size)
data, bytesencoded = self.encode(data, self.errors)
return data
def readline(self,size=None):
if size is None:
data = self.reader.readline()
else:
data = self.reader.readline(size)
data, bytesencoded = self.encode(data, self.errors)
return data
def readlines(self,sizehint=None):
if sizehint is None:
data = self.reader.read()
else:
data = self.reader.read(sizehint)
data, bytesencoded = self.encode(data, self.errors)
return data.splitlines(1)
def write(self,data):
data, bytesdecoded = self.decode(data, self.errors)
return self.writer.write(data)
def writelines(self,list):
data = ''.join(list)
data, bytesdecoded = self.decode(data, self.errors)
return self.writer.write(data)
def reset(self):
self.reader.reset()
self.writer.reset()
def __getattr__(self,name,
getattr=getattr):
""" Inherit all other methods from the underlying stream.
"""
return getattr(self.stream,name)
### Shortcuts
def open(filename, mode='rb', encoding=None, errors='strict', buffering=1):
""" Open an encoded file using the given mode and return
a wrapped version providing transparent encoding/decoding.
Note: The wrapped version will only accept the object format
defined by the codecs, i.e. Unicode objects for most builtin
codecs. Output is also codec dependent and will usually be
Unicode as well.
Files are always opened in binary mode, even if no binary mode
was specified. This is done to avoid data loss due to encodings
using 8-bit values. The default file mode is 'rb' meaning to
open the file in binary read mode.
encoding specifies the encoding which is to be used for
the file.
errors may be given to define the error handling. It defaults
to 'strict' which causes ValueErrors to be raised in case an
encoding error occurs.
buffering has the same meaning as for the builtin open() API.
It defaults to line buffered.
The returned wrapped file object provides an extra attribute
.encoding which allows querying the used encoding. This
attribute is only available if an encoding was specified as
parameter.
"""
if encoding is not None and \
'b' not in mode:
# Force opening of the file in binary mode
mode = mode + 'b'
file = __builtin__.open(filename, mode, buffering)
if encoding is None:
return file
(e,d,sr,sw) = lookup(encoding)
srw = StreamReaderWriter(file, sr, sw, errors)
# Add attributes to simplify introspection
srw.encoding = encoding
return srw
def EncodedFile(file, data_encoding, file_encoding=None, errors='strict'):
""" Return a wrapped version of file which provides transparent
encoding translation.
Strings written to the wrapped file are interpreted according
to the given data_encoding and then written to the original
file as string using file_encoding. The intermediate encoding
will usually be Unicode but depends on the specified codecs.
Strings are read from the file using file_encoding and then
passed back to the caller as string using data_encoding.
If file_encoding is not given, it defaults to data_encoding.
errors may be given to define the error handling. It defaults
to 'strict' which causes ValueErrors to be raised in case an
encoding error occurs.
The returned wrapped file object provides two extra attributes
.data_encoding and .file_encoding which reflect the given
parameters of the same name. The attributes can be used for
introspection by Python programs.
"""
if file_encoding is None:
file_encoding = data_encoding
encode, decode = lookup(data_encoding)[:2]
Reader, Writer = lookup(file_encoding)[2:]
sr = StreamRecoder(file,
encode,decode,Reader,Writer,
errors)
# Add attributes to simplify introspection
sr.data_encoding = data_encoding
sr.file_encoding = file_encoding
return sr
### Helpers for charmap-based codecs
def make_identity_dict(rng):
""" make_identity_dict(rng) -> dict
Return a dictionary where elements of the rng sequence are
mapped to themselves.
"""
res = {}
for i in rng:
res[i]=i
return res
### Tests
if __name__ == '__main__':
import sys
# Make stdout translate Latin-1 output into UTF-8 output
sys.stdout = EncodedFile(sys.stdout, 'latin-1', 'utf-8')
# Have stdin translate Latin-1 input into UTF-8 input
sys.stdin = EncodedFile(sys.stdin, 'utf-8', 'latin-1')
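A minimal sketch of the codecs.open() and EncodedFile() helpers defined above, in Python 2 syntax; the file name and encodings are illustrative.

import codecs, sys

# Write Unicode text through a transparent UTF-8 StreamWriter.
f = codecs.open('demo.txt', 'w', encoding='utf-8')
f.write(u'h\xe9llo\n')
f.close()

# Read it back; the StreamReader decodes bytes to Unicode on the fly.
f = codecs.open('demo.txt', 'r', encoding='utf-8')
text = f.read()
f.close()
assert isinstance(text, unicode)

# EncodedFile recodes between two byte encodings, as in the __main__ test:
utf8_stdout = codecs.EncodedFile(sys.stdout, 'latin-1', 'utf-8')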

+ 0
- 123
lib/jython/Lib/colorsys.py

@@ -1,123 +0,0 @@
"""Conversion functions between RGB and other color systems.
This module provides two functions for each color system ABC:
rgb_to_abc(r, g, b) --> a, b, c
abc_to_rgb(a, b, c) --> r, g, b
All inputs and outputs are triples of floats in the range [0.0...1.0].
Inputs outside this range may cause exceptions or invalid outputs.
Supported color systems:
RGB: Red, Green, Blue components
YIQ: used by composite video signals
HLS: Hue, Luminance, Saturation
HSV: Hue, Saturation, Value
"""
# References:
# XXX Where's the literature?
__all__ = ["rgb_to_yiq","yiq_to_rgb","rgb_to_hls","hls_to_rgb",
"rgb_to_hsv","hsv_to_rgb"]
# Some floating point constants
ONE_THIRD = 1.0/3.0
ONE_SIXTH = 1.0/6.0
TWO_THIRD = 2.0/3.0
# YIQ: used by composite video signals (linear combinations of RGB)
# Y: perceived grey level (0.0 == black, 1.0 == white)
# I, Q: color components
def rgb_to_yiq(r, g, b):
y = 0.30*r + 0.59*g + 0.11*b
i = 0.60*r - 0.28*g - 0.32*b
q = 0.21*r - 0.52*g + 0.31*b
return (y, i, q)
def yiq_to_rgb(y, i, q):
r = y + 0.948262*i + 0.624013*q
g = y - 0.276066*i - 0.639810*q
b = y - 1.105450*i + 1.729860*q
if r < 0.0: r = 0.0
if g < 0.0: g = 0.0
if b < 0.0: b = 0.0
if r > 1.0: r = 1.0
if g > 1.0: g = 1.0
if b > 1.0: b = 1.0
return (r, g, b)
# HLS: Hue, Luminance, S???
# H: position in the spectrum
# L: ???
# S: ???
def rgb_to_hls(r, g, b):
maxc = max(r, g, b)
minc = min(r, g, b)
# XXX Can optimize (maxc+minc) and (maxc-minc)
l = (minc+maxc)/2.0
if minc == maxc: return 0.0, l, 0.0
if l <= 0.5: s = (maxc-minc) / (maxc+minc)
else: s = (maxc-minc) / (2.0-maxc-minc)
rc = (maxc-r) / (maxc-minc)
gc = (maxc-g) / (maxc-minc)
bc = (maxc-b) / (maxc-minc)
if r == maxc: h = bc-gc
elif g == maxc: h = 2.0+rc-bc
else: h = 4.0+gc-rc
h = (h/6.0) % 1.0
return h, l, s
def hls_to_rgb(h, l, s):
if s == 0.0: return l, l, l
if l <= 0.5: m2 = l * (1.0+s)
else: m2 = l+s-(l*s)
m1 = 2.0*l - m2
return (_v(m1, m2, h+ONE_THIRD), _v(m1, m2, h), _v(m1, m2, h-ONE_THIRD))
def _v(m1, m2, hue):
hue = hue % 1.0
if hue < ONE_SIXTH: return m1 + (m2-m1)*hue*6.0
if hue < 0.5: return m2
if hue < TWO_THIRD: return m1 + (m2-m1)*(TWO_THIRD-hue)*6.0
return m1
# HSV: Hue, Saturation, Value(?)
# H: position in the spectrum
# S: ???
# V: ???
def rgb_to_hsv(r, g, b):
maxc = max(r, g, b)
minc = min(r, g, b)
v = maxc
if minc == maxc: return 0.0, 0.0, v
s = (maxc-minc) / maxc
rc = (maxc-r) / (maxc-minc)
gc = (maxc-g) / (maxc-minc)
bc = (maxc-b) / (maxc-minc)
if r == maxc: h = bc-gc
elif g == maxc: h = 2.0+rc-bc
else: h = 4.0+gc-rc
h = (h/6.0) % 1.0
return h, s, v
def hsv_to_rgb(h, s, v):
if s == 0.0: return v, v, v
i = int(h*6.0) # XXX assume int() truncates!
f = (h*6.0) - i
p = v*(1.0 - s)
q = v*(1.0 - s*f)
t = v*(1.0 - s*(1.0-f))
if i%6 == 0: return v, t, p
if i == 1: return q, v, p
if i == 2: return p, v, t
if i == 3: return p, q, v
if i == 4: return t, p, v
if i == 5: return v, p, q
# Cannot get here
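A quick round trip through the conversions above; the sample colour (pure red) is arbitrary.

import colorsys

r, g, b = 1.0, 0.0, 0.0                      # all channels in [0.0, 1.0]

h, s, v = colorsys.rgb_to_hsv(r, g, b)       # -> (0.0, 1.0, 1.0)
assert colorsys.hsv_to_rgb(h, s, v) == (1.0, 0.0, 0.0)

y, i, q = colorsys.rgb_to_yiq(r, g, b)       # y is 0.30 for pure red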

+ 0
- 84
lib/jython/Lib/commands.py

@@ -1,84 +0,0 @@
"""Execute shell commands via os.popen() and return status, output.
Interface summary:
import commands
outtext = commands.getoutput(cmd)
(exitstatus, outtext) = commands.getstatusoutput(cmd)
outtext = commands.getstatus(file) # returns output of "ls -ld file"
A trailing newline is removed from the output string.
Encapsulates the basic operation:
pipe = os.popen('{ ' + cmd + '; } 2>&1', 'r')
text = pipe.read()
sts = pipe.close()
[Note: it would be nice to add functions to interpret the exit status.]
"""
__all__ = ["getstatusoutput","getoutput","getstatus"]
# Module 'commands'
#
# Various tools for executing commands and looking at their output and status.
#
# NB This only works (and is only relevant) for UNIX.
# Get 'ls -l' status for an object into a string
#
def getstatus(file):
"""Return output of "ls -ld <file>" in a string."""
return getoutput('ls -ld' + mkarg(file))
# Get the output from a shell command into a string.
# The exit status is ignored; a trailing newline is stripped.
# Assume the command will work with '{ ... ; } 2>&1' around it..
#
def getoutput(cmd):
"""Return output (stdout or stderr) of executing cmd in a shell."""
return getstatusoutput(cmd)[1]
# Ditto but preserving the exit status.
# Returns a pair (sts, output)
#
def getstatusoutput(cmd):
"""Return (status, output) of executing cmd in a shell."""
import os
pipe = os.popen('{ ' + cmd + '; } 2>&1', 'r')
text = pipe.read()
sts = pipe.close()
if sts is None: sts = 0
if text[-1:] == '\n': text = text[:-1]
return sts, text
# Make command argument from directory and pathname (prefix space, add quotes).
#
def mk2arg(head, x):
import os
return mkarg(os.path.join(head, x))
# Make a shell command argument from a string.
# Return a string beginning with a space followed by a shell-quoted
# version of the argument.
# Two strategies: enclose in single quotes if it contains none;
# otherwise, enclose in double quotes and prefix quotable characters
# with backslash.
#
def mkarg(x):
if '\'' not in x:
return ' \'' + x + '\''
s = ' "'
for c in x:
if c in '\\$"`':
s = s + '\\'
s = s + c
s = s + '"'
return s
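An illustrative use of the interface summarised in the docstring above; UNIX only, and Python 2 only (later Pythons replaced this module with subprocess).

import commands

# Run a shell command; output combines stdout and stderr,
# with the trailing newline stripped.
status, output = commands.getstatusoutput('echo hello')
assert status == 0 and output == 'hello'

# getoutput() discards the status; getstatus() wraps "ls -ld <file>".
listing = commands.getstatus('/tmp')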

+ 0
- 130
lib/jython/Lib/compileall.py

@@ -1,130 +0,0 @@
"""Module/script to "compile" all .py files to .pyc (or .pyo) file.
When called as a script with arguments, this compiles the directories
given as arguments recursively; the -l option prevents it from
recursing into directories.
Without arguments, it compiles all modules on sys.path, without
recursing into subdirectories. (Even though it should do so for
packages -- for now, you'll have to deal with packages separately.)
See module py_compile for details of the actual byte-compilation.
"""
import os
import stat
import sys
import py_compile
__all__ = ["compile_dir","compile_path"]
def compile_dir(dir, maxlevels=10, ddir=None, force=0):
"""Byte-compile all modules in the given directory tree.
Arguments (only dir is required):
dir: the directory to byte-compile
maxlevels: maximum recursion level (default 10)
ddir: if given, purported directory name (this is the
directory name that will show up in error messages)
force: if 1, force compilation, even if timestamps are up-to-date
"""
print 'Listing', dir, '...'
try:
names = os.listdir(dir)
except os.error:
print "Can't list", dir
names = []
names.sort()
success = 1
for name in names:
fullname = os.path.join(dir, name)
if ddir:
dfile = os.path.join(ddir, name)
else:
dfile = None
if os.path.isfile(fullname):
head, tail = name[:-3], name[-3:]
if tail == '.py':
cfile = fullname + (__debug__ and 'c' or 'o')
ftime = os.stat(fullname)[stat.ST_MTIME]
try: ctime = os.stat(cfile)[stat.ST_MTIME]
except os.error: ctime = 0
if (ctime > ftime) and not force: continue
print 'Compiling', fullname, '...'
try:
py_compile.compile(fullname, None, dfile)
except KeyboardInterrupt:
raise KeyboardInterrupt
except:
if type(sys.exc_type) == type(''):
exc_type_name = sys.exc_type
else: exc_type_name = sys.exc_type.__name__
print 'Sorry:', exc_type_name + ':',
print sys.exc_value
success = 0
elif maxlevels > 0 and \
name != os.curdir and name != os.pardir and \
os.path.isdir(fullname) and \
not os.path.islink(fullname):
compile_dir(fullname, maxlevels - 1, dfile, force)
return success
def compile_path(skip_curdir=1, maxlevels=0, force=0):
"""Byte-compile all module on sys.path.
Arguments (all optional):
skip_curdir: if true, skip current directory (default true)
maxlevels: max recursion level (default 0)
force: as for compile_dir() (default 0)
"""
success = 1
for dir in sys.path:
if (not dir or dir == os.curdir) and skip_curdir:
print 'Skipping current directory'
else:
success = success and compile_dir(dir, maxlevels, None, force)
return success
def main():
"""Script main program."""
import getopt
try:
opts, args = getopt.getopt(sys.argv[1:], 'lfd:')
except getopt.error, msg:
print msg
print "usage: compileall [-l] [-f] [-d destdir] [directory ...]"
print "-l: don't recurse down"
print "-f: force rebuild even if timestamps are up-to-date"
print "-d destdir: purported directory name for error messages"
print "if no directory arguments, -l sys.path is assumed"
sys.exit(2)
maxlevels = 10
ddir = None
force = 0
for o, a in opts:
if o == '-l': maxlevels = 0
if o == '-d': ddir = a
if o == '-f': force = 1
if ddir:
if len(args) != 1:
print "-d destdir require exactly one directory argument"
sys.exit(2)
success = 1
try:
if args:
for dir in args:
success = success and compile_dir(dir, maxlevels, ddir, force)
else:
success = compile_path()
except KeyboardInterrupt:
print "\n[interrupt]"
success = 0
return success
if __name__ == '__main__':
sys.exit(not main())
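A sketch of driving the deleted compileall module programmatically rather than as a script; the directory path is illustrative.

import compileall

# Byte-compile every .py file under one tree; ddir is the name that
# will appear in error messages and tracebacks.
ok = compileall.compile_dir('lib/python', maxlevels=10,
                            ddir='lib/python', force=1)

# Or byte-compile the modules on sys.path, skipping the current directory.
ok = ok and compileall.compile_path(skip_curdir=1)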

+ 0
- 330
lib/jython/Lib/copy.py

@@ -1,330 +0,0 @@
"""Generic (shallow and deep) copying operations.
Interface summary:
import copy
x = copy.copy(y) # make a shallow copy of y
x = copy.deepcopy(y) # make a deep copy of y
For module specific errors, copy.error is raised.
The difference between shallow and deep copying is only relevant for
compound objects (objects that contain other objects, like lists or
class instances).
- A shallow copy constructs a new compound object and then (to the
extent possible) inserts *the same objects* into it that the
original contains.
- A deep copy constructs a new compound object and then, recursively,
inserts *copies* into it of the objects found in the original.
Two problems often exist with deep copy operations that don't exist
with shallow copy operations:
a) recursive objects (compound objects that, directly or indirectly,
contain a reference to themselves) may cause a recursive loop
b) because deep copy copies *everything* it may copy too much, e.g.
administrative data structures that should be shared even between
copies
Python's deep copy operation avoids these problems by:
a) keeping a table of objects already copied during the current
copying pass
b) letting user-defined classes override the copying operation or the
set of components copied
This version does not copy types like module, class, function, method,
nor stack trace, stack frame, nor file, socket, window, nor array, nor
any similar types.
Classes can use the same interfaces to control copying that they use
to control pickling: they can define methods called __getinitargs__(),
__getstate__() and __setstate__(). See the documentation for module
"pickle" for information on these methods.
"""
# XXX need to support copy_reg here too...
import types
class Error(Exception):
pass
error = Error # backward compatibility
try:
from org.python.core import PyStringMap
except ImportError:
PyStringMap = None
__all__ = ["Error","error","copy","deepcopy"]
def copy(x):
"""Shallow copy operation on arbitrary Python objects.
See the module's __doc__ string for more info.
"""
try:
copierfunction = _copy_dispatch[type(x)]
except KeyError:
try:
copier = x.__copy__
except AttributeError:
raise error, \
"un(shallow)copyable object of type %s" % type(x)
y = copier()
else:
y = copierfunction(x)
return y
_copy_dispatch = d = {}
def _copy_atomic(x):
return x
d[types.NoneType] = _copy_atomic
d[types.IntType] = _copy_atomic
d[types.LongType] = _copy_atomic
d[types.FloatType] = _copy_atomic
d[types.StringType] = _copy_atomic
d[types.UnicodeType] = _copy_atomic
try:
d[types.CodeType] = _copy_atomic
except AttributeError:
pass
d[types.TypeType] = _copy_atomic
d[types.XRangeType] = _copy_atomic
d[types.ClassType] = _copy_atomic
def _copy_list(x):
return x[:]
d[types.ListType] = _copy_list
def _copy_tuple(x):
return x[:]
d[types.TupleType] = _copy_tuple
def _copy_dict(x):
return x.copy()
d[types.DictionaryType] = _copy_dict
if PyStringMap is not None:
d[PyStringMap] = _copy_dict
def _copy_inst(x):
if hasattr(x, '__copy__'):
return x.__copy__()
if hasattr(x, '__getinitargs__'):
args = x.__getinitargs__()
y = apply(x.__class__, args)
else:
if hasattr(x.__class__, '__del__'):
y = _EmptyClassDel()
else:
y = _EmptyClass()
y.__class__ = x.__class__
if hasattr(x, '__getstate__'):
state = x.__getstate__()
else:
state = x.__dict__
if hasattr(y, '__setstate__'):
y.__setstate__(state)
else:
y.__dict__.update(state)
return y
d[types.InstanceType] = _copy_inst
del d
def deepcopy(x, memo = None):
"""Deep copy operation on arbitrary Python objects.
See the module's __doc__ string for more info.
"""
if memo is None:
memo = {}
d = id(x)
if memo.has_key(d):
return memo[d]
try:
copierfunction = _deepcopy_dispatch[type(x)]
except KeyError:
try:
copier = x.__deepcopy__
except AttributeError:
raise error, \
"un-deep-copyable object of type %s" % type(x)
y = copier(memo)
else:
y = copierfunction(x, memo)
memo[d] = y
return y
_deepcopy_dispatch = d = {}
def _deepcopy_atomic(x, memo):
return x
d[types.NoneType] = _deepcopy_atomic
d[types.IntType] = _deepcopy_atomic
d[types.LongType] = _deepcopy_atomic
d[types.FloatType] = _deepcopy_atomic
d[types.StringType] = _deepcopy_atomic
d[types.UnicodeType] = _deepcopy_atomic
d[types.CodeType] = _deepcopy_atomic
d[types.TypeType] = _deepcopy_atomic
d[types.XRangeType] = _deepcopy_atomic
def _deepcopy_list(x, memo):
y = []
memo[id(x)] = y
for a in x:
y.append(deepcopy(a, memo))
return y
d[types.ListType] = _deepcopy_list
def _deepcopy_tuple(x, memo):
y = []
for a in x:
y.append(deepcopy(a, memo))
d = id(x)
try:
return memo[d]
except KeyError:
pass
for i in range(len(x)):
if x[i] is not y[i]:
y = tuple(y)
break
else:
y = x
memo[d] = y
return y
d[types.TupleType] = _deepcopy_tuple
def _deepcopy_dict(x, memo):
y = {}
memo[id(x)] = y
for key in x.keys():
y[deepcopy(key, memo)] = deepcopy(x[key], memo)
return y
d[types.DictionaryType] = _deepcopy_dict
if PyStringMap is not None:
d[PyStringMap] = _deepcopy_dict
def _keep_alive(x, memo):
"""Keeps a reference to the object x in the memo.
Because we remember objects by their id, we have
to assure that possibly temporary objects are kept
alive by referencing them.
We store a reference at the id of the memo, which should
normally not be used unless someone tries to deepcopy
the memo itself...
"""
try:
memo[id(memo)].append(x)
except KeyError:
# aha, this is the first one :-)
memo[id(memo)]=[x]
def _deepcopy_inst(x, memo):
if hasattr(x, '__deepcopy__'):
return x.__deepcopy__(memo)
if hasattr(x, '__getinitargs__'):
args = x.__getinitargs__()
_keep_alive(args, memo)
args = deepcopy(args, memo)
y = apply(x.__class__, args)
else:
if hasattr(x.__class__, '__del__'):
y = _EmptyClassDel()
else:
y = _EmptyClass()
y.__class__ = x.__class__
memo[id(x)] = y
if hasattr(x, '__getstate__'):
state = x.__getstate__()
_keep_alive(state, memo)
else:
state = x.__dict__
state = deepcopy(state, memo)
if hasattr(y, '__setstate__'):
y.__setstate__(state)
else:
y.__dict__.update(state)
return y
d[types.InstanceType] = _deepcopy_inst
del d
del types
# Helper for instance creation without calling __init__
class _EmptyClass:
pass
# Helper for instance creation without calling __init__. Used when
# the source class contains a __del__ attribute.
class _EmptyClassDel:
def __del__(self):
pass
def _test():
l = [None, 1, 2L, 3.14, 'xyzzy', (1, 2L), [3.14, 'abc'],
{'abc': 'ABC'}, (), [], {}]
l1 = copy(l)
print l1==l
l1 = map(copy, l)
print l1==l
l1 = deepcopy(l)
print l1==l
class C:
def __init__(self, arg=None):
self.a = 1
self.arg = arg
if __name__ == '__main__':
import sys
file = sys.argv[0]
else:
file = __file__
self.fp = open(file)
self.fp.close()
def __getstate__(self):
return {'a': self.a, 'arg': self.arg}
def __setstate__(self, state):
for key in state.keys():
setattr(self, key, state[key])
def __deepcopy__(self, memo = None):
new = self.__class__(deepcopy(self.arg, memo))
new.a = self.a
return new
c = C('argument sketch')
l.append(c)
l2 = copy(l)
print l == l2
print l
print l2
l2 = deepcopy(l)
print l == l2
print l
print l2
l.append({l[1]: l, 'xyz': l[2]})
l3 = copy(l)
import repr
print map(repr.repr, l)
print map(repr.repr, l1)
print map(repr.repr, l2)
print map(repr.repr, l3)
l3 = deepcopy(l)
import repr
print map(repr.repr, l)
print map(repr.repr, l1)
print map(repr.repr, l2)
print map(repr.repr, l3)
if __name__ == '__main__':
_test()
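A small example contrasting the two operations described in the interface summary at the top of the module; the data is illustrative.

import copy

original = {'name': 'cfg', 'values': [1, 2, 3]}

shallow = copy.copy(original)       # new dict, shared inner list
deep = copy.deepcopy(original)      # new dict, copied inner list

original['values'].append(4)
assert shallow['values'] == [1, 2, 3, 4]
assert deep['values'] == [1, 2, 3]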

+ 0
- 35
lib/jython/Lib/copy_reg.py

@@ -1,35 +0,0 @@
"""Helper to provide extensibility for pickle/cPickle.
This is only useful to add pickle support for extension types defined in
C, not for instances of user-defined classes.
"""
from types import ClassType as _ClassType
__all__ = ["pickle","constructor"]
dispatch_table = {}
safe_constructors = {}
def pickle(ob_type, pickle_function, constructor_ob=None):
if type(ob_type) is _ClassType:
raise TypeError("copy_reg is not intended for use with classes")
if not callable(pickle_function):
raise TypeError("reduction functions must be callable")
dispatch_table[ob_type] = pickle_function
if constructor_ob is not None:
constructor(constructor_ob)
def constructor(object):
if not callable(object):
raise TypeError("constructors must be callable")
safe_constructors[object] = 1
# Example: provide pickling support for complex numbers.
def pickle_complex(c):
return complex, (c.real, c.imag)
pickle(type(1j), pickle_complex, complex)
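The complex-number example at the end of the file, spelled out as a runnable Python 2 sketch; re-registering the same reduction is harmless.

import copy_reg
import pickle

def pickle_complex(c):
    # Reduce a complex number to (constructor, args) for pickling.
    return complex, (c.real, c.imag)

copy_reg.pickle(complex, pickle_complex, complex)

data = pickle.dumps(3 + 4j)
assert pickle.loads(data) == 3 + 4j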

+ 0
- 690
lib/jython/Lib/dbexts.py

@@ -1,690 +0,0 @@
# $Id: dbexts.py,v 1.4 2001/12/29 18:00:15 bzimmer Exp $
"""
This script provides platform independence by wrapping Python
Database API 2.0 compatible drivers to allow seamless database
usage across implementations.
In order to use the C version, you need mxODBC and mxDateTime.
In order to use the Java version, you need zxJDBC.
>>> import dbexts
>>> d = dbexts.dbexts() # use the default db
>>> d.isql('select count(*) count from player')
count
-------
13569.0
1 row affected
>>> r = d.raw('select count(*) count from player')
>>> r
([('count', 3, 17, None, 15, 0, 1)], [(13569.0,)])
>>>
The configuration file uses the following format, in a file named dbexts.ini:
[default]
name=mysql
[jdbc]
name=mysql
url=jdbc:mysql://localhost/ziclix
user=
pwd=
driver=org.gjt.mm.mysql.Driver
datahandler=com.ziclix.python.sql.handler.MySQLDataHandler
[jdbc]
name=pg
url=jdbc:postgresql://localhost:5432/ziclix
user=bzimmer
pwd=
driver=org.postgresql.Driver
datahandler=com.ziclix.python.sql.handler.PostgresqlDataHandler
"""
import os, string, re
__author__ = "brian zimmer (bzimmer@ziclix.com)"
__version__ = "$Revision: 1.4 $"[11:-2]
__OS__ = os.name
choose = lambda bool, a, b: (bool and [a] or [b])[0]
def console(rows, headers=()):
"""Format the results into a list of strings (one for each row):
<header>
<headersep>
<row1>
<row2>
...
headers may be given as list of strings.
Columns are separated by colsep; the header is separated from
the result set by a line of headersep characters.
The function calls stringify to format the value data into a string.
It defaults to calling str() and stripping leading and trailing whitespace.
- copied and modified from mxODBC
"""
# Check row entry lengths
output = []
headers = map(string.upper, list(map(lambda x: x or "", headers)))
collen = map(len,headers)
output.append(headers)
if rows and len(rows) > 0:
for row in rows:
row = map(lambda x: str(x), row)
for i in range(len(row)):
entry = row[i]
if collen[i] < len(entry):
collen[i] = len(entry)
output.append(row)
if len(output) == 1:
affected = "0 rows affected"
elif len(output) == 2:
affected = "1 row affected"
else:
affected = "%d rows affected" % (len(output) - 1)
# Format output
for i in range(len(output)):
row = output[i]
l = []
for j in range(len(row)):
l.append('%-*s' % (collen[j],row[j]))
output[i] = string.join(l, " | ")
# Insert header separator
totallen = len(output[0])
output[1:1] = ["-"*(totallen/len("-"))]
output.append("\n" + affected)
return output
def html(rows, headers=()):
output = []
output.append('<table class="results">')
output.append('<tr class="headers">')
headers = map(lambda x: '<td class="header">%s</td>' % (x.upper()), list(headers))
map(output.append, headers)
output.append('</tr>')
if rows and len(rows) > 0:
for row in rows:
output.append('<tr class="row">')
row = map(lambda x: '<td class="value">%s</td>' % (x), row)
map(output.append, row)
output.append('</tr>')
output.append('</table>')
return output
comments = lambda x: re.compile("{.*?}", re.S).sub("", x, 0)
class ex_proxy:
"""Wraps mxODBC to provide proxy support for zxJDBC's additional parameters."""
def __init__(self, c):
self.c = c
def __getattr__(self, name):
if name == "execute":
return self.execute
elif name == "gettypeinfo":
return self.gettypeinfo
else:
return getattr(self.c, name)
def execute(self, sql, params=None, bindings=None, maxrows=None):
if params:
self.c.execute(sql, params)
else:
self.c.execute(sql)
def gettypeinfo(self, typeid=None):
if typeid:
self.c.gettypeinfo(typeid)
class executor:
"""Handles the insertion of values given dynamic data."""
def __init__(self, table, cols):
self.cols = cols
self.table = table
if self.cols:
self.sql = "insert into %s (%s) values (%s)" % (table, ",".join(self.cols), ",".join(("?",) * len(self.cols)))
else:
self.sql = "insert into %s values (%%s)" % (table)
def execute(self, db, rows, bindings):
assert rows and len(rows) > 0, "must have at least one row"
if self.cols:
sql = self.sql
else:
sql = self.sql % (",".join(("?",) * len(rows[0])))
db.raw(sql, rows, bindings)
def connect(dbname):
return dbexts(dbname)
def lookup(dbname):
return dbexts(jndiname=dbname)
class dbexts:
def __init__(self, dbname=None, cfg=None, formatter=console, autocommit=1, jndiname=None, out=None):
self.verbose = 1
self.results = None
self.headers = None
self.datahandler = None
self.autocommit = autocommit
self.formatter = formatter
self.out = out
self.lastrowid = None
self.updatecount = None
if not jndiname:
if cfg == None:
fn = os.path.join(os.path.split(__file__)[0], "dbexts.ini")
if not os.path.exists(fn):
fn = os.path.join(os.environ['HOME'], ".dbexts")
self.dbs = IniParser(fn)
elif isinstance(cfg, IniParser):
self.dbs = cfg
else:
self.dbs = IniParser(cfg)
if dbname == None: dbname = self.dbs[("default", "name")]
if __OS__ == 'java':
from com.ziclix.python.sql import zxJDBC
database = zxJDBC
if not jndiname:
t = self.dbs[("jdbc", dbname)]
self.dburl, dbuser, dbpwd, jdbcdriver = t['url'], t['user'], t['pwd'], t['driver']
if t.has_key("datahandler"):
try:
datahandlerclass = string.split(t['datahandler'], ".")[-1]
self.datahandler = __import__(t['datahandler'], globals(), locals(), datahandlerclass)
except:
pass
keys = filter(lambda x: x not in ['url', 'user', 'pwd', 'driver', 'datahandler', 'name'], t.keys())
props = {}
for a in keys:
props[a] = t[a]
self.db = apply(database.connect, (self.dburl, dbuser, dbpwd, jdbcdriver), props)
else:
self.db = database.lookup(jndiname)
self.db.autocommit = 0
elif __OS__ == 'nt':
for modname in ["mx.ODBC.Windows", "ODBC.Windows"]:
try:
database = __import__(modname, globals(), locals(), "Windows")
break
except:
continue
else:
raise ImportError("unable to find appropriate mxODBC module")
t = self.dbs[("odbc", dbname)]
self.dburl, dbuser, dbpwd = t['url'], t['user'], t['pwd']
self.db = database.Connect(self.dburl, dbuser, dbpwd, clear_auto_commit=1)
for a in database.sqltype.keys():
setattr(self, database.sqltype[a], a)
del database
def __str__(self):
return self.dburl
def __repr__(self):
return self.dburl
def __getattr__(self, name):
if "cfg" == name:
return self.dbs.cfg
def close(self):
""" close the connection to the database """
self.db.close()
def begin(self):
""" reset ivars and return a new cursor, possibly binding an auxiliary datahandler """
self.headers, self.results = None, None
c = self.db.cursor()
if __OS__ == 'java':
if self.datahandler: c.datahandler = self.datahandler(c.datahandler)
else:
c = ex_proxy(c)
return c
def commit(self, cursor=None):
""" commit the cursor and create the result set """
if cursor and cursor.description:
self.headers = cursor.description
self.results = cursor.fetchall()
if hasattr(cursor, "nextset"):
s = cursor.nextset()
while s:
f = cursor.fetchall()
if f: self.results = choose(self.results is None, [], self.results) + f
s = cursor.nextset()
if hasattr(cursor, "lastrowid"): self.lastrowid = cursor.lastrowid
if hasattr(cursor, "updatecount"): self.updatecount = cursor.updatecount
if self.autocommit or cursor is None: self.db.commit()
if cursor: cursor.close()
def rollback(self):
""" rollback the cursor """
self.db.rollback()
def display(self):
""" using the formatter, display the results """
if self.formatter and self.verbose > 0:
res = self.results
if res:
print >> self.out, ""
for a in self.formatter(res, map(lambda x: x[0], self.headers)):
print >> self.out, a
print >> self.out, ""
def __execute__(self, sql, params=None, bindings=None, maxrows=None):
""" the primary execution method """
cur = self.begin()
try:
if bindings:
cur.execute(sql, params, bindings, maxrows=maxrows)
elif params:
cur.execute(sql, params, maxrows=maxrows)
else:
cur.execute(sql, maxrows=maxrows)
finally:
self.commit(cur)
def isql(self, sql, params=None, bindings=None, maxrows=None):
""" execute and display the sql """
self.raw(sql, params, bindings, maxrows=maxrows)
self.display()
def raw(self, sql, params=None, bindings=None, delim=None, comments=comments, maxrows=None):
""" execute the sql and return a tuple of (headers, results) """
if delim:
headers = []
results = []
if comments: sql = comments(sql)
statements = filter(lambda x: len(x) > 0, map(string.strip, string.split(sql, delim)))
for a in statements:
self.__execute__(a, params, bindings, maxrows=maxrows)
headers.append(self.headers)
results.append(self.results)
self.headers = headers
self.results = results
else:
self.__execute__(sql, params, bindings, maxrows=maxrows)
return (self.headers, self.results)
def callproc(self, procname, params=None, bindings=None, maxrows=None):
""" execute a stored procedure """
cur = self.begin()
try:
cur.callproc(procname, params=params, bindings=bindings, maxrows=maxrows)
finally:
self.commit(cur)
self.display()
def pk(self, table, owner=None, schema=None):
""" display the table's primary keys """
cur = self.begin()
cur.primarykeys(schema, owner, table)
self.commit(cur)
self.display()
def fk(self, primary_table=None, foreign_table=None, owner=None, schema=None):
""" display the table's foreign keys """
cur = self.begin()
if primary_table and foreign_table:
cur.foreignkeys(schema, owner, primary_table, schema, owner, foreign_table)
elif primary_table:
cur.foreignkeys(schema, owner, primary_table, schema, owner, None)
elif foreign_table:
cur.foreignkeys(schema, owner, None, schema, owner, foreign_table)
self.commit(cur)
self.display()
def table(self, table=None, types=("TABLE",), owner=None, schema=None):
"""If no table argument, displays a list of all tables. If a table argument,
displays the columns of the given table."""
cur = self.begin()
if table:
cur.columns(schema, owner, table, None)
else:
cur.tables(schema, owner, None, types)
self.commit(cur)
self.display()
def proc(self, proc=None, owner=None, schema=None):
"""If no proc argument, displays a list of all procedures. If a proc argument,
displays the parameters of the given procedure."""
cur = self.begin()
if proc:
cur.procedurecolumns(schema, owner, proc, None)
else:
cur.procedures(schema, owner, None)
self.commit(cur)
self.display()
def stat(self, table, qualifier=None, owner=None, unique=0, accuracy=0):
""" display the table's indicies """
cur = self.begin()
cur.statistics(qualifier, owner, table, unique, accuracy)
self.commit(cur)
self.display()
def typeinfo(self, sqltype=None):
""" display the types available for the database """
cur = self.begin()
cur.gettypeinfo(sqltype)
self.commit(cur)
self.display()
def tabletypeinfo(self):
""" display the table types available for the database """
cur = self.begin()
cur.gettabletypeinfo()
self.commit(cur)
self.display()
def schema(self, table, full=0, sort=1, owner=None):
"""Displays a Schema object for the table. If full is true, then generates
references to the table in addition to the standard fields. If sort is true,
sort all the items in the schema, else leave them in db dependent order."""
print >> self.out, str(Schema(self, table, owner, full, sort))
def bulkcopy(self, dst, table, include=[], exclude=[], autobatch=0, executor=executor):
"""Returns a Bulkcopy object using the given table."""
if type(dst) == type(""):
dst = dbexts(dst, cfg=self.dbs)
bcp = Bulkcopy(dst, table, include=include, exclude=exclude, autobatch=autobatch, executor=executor)
return bcp
def bcp(self, src, table, where='(1=1)', params=[], include=[], exclude=[], autobatch=0, executor=executor):
"""Bulkcopy of rows from a src database to the current database for a given table and where clause."""
if type(src) == type(""):
src = dbexts(src, cfg=self.dbs)
bcp = self.bulkcopy(self, table, include, exclude, autobatch, executor)
num = bcp.transfer(src, where, params)
return num
def unload(self, filename, sql, delimiter=",", includeheaders=1):
""" Unloads the delimited results of the query to the file specified, optionally including headers. """
u = Unload(self, filename, delimiter, includeheaders)
u.unload(sql)
class Bulkcopy:
"""The idea for a bcp class came from http://object-craft.com.au/projects/sybase"""
def __init__(self, dst, table, include=[], exclude=[], autobatch=0, executor=executor):
self.dst = dst
self.table = table
self.total = 0
self.rows = []
self.autobatch = autobatch
self.bindings = {}
include = map(lambda x: string.lower(x), include)
exclude = map(lambda x: string.lower(x), exclude)
_verbose = self.dst.verbose
self.dst.verbose = 0
try:
self.dst.table(self.table)
if self.dst.results:
colmap = {}
for a in self.dst.results:
colmap[a[3].lower()] = a[4]
cols = self.__filter__(colmap.keys(), include, exclude)
for a in zip(range(len(cols)), cols):
self.bindings[a[0]] = colmap[a[1]]
colmap = None
else:
cols = self.__filter__(include, include, exclude)
finally:
self.dst.verbose = _verbose
self.executor = executor(table, cols)
def __str__(self):
return "[%s].[%s]" % (self.dst, self.table)
def __repr__(self):
return "[%s].[%s]" % (self.dst, self.table)
def __getattr__(self, name):
if name == 'columns':
return self.executor.cols
def __filter__(self, values, include, exclude):
cols = map(string.lower, values)
if exclude:
cols = filter(lambda x, ex=exclude: x not in ex, cols)
if include:
cols = filter(lambda x, inc=include: x in inc, cols)
return cols
def format(self, column, type):
self.bindings[column] = type
def done(self):
if len(self.rows) > 0:
return self.batch()
return 0
def batch(self):
self.executor.execute(self.dst, self.rows, self.bindings)
cnt = len(self.rows)
self.total += cnt
self.rows = []
return cnt
def rowxfer(self, line):
self.rows.append(line)
if self.autobatch: self.batch()
def transfer(self, src, where="(1=1)", params=[]):
sql = "select %s from %s where %s" % (string.join(self.columns, ", "), self.table, where)
h, d = src.raw(sql, params)
if d:
map(self.rowxfer, d)
return self.done()
return 0
class Unload:
"""Unloads a sql statement to a file with optional formatting of each value."""
def __init__(self, db, filename, delimiter=",", includeheaders=1):
self.db = db
self.filename = filename
self.delimiter = delimiter
self.includeheaders = includeheaders
self.formatters = {}
def format(self, o):
if not o:
return ""
o = str(o)
if o.find(",") != -1:
o = "\"\"%s\"\"" % (o)
return o
def unload(self, sql, mode="w"):
headers, results = self.db.raw(sql)
w = open(self.filename, mode)
if self.includeheaders:
w.write("%s\n" % (string.join(map(lambda x: x[0], headers), self.delimiter)))
if results:
for a in results:
w.write("%s\n" % (string.join(map(self.format, a), self.delimiter)))
w.flush()
w.close()
class Schema:
"""Produces a Schema object which represents the database schema for a table"""
def __init__(self, db, table, owner=None, full=0, sort=1):
self.db = db
self.table = table
self.owner = owner
self.full = full
self.sort = sort
_verbose = self.db.verbose
self.db.verbose = 0
try:
if table: self.computeschema()
finally:
self.db.verbose = _verbose
def computeschema(self):
self.db.table(self.table, owner=self.owner)
self.columns = []
# (column name, type_name, size, nullable)
if self.db.results:
self.columns = map(lambda x: (x[3], x[5], x[6], x[10]), self.db.results)
if self.sort: self.columns.sort(lambda x, y: cmp(x[0], y[0]))
self.db.fk(None, self.table)
# (pk table name, pk column name, fk column name, fk name, pk name)
self.imported = []
if self.db.results:
self.imported = map(lambda x: (x[2], x[3], x[7], x[11], x[12]), self.db.results)
if self.sort: self.imported.sort(lambda x, y: cmp(x[2], y[2]))
self.exported = []
if self.full:
self.db.fk(self.table, None)
# (pk column name, fk table name, fk column name, fk name, pk name)
if self.db.results:
self.exported = map(lambda x: (x[3], x[6], x[7], x[11], x[12]), self.db.results)
if self.sort: self.exported.sort(lambda x, y: cmp(x[1], y[1]))
self.db.pk(self.table)
self.primarykeys = []
if self.db.results:
# (column name, key_seq, pk name)
self.primarykeys = map(lambda x: (x[3], x[4], x[5]), self.db.results)
if self.sort: self.primarykeys.sort(lambda x, y: cmp(x[1], y[1]))
self.db.stat(self.table)
# (non-unique, name, type, pos, column name, asc)
self.indices = []
if self.db.results:
idxdict = {}
# mxODBC returns a row of None's, so filter it out
idx = map(lambda x: (x[3], string.strip(x[5]), x[6], x[7], x[8]), filter(lambda x: x[5], self.db.results))
def cckmp(x, y):
c = cmp(x[1], y[1])
if c == 0: c = cmp(x[3], y[3])
return c
# sort this regardless, this gets the indices lined up
idx.sort(cckmp)
for a in idx:
if not idxdict.has_key(a[1]):
idxdict[a[1]] = []
idxdict[a[1]].append(a)
self.indices = idxdict.values()
if self.sort: self.indices.sort(lambda x, y: cmp(x[0][1], y[0][1]))
def __str__(self):
d = []
d.append("Table")
d.append(" " + self.table)
d.append("\nPrimary Keys")
for a in self.primarykeys:
d.append(" %s {%s}" % (a[0], a[2]))
d.append("\nImported (Foreign) Keys")
for a in self.imported:
d.append(" %s (%s.%s) {%s}" % (a[2], a[0], a[1], a[3]))
if self.full:
d.append("\nExported (Referenced) Keys")
for a in self.exported:
d.append(" %s (%s.%s) {%s}" % (a[0], a[1], a[2], a[3]))
d.append("\nColumns")
for a in self.columns:
nullable = choose(a[3], "nullable", "non-nullable")
d.append(" %-20s %s(%s), %s" % (a[0], a[1], a[2], nullable))
d.append("\nIndices")
for a in self.indices:
unique = choose(a[0][0], "non-unique", "unique")
cname = string.join(map(lambda x: x[4], a), ", ")
d.append(" %s index {%s} on (%s)" % (unique, a[0][1], cname))
return string.join(d, "\n")
class IniParser:
def __init__(self, cfg, key='name'):
self.key = key
self.records = {}
self.ctypeRE = re.compile("\[(jdbc|odbc|default)\]")
self.entryRE = re.compile("([a-zA-Z]+)[ \t]*=[ \t]*(.*)")
self.cfg = cfg
self.parse()
def parse(self):
fp = open(self.cfg, "r")
data = fp.readlines()
fp.close()
lines = filter(lambda x: len(x) > 0 and x[0] not in ['#', ';'], map(string.strip, data))
current = None
for i in range(len(lines)):
line = lines[i]
g = self.ctypeRE.match(line)
if g: # a section header
current = {}
if not self.records.has_key(g.group(1)):
self.records[g.group(1)] = []
self.records[g.group(1)].append(current)
else:
g = self.entryRE.match(line)
if g:
current[g.group(1)] = g.group(2)
def __getitem__(self, (ctype, skey)):
if skey == self.key: return self.records[ctype][0][skey]
t = filter(lambda x, p=self.key, s=skey: x[p] == s, self.records[ctype])
if not t or len(t) > 1:
raise KeyError, "invalid key ('%s', '%s')" % (ctype, skey)
return t[0]
def random_table_name(prefix, num_chars):
import random
d = [prefix, '_']
i = 0
while i < num_chars:
d.append(chr(int(100 * random.random()) % 26 + ord('A')))
i += 1
return string.join(d, "")
class ResultSetRow:
def __init__(self, rs, row):
self.row = row
self.rs = rs
def __getitem__(self, i):
if type(i) == type(""):
i = self.rs.index(i)
return self.row[i]
def __getslice__(self, i, j):
if type(i) == type(""): i = self.rs.index(i)
if type(j) == type(""): j = self.rs.index(j)
return self.row[i:j]
def __len__(self):
return len(self.row)
def __repr__(self):
return str(self.row)
class ResultSet:
def __init__(self, headers, results=[]):
self.headers = map(lambda x: x.upper(), headers)
self.results = results
def index(self, i):
return self.headers.index(string.upper(i))
def __getitem__(self, i):
return ResultSetRow(self, self.results[i])
def __getslice__(self, i, j):
return map(lambda x, rs=self: ResultSetRow(rs, x), self.results[i:j])
def __repr__(self):
return "<%s instance {cols [%d], rows [%d]} at %s>" % (self.__class__, len(self.headers), len(self.results), id(self))

+ 0
- 781
lib/jython/Lib/difflib.py

@@ -1,781 +0,0 @@
#! /usr/bin/env python
"""
Module difflib -- helpers for computing deltas between objects.
Function get_close_matches(word, possibilities, n=3, cutoff=0.6):
Use SequenceMatcher to return list of the best "good enough" matches.
word is a sequence for which close matches are desired (typically a
string).
possibilities is a list of sequences against which to match word
(typically a list of strings).
Optional arg n (default 3) is the maximum number of close matches to
return. n must be > 0.
Optional arg cutoff (default 0.6) is a float in [0, 1]. Possibilities
that don't score at least that similar to word are ignored.
The best (no more than n) matches among the possibilities are returned
in a list, sorted by similarity score, most similar first.
>>> get_close_matches("appel", ["ape", "apple", "peach", "puppy"])
['apple', 'ape']
>>> import keyword
>>> get_close_matches("wheel", keyword.kwlist)
['while']
>>> get_close_matches("apple", keyword.kwlist)
[]
>>> get_close_matches("accept", keyword.kwlist)
['except']
Class SequenceMatcher
SequenceMatcher is a flexible class for comparing pairs of sequences of any
type, so long as the sequence elements are hashable. The basic algorithm
predates, and is a little fancier than, an algorithm published in the late
1980's by Ratcliff and Obershelp under the hyperbolic name "gestalt pattern
matching". The basic idea is to find the longest contiguous matching
subsequence that contains no "junk" elements (R-O doesn't address junk).
The same idea is then applied recursively to the pieces of the sequences to
the left and to the right of the matching subsequence. This does not yield
minimal edit sequences, but does tend to yield matches that "look right"
to people.
Example, comparing two strings, and considering blanks to be "junk":
>>> s = SequenceMatcher(lambda x: x == " ",
... "private Thread currentThread;",
... "private volatile Thread currentThread;")
>>>
.ratio() returns a float in [0, 1], measuring the "similarity" of the
sequences. As a rule of thumb, a .ratio() value over 0.6 means the
sequences are close matches:
>>> print round(s.ratio(), 3)
0.866
>>>
If you're only interested in where the sequences match,
.get_matching_blocks() is handy:
>>> for block in s.get_matching_blocks():
... print "a[%d] and b[%d] match for %d elements" % block
a[0] and b[0] match for 8 elements
a[8] and b[17] match for 6 elements
a[14] and b[23] match for 15 elements
a[29] and b[38] match for 0 elements
Note that the last tuple returned by .get_matching_blocks() is always a
dummy, (len(a), len(b), 0), and this is the only case in which the last
tuple element (number of elements matched) is 0.
If you want to know how to change the first sequence into the second, use
.get_opcodes():
>>> for opcode in s.get_opcodes():
... print "%6s a[%d:%d] b[%d:%d]" % opcode
equal a[0:8] b[0:8]
insert a[8:8] b[8:17]
equal a[8:14] b[17:23]
equal a[14:29] b[23:38]
See Tools/scripts/ndiff.py for a fancy human-friendly file differencer,
which uses SequenceMatcher both to view files as sequences of lines, and
lines as sequences of characters.
See also function get_close_matches() in this module, which shows how
simple code building on SequenceMatcher can be used to do useful work.
Timing: Basic R-O is cubic time worst case and quadratic time expected
case. SequenceMatcher is quadratic time for the worst case and has
expected-case behavior dependent in a complicated way on how many
elements the sequences have in common; best case time is linear.
SequenceMatcher methods:
__init__(isjunk=None, a='', b='')
Construct a SequenceMatcher.
Optional arg isjunk is None (the default), or a one-argument function
that takes a sequence element and returns true iff the element is junk.
None is equivalent to passing "lambda x: 0", i.e. no elements are
considered to be junk. For example, pass
lambda x: x in " \\t"
if you're comparing lines as sequences of characters, and don't want to
synch up on blanks or hard tabs.
Optional arg a is the first of two sequences to be compared. By
default, an empty string. The elements of a must be hashable.
Optional arg b is the second of two sequences to be compared. By
default, an empty string. The elements of b must be hashable.
set_seqs(a, b)
Set the two sequences to be compared.
>>> s = SequenceMatcher()
>>> s.set_seqs("abcd", "bcde")
>>> s.ratio()
0.75
set_seq1(a)
Set the first sequence to be compared.
The second sequence to be compared is not changed.
>>> s = SequenceMatcher(None, "abcd", "bcde")
>>> s.ratio()
0.75
>>> s.set_seq1("bcde")
>>> s.ratio()
1.0
>>>
SequenceMatcher computes and caches detailed information about the
second sequence, so if you want to compare one sequence S against many
sequences, use .set_seq2(S) once and call .set_seq1(x) repeatedly for
each of the other sequences.
See also set_seqs() and set_seq2().
set_seq2(b)
Set the second sequence to be compared.
The first sequence to be compared is not changed.
>>> s = SequenceMatcher(None, "abcd", "bcde")
>>> s.ratio()
0.75
>>> s.set_seq2("abcd")
>>> s.ratio()
1.0
>>>
SequenceMatcher computes and caches detailed information about the
second sequence, so if you want to compare one sequence S against many
sequences, use .set_seq2(S) once and call .set_seq1(x) repeatedly for
each of the other sequences.
See also set_seqs() and set_seq1().
find_longest_match(alo, ahi, blo, bhi)
Find longest matching block in a[alo:ahi] and b[blo:bhi].
If isjunk is not defined:
Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where
alo <= i <= i+k <= ahi
blo <= j <= j+k <= bhi
and for all (i',j',k') meeting those conditions,
k >= k'
i <= i'
and if i == i', j <= j'
In other words, of all maximal matching blocks, return one that starts
earliest in a, and of all those maximal matching blocks that start
earliest in a, return the one that starts earliest in b.
>>> s = SequenceMatcher(None, " abcd", "abcd abcd")
>>> s.find_longest_match(0, 5, 0, 9)
(0, 4, 5)
If isjunk is defined, first the longest matching block is determined as
above, but with the additional restriction that no junk element appears
in the block. Then that block is extended as far as possible by
matching (only) junk elements on both sides. So the resulting block
never matches on junk except as identical junk happens to be adjacent
to an "interesting" match.
Here's the same example as before, but considering blanks to be junk.
That prevents " abcd" from matching the " abcd" at the tail end of the
second sequence directly. Instead only the "abcd" can match, and
matches the leftmost "abcd" in the second sequence:
>>> s = SequenceMatcher(lambda x: x==" ", " abcd", "abcd abcd")
>>> s.find_longest_match(0, 5, 0, 9)
(1, 0, 4)
If no blocks match, return (alo, blo, 0).
>>> s = SequenceMatcher(None, "ab", "c")
>>> s.find_longest_match(0, 2, 0, 1)
(0, 0, 0)
get_matching_blocks()
Return list of triples describing matching subsequences.
Each triple is of the form (i, j, n), and means that
a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in i
and in j.
The last triple is a dummy, (len(a), len(b), 0), and is the only triple
with n==0.
>>> s = SequenceMatcher(None, "abxcd", "abcd")
>>> s.get_matching_blocks()
[(0, 0, 2), (3, 2, 2), (5, 4, 0)]
get_opcodes()
Return list of 5-tuples describing how to turn a into b.
Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple has
i1 == j1 == 0, and remaining tuples have i1 == the i2 from the tuple
preceding it, and likewise for j1 == the previous j2.
The tags are strings, with these meanings:
'replace': a[i1:i2] should be replaced by b[j1:j2]
'delete': a[i1:i2] should be deleted.
Note that j1==j2 in this case.
'insert': b[j1:j2] should be inserted at a[i1:i1].
Note that i1==i2 in this case.
'equal': a[i1:i2] == b[j1:j2]
>>> a = "qabxcd"
>>> b = "abycdf"
>>> s = SequenceMatcher(None, a, b)
>>> for tag, i1, i2, j1, j2 in s.get_opcodes():
... print ("%7s a[%d:%d] (%s) b[%d:%d] (%s)" %
... (tag, i1, i2, a[i1:i2], j1, j2, b[j1:j2]))
delete a[0:1] (q) b[0:0] ()
equal a[1:3] (ab) b[0:2] (ab)
replace a[3:4] (x) b[2:3] (y)
equal a[4:6] (cd) b[3:5] (cd)
insert a[6:6] () b[5:6] (f)
ratio()
Return a measure of the sequences' similarity (float in [0,1]).
Where T is the total number of elements in both sequences, and M is the
number of matches, this is 2.0*M / T. Note that this is 1 if the
sequences are identical, and 0 if they have nothing in common.
.ratio() is expensive to compute if you haven't already computed
.get_matching_blocks() or .get_opcodes(), in which case you may want to
try .quick_ratio() or .real_quick_ratio() first to get an upper bound.
>>> s = SequenceMatcher(None, "abcd", "bcde")
>>> s.ratio()
0.75
>>> s.quick_ratio()
0.75
>>> s.real_quick_ratio()
1.0
quick_ratio()
Return an upper bound on .ratio() relatively quickly.
This isn't defined beyond that it is an upper bound on .ratio(), and
is faster to compute.
real_quick_ratio():
Return an upper bound on ratio() very quickly.
This isn't defined beyond that it is an upper bound on .ratio(), and
is faster to compute than either .ratio() or .quick_ratio().
"""
TRACE = 0
class SequenceMatcher:
def __init__(self, isjunk=None, a='', b=''):
"""Construct a SequenceMatcher.
Optional arg isjunk is None (the default), or a one-argument
function that takes a sequence element and returns true iff the
element is junk. None is equivalent to passing "lambda x: 0", i.e.
no elements are considered to be junk. For example, pass
lambda x: x in " \\t"
if you're comparing lines as sequences of characters, and don't
want to synch up on blanks or hard tabs.
Optional arg a is the first of two sequences to be compared. By
default, an empty string. The elements of a must be hashable. See
also .set_seqs() and .set_seq1().
Optional arg b is the second of two sequences to be compared. By
default, an empty string. The elements of b must be hashable. See
also .set_seqs() and .set_seq2().
"""
# Members:
# a
# first sequence
# b
# second sequence; differences are computed as "what do
# we need to do to 'a' to change it into 'b'?"
# b2j
# for x in b, b2j[x] is a list of the indices (into b)
# at which x appears; junk elements do not appear
# b2jhas
# b2j.has_key
# fullbcount
# for x in b, fullbcount[x] == the number of times x
# appears in b; only materialized if really needed (used
# only for computing quick_ratio())
# matching_blocks
# a list of (i, j, k) triples, where a[i:i+k] == b[j:j+k];
# ascending & non-overlapping in i and in j; terminated by
# a dummy (len(a), len(b), 0) sentinel
# opcodes
# a list of (tag, i1, i2, j1, j2) tuples, where tag is
# one of
# 'replace' a[i1:i2] should be replaced by b[j1:j2]
# 'delete' a[i1:i2] should be deleted
# 'insert' b[j1:j2] should be inserted
# 'equal' a[i1:i2] == b[j1:j2]
# isjunk
# a user-supplied function taking a sequence element and
# returning true iff the element is "junk" -- this has
# subtle but helpful effects on the algorithm, which I'll
# get around to writing up someday <0.9 wink>.
# DON'T USE! Only __chain_b uses this. Use isbjunk.
# isbjunk
# for x in b, isbjunk(x) == isjunk(x) but much faster;
# it's really the has_key method of a hidden dict.
# DOES NOT WORK for x in a!
self.isjunk = isjunk
self.a = self.b = None
self.set_seqs(a, b)
def set_seqs(self, a, b):
"""Set the two sequences to be compared.
>>> s = SequenceMatcher()
>>> s.set_seqs("abcd", "bcde")
>>> s.ratio()
0.75
"""
self.set_seq1(a)
self.set_seq2(b)
def set_seq1(self, a):
"""Set the first sequence to be compared.
The second sequence to be compared is not changed.
>>> s = SequenceMatcher(None, "abcd", "bcde")
>>> s.ratio()
0.75
>>> s.set_seq1("bcde")
>>> s.ratio()
1.0
>>>
SequenceMatcher computes and caches detailed information about the
second sequence, so if you want to compare one sequence S against
many sequences, use .set_seq2(S) once and call .set_seq1(x)
repeatedly for each of the other sequences.
See also set_seqs() and set_seq2().
"""
if a is self.a:
return
self.a = a
self.matching_blocks = self.opcodes = None
def set_seq2(self, b):
"""Set the second sequence to be compared.
The first sequence to be compared is not changed.
>>> s = SequenceMatcher(None, "abcd", "bcde")
>>> s.ratio()
0.75
>>> s.set_seq2("abcd")
>>> s.ratio()
1.0
>>>
SequenceMatcher computes and caches detailed information about the
second sequence, so if you want to compare one sequence S against
many sequences, use .set_seq2(S) once and call .set_seq1(x)
repeatedly for each of the other sequences.
See also set_seqs() and set_seq1().
"""
if b is self.b:
return
self.b = b
self.matching_blocks = self.opcodes = None
self.fullbcount = None
self.__chain_b()
# For each element x in b, set b2j[x] to a list of the indices in
# b where x appears; the indices are in increasing order; note that
# the number of times x appears in b is len(b2j[x]) ...
# when self.isjunk is defined, junk elements don't show up in this
# map at all, which stops the central find_longest_match method
# from starting any matching block at a junk element ...
# also creates the fast isbjunk function ...
# note that this is only called when b changes; so for cross-product
# kinds of matches, it's best to call set_seq2 once, then set_seq1
# repeatedly
def __chain_b(self):
# Because isjunk is a user-defined (not C) function, and we test
# for junk a LOT, it's important to minimize the number of calls.
# Before the tricks described here, __chain_b was by far the most
# time-consuming routine in the whole module! If anyone sees
# Jim Roskind, thank him again for profile.py -- I never would
# have guessed that.
# The first trick is to build b2j ignoring the possibility
# of junk. I.e., we don't call isjunk at all yet. Throwing
# out the junk later is much cheaper than building b2j "right"
# from the start.
b = self.b
self.b2j = b2j = {}
self.b2jhas = b2jhas = b2j.has_key
for i in xrange(len(b)):
elt = b[i]
if b2jhas(elt):
b2j[elt].append(i)
else:
b2j[elt] = [i]
# Now b2j.keys() contains elements uniquely, and especially when
# the sequence is a string, that's usually a good deal smaller
# than len(string). The difference is the number of isjunk calls
# saved.
isjunk, junkdict = self.isjunk, {}
if isjunk:
for elt in b2j.keys():
if isjunk(elt):
junkdict[elt] = 1 # value irrelevant; it's a set
del b2j[elt]
# Now for x in b, isjunk(x) == junkdict.has_key(x), but the
# latter is much faster. Note too that while there may be a
# lot of junk in the sequence, the number of *unique* junk
# elements is probably small. So the memory burden of keeping
# this dict alive is likely trivial compared to the size of b2j.
self.isbjunk = junkdict.has_key
def find_longest_match(self, alo, ahi, blo, bhi):
"""Find longest matching block in a[alo:ahi] and b[blo:bhi].
If isjunk is not defined:
Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where
alo <= i <= i+k <= ahi
blo <= j <= j+k <= bhi
and for all (i',j',k') meeting those conditions,
k >= k'
i <= i'
and if i == i', j <= j'
In other words, of all maximal matching blocks, return one that
starts earliest in a, and of all those maximal matching blocks that
start earliest in a, return the one that starts earliest in b.
>>> s = SequenceMatcher(None, " abcd", "abcd abcd")
>>> s.find_longest_match(0, 5, 0, 9)
(0, 4, 5)
If isjunk is defined, first the longest matching block is
determined as above, but with the additional restriction that no
junk element appears in the block. Then that block is extended as
far as possible by matching (only) junk elements on both sides. So
the resulting block never matches on junk except as identical junk
happens to be adjacent to an "interesting" match.
Here's the same example as before, but considering blanks to be
junk. That prevents " abcd" from matching the " abcd" at the tail
end of the second sequence directly. Instead only the "abcd" can
match, and matches the leftmost "abcd" in the second sequence:
>>> s = SequenceMatcher(lambda x: x==" ", " abcd", "abcd abcd")
>>> s.find_longest_match(0, 5, 0, 9)
(1, 0, 4)
If no blocks match, return (alo, blo, 0).
>>> s = SequenceMatcher(None, "ab", "c")
>>> s.find_longest_match(0, 2, 0, 1)
(0, 0, 0)
"""
# CAUTION: stripping common prefix or suffix would be incorrect.
# E.g.,
# ab
# acab
# Longest matching block is "ab", but if common prefix is
# stripped, it's "a" (tied with "b"). UNIX(tm) diff does so
# strip, so ends up claiming that ab is changed to acab by
# inserting "ca" in the middle. That's minimal but unintuitive:
# "it's obvious" that someone inserted "ac" at the front.
# Windiff ends up at the same place as diff, but by pairing up
# the unique 'b's and then matching the first two 'a's.
a, b, b2j, isbjunk = self.a, self.b, self.b2j, self.isbjunk
besti, bestj, bestsize = alo, blo, 0
# find longest junk-free match
# during an iteration of the loop, j2len[j] = length of longest
# junk-free match ending with a[i-1] and b[j]
j2len = {}
nothing = []
for i in xrange(alo, ahi):
# look at all instances of a[i] in b; note that because
# b2j has no junk keys, the loop is skipped if a[i] is junk
j2lenget = j2len.get
newj2len = {}
for j in b2j.get(a[i], nothing):
# a[i] matches b[j]
if j < blo:
continue
if j >= bhi:
break
k = newj2len[j] = j2lenget(j-1, 0) + 1
if k > bestsize:
besti, bestj, bestsize = i-k+1, j-k+1, k
j2len = newj2len
# Now that we have a wholly interesting match (albeit possibly
# empty!), we may as well suck up the matching junk on each
# side of it too. Can't think of a good reason not to, and it
# saves post-processing the (possibly considerable) expense of
# figuring out what to do with it. In the case of an empty
# interesting match, this is clearly the right thing to do,
# because no other kind of match is possible in the regions.
while besti > alo and bestj > blo and \
isbjunk(b[bestj-1]) and \
a[besti-1] == b[bestj-1]:
besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
while besti+bestsize < ahi and bestj+bestsize < bhi and \
isbjunk(b[bestj+bestsize]) and \
a[besti+bestsize] == b[bestj+bestsize]:
bestsize = bestsize + 1
if TRACE:
print "get_matching_blocks", alo, ahi, blo, bhi
print " returns", besti, bestj, bestsize
return besti, bestj, bestsize
def get_matching_blocks(self):
"""Return list of triples describing matching subsequences.
Each triple is of the form (i, j, n), and means that
a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in
i and in j.
The last triple is a dummy, (len(a), len(b), 0), and is the only
triple with n==0.
>>> s = SequenceMatcher(None, "abxcd", "abcd")
>>> s.get_matching_blocks()
[(0, 0, 2), (3, 2, 2), (5, 4, 0)]
"""
if self.matching_blocks is not None:
return self.matching_blocks
self.matching_blocks = []
la, lb = len(self.a), len(self.b)
self.__helper(0, la, 0, lb, self.matching_blocks)
self.matching_blocks.append( (la, lb, 0) )
if TRACE:
print '*** matching blocks', self.matching_blocks
return self.matching_blocks
# builds list of matching blocks covering a[alo:ahi] and
# b[blo:bhi], appending them in increasing order to answer
def __helper(self, alo, ahi, blo, bhi, answer):
i, j, k = x = self.find_longest_match(alo, ahi, blo, bhi)
# a[alo:i] vs b[blo:j] unknown
# a[i:i+k] same as b[j:j+k]
# a[i+k:ahi] vs b[j+k:bhi] unknown
if k:
if alo < i and blo < j:
self.__helper(alo, i, blo, j, answer)
answer.append(x)
if i+k < ahi and j+k < bhi:
self.__helper(i+k, ahi, j+k, bhi, answer)
def get_opcodes(self):
"""Return list of 5-tuples describing how to turn a into b.
Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple
has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the
tuple preceding it, and likewise for j1 == the previous j2.
The tags are strings, with these meanings:
'replace': a[i1:i2] should be replaced by b[j1:j2]
'delete': a[i1:i2] should be deleted.
Note that j1==j2 in this case.
'insert': b[j1:j2] should be inserted at a[i1:i1].
Note that i1==i2 in this case.
'equal': a[i1:i2] == b[j1:j2]
>>> a = "qabxcd"
>>> b = "abycdf"
>>> s = SequenceMatcher(None, a, b)
>>> for tag, i1, i2, j1, j2 in s.get_opcodes():
... print ("%7s a[%d:%d] (%s) b[%d:%d] (%s)" %
... (tag, i1, i2, a[i1:i2], j1, j2, b[j1:j2]))
delete a[0:1] (q) b[0:0] ()
equal a[1:3] (ab) b[0:2] (ab)
replace a[3:4] (x) b[2:3] (y)
equal a[4:6] (cd) b[3:5] (cd)
insert a[6:6] () b[5:6] (f)
"""
if self.opcodes is not None:
return self.opcodes
i = j = 0
self.opcodes = answer = []
for ai, bj, size in self.get_matching_blocks():
# invariant: we've pumped out correct diffs to change
# a[:i] into b[:j], and the next matching block is
# a[ai:ai+size] == b[bj:bj+size]. So we need to pump
# out a diff to change a[i:ai] into b[j:bj], pump out
# the matching block, and move (i,j) beyond the match
tag = ''
if i < ai and j < bj:
tag = 'replace'
elif i < ai:
tag = 'delete'
elif j < bj:
tag = 'insert'
if tag:
answer.append( (tag, i, ai, j, bj) )
i, j = ai+size, bj+size
# the list of matching blocks is terminated by a
# sentinel with size 0
if size:
answer.append( ('equal', ai, i, bj, j) )
return answer
def ratio(self):
"""Return a measure of the sequences' similarity (float in [0,1]).
Where T is the total number of elements in both sequences, and
M is the number of matches, this is 2.0*M / T.
Note that this is 1 if the sequences are identical, and 0 if
they have nothing in common.
.ratio() is expensive to compute if you haven't already computed
.get_matching_blocks() or .get_opcodes(), in which case you may
want to try .quick_ratio() or .real_quick_ratio() first to get an
upper bound.
>>> s = SequenceMatcher(None, "abcd", "bcde")
>>> s.ratio()
0.75
>>> s.quick_ratio()
0.75
>>> s.real_quick_ratio()
1.0
"""
matches = reduce(lambda sum, triple: sum + triple[-1],
self.get_matching_blocks(), 0)
return 2.0 * matches / (len(self.a) + len(self.b))
def quick_ratio(self):
"""Return an upper bound on ratio() relatively quickly.
This isn't defined beyond that it is an upper bound on .ratio(), and
is faster to compute.
"""
# viewing a and b as multisets, set matches to the cardinality
# of their intersection; this counts the number of matches
# without regard to order, so is clearly an upper bound
if self.fullbcount is None:
self.fullbcount = fullbcount = {}
for elt in self.b:
fullbcount[elt] = fullbcount.get(elt, 0) + 1
fullbcount = self.fullbcount
# avail[x] is the number of times x appears in 'b' less the
# number of times we've seen it in 'a' so far ... kinda
avail = {}
availhas, matches = avail.has_key, 0
for elt in self.a:
if availhas(elt):
numb = avail[elt]
else:
numb = fullbcount.get(elt, 0)
avail[elt] = numb - 1
if numb > 0:
matches = matches + 1
return 2.0 * matches / (len(self.a) + len(self.b))
def real_quick_ratio(self):
"""Return an upper bound on ratio() very quickly.
This isn't defined beyond that it is an upper bound on .ratio(), and
is faster to compute than either .ratio() or .quick_ratio().
"""
la, lb = len(self.a), len(self.b)
# can't have more matches than the number of elements in the
# shorter sequence
return 2.0 * min(la, lb) / (la + lb)
def get_close_matches(word, possibilities, n=3, cutoff=0.6):
"""Use SequenceMatcher to return list of the best "good enough" matches.
word is a sequence for which close matches are desired (typically a
string).
possibilities is a list of sequences against which to match word
(typically a list of strings).
Optional arg n (default 3) is the maximum number of close matches to
return. n must be > 0.
Optional arg cutoff (default 0.6) is a float in [0, 1]. Possibilities
that don't score at least that similar to word are ignored.
The best (no more than n) matches among the possibilities are returned
in a list, sorted by similarity score, most similar first.
>>> get_close_matches("appel", ["ape", "apple", "peach", "puppy"])
['apple', 'ape']
>>> import keyword
>>> get_close_matches("wheel", keyword.kwlist)
['while']
>>> get_close_matches("apple", keyword.kwlist)
[]
>>> get_close_matches("accept", keyword.kwlist)
['except']
"""
if not n > 0:
raise ValueError("n must be > 0: " + `n`)
if not 0.0 <= cutoff <= 1.0:
raise ValueError("cutoff must be in [0.0, 1.0]: " + `cutoff`)
result = []
s = SequenceMatcher()
s.set_seq2(word)
for x in possibilities:
s.set_seq1(x)
if s.real_quick_ratio() >= cutoff and \
s.quick_ratio() >= cutoff and \
s.ratio() >= cutoff:
result.append((s.ratio(), x))
# Sort by score.
result.sort()
# Retain only the best n.
result = result[-n:]
# Move best-scorer to head of list.
result.reverse()
# Strip scores.
return [x for score, x in result]
def _test():
import doctest, difflib
return doctest.testmod(difflib)
if __name__ == "__main__":
_test()
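
The docstrings above repeatedly recommend caching the second sequence when comparing one target against many candidates: call set_seq2() once, then set_seq1() per candidate. A short sketch of that pattern (the API shown above matches the standard-library difflib, so this also runs against it; the candidate strings are made up):

from difflib import SequenceMatcher

target = "private Thread currentThread;"
candidates = ["private volatile Thread currentThread;",
              "protected Thread currentThread;",
              "public static void main(String[] args)"]

s = SequenceMatcher(lambda x: x == " ")   # treat blanks as junk
s.set_seq2(target)                        # per-b preprocessing happens once here
for c in candidates:
    s.set_seq1(c)                         # cheap: only 'a' changes
    print("%.3f  %s" % (s.ratio(), c))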

+ 0
- 44
lib/jython/Lib/dircache.py

@@ -1,44 +0,0 @@
"""Read and cache directory listings.
The listdir() routine returns a sorted list of the files in a directory,
using a cache to avoid reading the directory more often than necessary.
The annotate() routine appends slashes to directories."""
import os
__all__ = ["listdir", "opendir", "annotate", "reset"]
cache = {}
def reset():
"""Reset the cache completely."""
global cache
cache = {}
def listdir(path):
"""List directory contents, using cache."""
try:
cached_mtime, list = cache[path]
del cache[path]
except KeyError:
cached_mtime, list = -1, []
try:
mtime = os.stat(path)[8]
except os.error:
return []
if mtime != cached_mtime:
try:
list = os.listdir(path)
except os.error:
return []
list.sort()
cache[path] = mtime, list
return list
opendir = listdir # XXX backward compatibility
def annotate(head, list):
"""Add '/' suffixes to directories."""
for i in range(len(list)):
if os.path.isdir(os.path.join(head, list[i])):
list[i] = list[i] + '/'
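
A brief usage sketch of the cache above (illustrative; the path is a placeholder). Note that listdir() hands back the cached list object itself, so copy it before passing it to annotate() if the cached listing should stay unannotated:

import dircache

names = dircache.listdir("/tmp")[:]   # copy of the (possibly cached) sorted listing
dircache.annotate("/tmp", names)      # append '/' to entries that are directories
print(names[:5])
dircache.reset()                      # discard the whole cache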

+ 0
- 1118
lib/jython/Lib/doctest.py
File diff suppressed because it is too large


+ 0
- 332
lib/jython/Lib/dospath.py

@@ -1,332 +0,0 @@
"""Common operations on DOS pathnames."""
import os
import stat
__all__ = ["normcase","isabs","join","splitdrive","split","splitext",
"basename","dirname","commonprefix","getsize","getmtime",
"getatime","islink","exists","isdir","isfile","ismount",
"walk","expanduser","expandvars","normpath","abspath"]
def normcase(s):
"""Normalize the case of a pathname.
On MS-DOS it maps the pathname to lowercase, turns slashes into
backslashes.
Other normalizations (such as optimizing '../' away) are not allowed
(this is done by normpath).
Previously, this version mapped invalid consecutive characters to a
single '_', but this has been removed. This functionality should
possibly be added as a new function."""
return s.replace("/", "\\").lower()
def isabs(s):
"""Return whether a path is absolute.
Trivial in Posix, harder on the Mac or MS-DOS.
For DOS it is absolute if it starts with a slash or backslash (current
volume), or if a pathname after the volume letter and colon starts with
a slash or backslash."""
s = splitdrive(s)[1]
return s != '' and s[:1] in '/\\'
def join(a, *p):
"""Join two (or more) paths."""
path = a
for b in p:
if isabs(b):
path = b
elif path == '' or path[-1:] in '/\\:':
path = path + b
else:
path = path + "\\" + b
return path
def splitdrive(p):
"""Split a path into a drive specification (a drive letter followed
by a colon) and path specification.
It is always true that drivespec + pathspec == p."""
if p[1:2] == ':':
return p[0:2], p[2:]
return '', p
def split(p):
"""Split a path into head (everything up to the last '/') and tail
(the rest). After the trailing '/' is stripped, the invariant
join(head, tail) == p holds.
The resulting head won't end in '/' unless it is the root."""
d, p = splitdrive(p)
# set i to index beyond p's last slash
i = len(p)
while i and p[i-1] not in '/\\':
i = i - 1
head, tail = p[:i], p[i:] # now tail has no slashes
# remove trailing slashes from head, unless it's all slashes
head2 = head
while head2 and head2[-1] in '/\\':
head2 = head2[:-1]
head = head2 or head
return d + head, tail
def splitext(p):
"""Split a path into root and extension.
The extension is everything starting at the first dot in the last
pathname component; the root is everything before that.
It is always true that root + ext == p."""
root, ext = '', ''
for c in p:
if c in '/\\':
root, ext = root + ext + c, ''
elif c == '.' or ext:
ext = ext + c
else:
root = root + c
return root, ext
def basename(p):
"""Return the tail (basename) part of a path."""
return split(p)[1]
def dirname(p):
"""Return the head (dirname) part of a path."""
return split(p)[0]
def commonprefix(m):
"""Return the longest prefix of all list elements."""
if not m: return ''
prefix = m[0]
for item in m:
for i in range(len(prefix)):
if prefix[:i+1] != item[:i+1]:
prefix = prefix[:i]
if i == 0: return ''
break
return prefix
# Get size, mtime, atime of files.
def getsize(filename):
"""Return the size of a file, reported by os.stat()."""
st = os.stat(filename)
return st[stat.ST_SIZE]
def getmtime(filename):
"""Return the last modification time of a file, reported by os.stat()."""
st = os.stat(filename)
return st[stat.ST_MTIME]
def getatime(filename):
"""Return the last access time of a file, reported by os.stat()."""
st = os.stat(filename)
return st[stat.ST_ATIME]
def islink(path):
"""Is a path a symbolic link?
This will always return false on systems where posix.lstat doesn't exist."""
return 0
def exists(path):
"""Does a path exist?
This is false for dangling symbolic links."""
try:
st = os.stat(path)
except os.error:
return 0
return 1
def isdir(path):
"""Is a path a dos directory?"""
try:
st = os.stat(path)
except os.error:
return 0
return stat.S_ISDIR(st[stat.ST_MODE])
def isfile(path):
"""Is a path a regular file?"""
try:
st = os.stat(path)
except os.error:
return 0
return stat.S_ISREG(st[stat.ST_MODE])
def ismount(path):
"""Is a path a mount point?"""
# XXX This degenerates in: 'is this the root?' on DOS
return isabs(splitdrive(path)[1])
def walk(top, func, arg):
"""Directory tree walk.
For each directory under top (including top itself, but excluding
'.' and '..'), func(arg, dirname, filenames) is called, where
dirname is the name of the directory and filenames is the list
of files (and subdirectories etc.) in the directory.
The func may modify the filenames list, to implement a filter,
or to impose a different order of visiting."""
try:
names = os.listdir(top)
except os.error:
return
func(arg, top, names)
exceptions = ('.', '..')
for name in names:
if name not in exceptions:
name = join(top, name)
if isdir(name):
walk(name, func, arg)
def expanduser(path):
"""Expand paths beginning with '~' or '~user'.
'~' means $HOME; '~user' means that user's home directory.
If the path doesn't begin with '~', or if the user or $HOME is unknown,
the path is returned unchanged (leaving error reporting to whatever
function is called with the expanded path as argument).
See also module 'glob' for expansion of *, ? and [...] in pathnames.
(A function should also be defined to do full *sh-style environment
variable expansion.)"""
if path[:1] != '~':
return path
i, n = 1, len(path)
while i < n and path[i] not in '/\\':
i = i+1
if i == 1:
if not os.environ.has_key('HOME'):
return path
userhome = os.environ['HOME']
else:
return path
return userhome + path[i:]
def expandvars(path):
"""Expand paths containing shell variable substitutions.
The following rules apply:
- no expansion within single quotes
- no escape character, except for '$$' which is translated into '$'
- ${varname} is accepted.
- varnames can be made out of letters, digits and the character '_'"""
# XXX With COMMAND.COM you can use any characters in a variable name,
# XXX except '^|<>='.
if '$' not in path:
return path
import string
varchars = string.letters + string.digits + '_-'
res = ''
index = 0
pathlen = len(path)
while index < pathlen:
c = path[index]
if c == '\'': # no expansion within single quotes
path = path[index + 1:]
pathlen = len(path)
try:
index = path.index('\'')
res = res + '\'' + path[:index + 1]
except ValueError:
res = res + path
index = pathlen -1
elif c == '$': # variable or '$$'
if path[index + 1:index + 2] == '$':
res = res + c
index = index + 1
elif path[index + 1:index + 2] == '{':
path = path[index+2:]
pathlen = len(path)
try:
index = path.index('}')
var = path[:index]
if os.environ.has_key(var):
res = res + os.environ[var]
except ValueError:
res = res + path
index = pathlen - 1
else:
var = ''
index = index + 1
c = path[index:index + 1]
while c != '' and c in varchars:
var = var + c
index = index + 1
c = path[index:index + 1]
if os.environ.has_key(var):
res = res + os.environ[var]
if c != '':
res = res + c
else:
res = res + c
index = index + 1
return res
def normpath(path):
"""Normalize a path, e.g. A//B, A/./B and A/foo/../B all become A/B.
Also, components of the path are silently truncated to 8+3 notation."""
path = path.replace("/", "\\")
prefix, path = splitdrive(path)
while path[:1] == "\\":
prefix = prefix + "\\"
path = path[1:]
comps = path.split("\\")
i = 0
while i < len(comps):
if comps[i] == '.':
del comps[i]
elif comps[i] == '..' and i > 0 and \
comps[i-1] not in ('', '..'):
del comps[i-1:i+1]
i = i - 1
elif comps[i] == '' and i > 0 and comps[i-1] != '':
del comps[i]
elif '.' in comps[i]:
comp = comps[i].split('.')
comps[i] = comp[0][:8] + '.' + comp[1][:3]
i = i + 1
elif len(comps[i]) > 8:
comps[i] = comps[i][:8]
i = i + 1
else:
i = i + 1
# If the path is now empty, substitute '.'
if not prefix and not comps:
comps.append('.')
return prefix + "\\".join(comps)
def abspath(path):
"""Return an absolute path."""
if not isabs(path):
path = join(os.getcwd(), path)
return normpath(path)
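
A few illustrative calls against the helpers above, showing the DOS conventions (backslash separators, drive letters, 8+3 truncation in normpath). The example paths are made up; expected results are noted as comments:

import dospath

print(dospath.splitdrive("c:\\autoexec.bat"))             # ('c:', '\\autoexec.bat')
print(dospath.join("c:\\dos", "utils", "edit.com"))       # c:\dos\utils\edit.com
print(dospath.normcase("C:/Foo/Bar.TXT"))                 # c:\foo\bar.txt
print(dospath.normpath("c:/dos/./utils/../command.com"))  # c:\dos\command.com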

+ 0
- 148
lib/jython/Lib/dumbdbm.py

@@ -1,148 +0,0 @@
"""A dumb and slow but simple dbm clone.
For database spam, spam.dir contains the index (a text file),
spam.bak *may* contain a backup of the index (also a text file),
while spam.dat contains the data (a binary file).
XXX TO DO:
- seems to contain a bug when updating...
- reclaim free space (currently, space once occupied by deleted or expanded
items is never reused)
- support concurrent access (currently, if two processes take turns making
updates, they can mess up the index)
- support efficient access to large databases (currently, the whole index
is read when the database is opened, and some updates rewrite the whole index)
- support opening for read-only (flag = 'm')
"""
_os = __import__('os')
import __builtin__
_open = __builtin__.open
_BLOCKSIZE = 512
error = IOError # For anydbm
class _Database:
def __init__(self, file):
if _os.sep == '.':
endsep = '/'
else:
endsep = '.'
self._dirfile = file + endsep + 'dir'
self._datfile = file + endsep + 'dat'
self._bakfile = file + endsep + 'bak'
# Mod by Jack: create data file if needed
try:
f = _open(self._datfile, 'r')
except IOError:
f = _open(self._datfile, 'w')
f.close()
self._update()
def _update(self):
self._index = {}
try:
f = _open(self._dirfile)
except IOError:
pass
else:
while 1:
line = f.readline().rstrip()
if not line: break
key, (pos, siz) = eval(line)
self._index[key] = (pos, siz)
f.close()
def _commit(self):
try: _os.unlink(self._bakfile)
except _os.error: pass
try: _os.rename(self._dirfile, self._bakfile)
except _os.error: pass
f = _open(self._dirfile, 'w')
for key, (pos, siz) in self._index.items():
f.write("%s, (%s, %s)\n" % (`key`, `pos`, `siz`))
f.close()
def __getitem__(self, key):
pos, siz = self._index[key] # may raise KeyError
f = _open(self._datfile, 'rb')
f.seek(pos)
dat = f.read(siz)
f.close()
return dat
def _addval(self, val):
f = _open(self._datfile, 'rb+')
f.seek(0, 2)
pos = int(f.tell())
## Does not work under MW compiler
## pos = ((pos + _BLOCKSIZE - 1) / _BLOCKSIZE) * _BLOCKSIZE
## f.seek(pos)
npos = ((pos + _BLOCKSIZE - 1) / _BLOCKSIZE) * _BLOCKSIZE
f.write('\0'*(npos-pos))
pos = npos
f.write(val)
f.close()
return (pos, len(val))
def _setval(self, pos, val):
f = _open(self._datfile, 'rb+')
f.seek(pos)
f.write(val)
f.close()
return (pos, len(val))
def _addkey(self, key, (pos, siz)):
self._index[key] = (pos, siz)
f = _open(self._dirfile, 'a')
f.write("%s, (%s, %s)\n" % (`key`, `pos`, `siz`))
f.close()
def __setitem__(self, key, val):
if not type(key) == type('') == type(val):
raise TypeError, "keys and values must be strings"
if not self._index.has_key(key):
(pos, siz) = self._addval(val)
self._addkey(key, (pos, siz))
else:
pos, siz = self._index[key]
oldblocks = (siz + _BLOCKSIZE - 1) / _BLOCKSIZE
newblocks = (len(val) + _BLOCKSIZE - 1) / _BLOCKSIZE
if newblocks <= oldblocks:
pos, siz = self._setval(pos, val)
self._index[key] = pos, siz
else:
pos, siz = self._addval(val)
self._index[key] = pos, siz
def __delitem__(self, key):
del self._index[key]
self._commit()
def keys(self):
return self._index.keys()
def has_key(self, key):
return self._index.has_key(key)
def __len__(self):
return len(self._index)
def close(self):
self._index = None
self._datfile = self._dirfile = self._bakfile = None
def open(file, flag = None, mode = None):
# flag, mode arguments are currently ignored
return _Database(file)
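
A minimal round trip with the store above (Python 2; keys and values must be plain strings, and the path prefix here is only an example):

import dumbdbm

db = dumbdbm.open("/tmp/spamdb")   # creates /tmp/spamdb.dat and /tmp/spamdb.dir on demand
db["eggs"] = "12"
db["ham"] = "yes"
print(db["eggs"])                  # 12
print(db.keys())                   # ['eggs', 'ham'] in some order
del db["ham"]                      # deletion rewrites the index file
db.close()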

+ 0
- 86
lib/jython/Lib/encodings/__init__.py

@@ -1,86 +0,0 @@
""" Standard "encodings" Package
Standard Python encoding modules are stored in this package
directory.
Codec modules must have names corresponding to standard lower-case
encoding names with hyphens mapped to underscores, e.g. 'utf-8' is
implemented by the module 'utf_8.py'.
Each codec module must export the following interface:
* getregentry() -> (encoder, decoder, stream_reader, stream_writer)
The getregentry() API must return callable objects which adhere to
the Python Codec Interface Standard.
In addition, a module may optionally also define the following
APIs which are then used by the package's codec search function:
* getaliases() -> sequence of encoding name strings to use as aliases
Alias names returned by getaliases() must be standard encoding
names as defined above (lower-case, hyphens converted to
underscores).
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""#"
import codecs,aliases
_cache = {}
_unknown = '--unknown--'
def search_function(encoding):
# Cache lookup
entry = _cache.get(encoding,_unknown)
if entry is not _unknown:
return entry
# Import the module
modname = encoding.replace('-', '_')
modname = aliases.aliases.get(modname,modname)
try:
mod = __import__(modname,globals(),locals(),'*')
except ImportError,why:
# cache misses
_cache[encoding] = None
return None
# Now ask the module for the registry entry
try:
entry = tuple(mod.getregentry())
except AttributeError:
entry = ()
if len(entry) != 4:
raise SystemError,\
'module "%s.%s" failed to register' % \
(__name__,modname)
for obj in entry:
if not callable(obj):
raise SystemError,\
'incompatible codecs in module "%s.%s"' % \
(__name__,modname)
# Cache the codec registry entry
_cache[encoding] = entry
# Register its aliases (without overwriting previously registered
# aliases)
try:
codecaliases = mod.getaliases()
except AttributeError:
pass
else:
for alias in codecaliases:
if not aliases.aliases.has_key(alias):
aliases.aliases[alias] = modname
# Return the registry entry
return entry
# Register the search_function in the Python codec registry
codecs.register(search_function)
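
Once search_function is registered (which importing this package does), codecs.lookup() resolves encoding names through it and returns the (encoder, decoder, stream_reader, stream_writer) entry described above. A small illustration of that contract:

import codecs
import encodings   # registers search_function as a side effect (normally done at interpreter start-up)

entry = codecs.lookup("ASCII")
encode = entry[0]                  # first element of the registry entry is the encoder
print(encode("spam"))              # -> (encoded data, 4): the codec returns a (data, length consumed) pair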

+ 0
- 82
lib/jython/Lib/encodings/aliases.py

@@ -1,82 +0,0 @@
""" Encoding Aliases Support
This module is used by the encodings package search function to
map encodings names to module names.
Note that the search function converts the encoding names to lower
case and replaces hyphens with underscores *before* performing the
lookup.
"""
aliases = {
# Latin-1
'latin': 'latin_1',
'latin1': 'latin_1',
# UTF-8
'utf': 'utf_8',
'utf8': 'utf_8',
'u8': 'utf_8',
'utf8@ucs2': 'utf_8',
'utf8@ucs4': 'utf_8',
# UTF-16
'utf16': 'utf_16',
'u16': 'utf_16',
'utf_16be': 'utf_16_be',
'utf_16le': 'utf_16_le',
'unicodebigunmarked': 'utf_16_be',
'unicodelittleunmarked': 'utf_16_le',
# ASCII
'us_ascii': 'ascii',
# ISO
'8859': 'latin_1',
'iso8859': 'latin_1',
'iso8859_1': 'latin_1',
'iso_8859_1': 'latin_1',
'iso_8859_10': 'iso8859_10',
'iso_8859_13': 'iso8859_13',
'iso_8859_14': 'iso8859_14',
'iso_8859_15': 'iso8859_15',
'iso_8859_2': 'iso8859_2',
'iso_8859_3': 'iso8859_3',
'iso_8859_4': 'iso8859_4',
'iso_8859_5': 'iso8859_5',
'iso_8859_6': 'iso8859_6',
'iso_8859_7': 'iso8859_7',
'iso_8859_8': 'iso8859_8',
'iso_8859_9': 'iso8859_9',
# Mac
'maclatin2': 'mac_latin2',
'maccentraleurope': 'mac_latin2',
'maccyrillic': 'mac_cyrillic',
'macgreek': 'mac_greek',
'maciceland': 'mac_iceland',
'macroman': 'mac_roman',
'macturkish': 'mac_turkish',
# MBCS
'dbcs': 'mbcs',
# Code pages
'437': 'cp437',
# CJK
#
# The codecs for these encodings are not distributed with the
# Python core, but are included here for reference, since the
# locale module relies on having these aliases available.
#
'jis_7': 'jis_7',
'iso_2022_jp': 'jis_7',
'ujis': 'euc_jp',
'ajec': 'euc_jp',
'eucjp': 'euc_jp',
'tis260': 'tactis',
'sjis': 'shift_jis',
}
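
As the docstring above notes, names are lower-cased and hyphens mapped to underscores before this table is consulted; unknown names simply fall through as module names. A tiny sketch of that lookup (the helper name resolve is made up for illustration):

from encodings import aliases

def resolve(name):
    # Normalization described in the docstring above, then the alias lookup.
    modname = name.lower().replace("-", "_")
    return aliases.aliases.get(modname, modname)

print(resolve("ISO-8859-1"))   # latin_1
print(resolve("utf8"))         # utf_8
print(resolve("cp037"))        # cp037  (no alias entry; used as the module name directly)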

+ 0
- 35
lib/jython/Lib/encodings/ascii.py

@@ -1,35 +0,0 @@
""" Python 'ascii' Codec
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
import codecs
### Codec APIs
class Codec(codecs.Codec):
# Note: Binding these as C functions will result in the class not
# converting them to methods. This is intended.
encode = codecs.ascii_encode
decode = codecs.ascii_decode
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
class StreamConverter(StreamWriter,StreamReader):
encode = codecs.ascii_decode
decode = codecs.ascii_encode
### encodings module API
def getregentry():
return (Codec.encode,Codec.decode,StreamReader,StreamWriter)

+ 0
- 51
lib/jython/Lib/encodings/charmap.py

@@ -1,51 +0,0 @@
""" Generic Python Character Mapping Codec.
Use this codec directly rather than through the automatic
conversion mechanisms supplied by unicode() and .encode().
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
# Note: Binding these as C functions will result in the class not
# converting them to methods. This is intended.
encode = codecs.charmap_encode
decode = codecs.charmap_decode
class StreamWriter(Codec,codecs.StreamWriter):
def __init__(self,stream,errors='strict',mapping=None):
codecs.StreamWriter.__init__(self,stream,errors)
self.mapping = mapping
def encode(self,input,errors='strict'):
return Codec.encode(input,errors,self.mapping)
class StreamReader(Codec,codecs.StreamReader):
def __init__(self,stream,errors='strict',mapping=None):
codecs.StreamReader.__init__(self,stream,errors)
self.mapping = mapping
def decode(self,input,errors='strict'):
return Codec.decode(input,errors,self.mapping)
### encodings module API
def getregentry():
return (Codec.encode,Codec.decode,StreamReader,StreamWriter)

+ 0
- 282
lib/jython/Lib/encodings/cp037.py

@@ -1,282 +0,0 @@
""" Python Character Mapping Codec generated from 'CP037.TXT' with gencodec.py.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_map)
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0004: 0x009c, # CONTROL
0x0005: 0x0009, # HORIZONTAL TABULATION
0x0006: 0x0086, # CONTROL
0x0007: 0x007f, # DELETE
0x0008: 0x0097, # CONTROL
0x0009: 0x008d, # CONTROL
0x000a: 0x008e, # CONTROL
0x0014: 0x009d, # CONTROL
0x0015: 0x0085, # CONTROL
0x0016: 0x0008, # BACKSPACE
0x0017: 0x0087, # CONTROL
0x001a: 0x0092, # CONTROL
0x001b: 0x008f, # CONTROL
0x0020: 0x0080, # CONTROL
0x0021: 0x0081, # CONTROL
0x0022: 0x0082, # CONTROL
0x0023: 0x0083, # CONTROL
0x0024: 0x0084, # CONTROL
0x0025: 0x000a, # LINE FEED
0x0026: 0x0017, # END OF TRANSMISSION BLOCK
0x0027: 0x001b, # ESCAPE
0x0028: 0x0088, # CONTROL
0x0029: 0x0089, # CONTROL
0x002a: 0x008a, # CONTROL
0x002b: 0x008b, # CONTROL
0x002c: 0x008c, # CONTROL
0x002d: 0x0005, # ENQUIRY
0x002e: 0x0006, # ACKNOWLEDGE
0x002f: 0x0007, # BELL
0x0030: 0x0090, # CONTROL
0x0031: 0x0091, # CONTROL
0x0032: 0x0016, # SYNCHRONOUS IDLE
0x0033: 0x0093, # CONTROL
0x0034: 0x0094, # CONTROL
0x0035: 0x0095, # CONTROL
0x0036: 0x0096, # CONTROL
0x0037: 0x0004, # END OF TRANSMISSION
0x0038: 0x0098, # CONTROL
0x0039: 0x0099, # CONTROL
0x003a: 0x009a, # CONTROL
0x003b: 0x009b, # CONTROL
0x003c: 0x0014, # DEVICE CONTROL FOUR
0x003d: 0x0015, # NEGATIVE ACKNOWLEDGE
0x003e: 0x009e, # CONTROL
0x003f: 0x001a, # SUBSTITUTE
0x0040: 0x0020, # SPACE
0x0041: 0x00a0, # NO-BREAK SPACE
0x0042: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x0043: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
0x0044: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
0x0045: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
0x0046: 0x00e3, # LATIN SMALL LETTER A WITH TILDE
0x0047: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE
0x0048: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
0x0049: 0x00f1, # LATIN SMALL LETTER N WITH TILDE
0x004a: 0x00a2, # CENT SIGN
0x004b: 0x002e, # FULL STOP
0x004c: 0x003c, # LESS-THAN SIGN
0x004d: 0x0028, # LEFT PARENTHESIS
0x004e: 0x002b, # PLUS SIGN
0x004f: 0x007c, # VERTICAL LINE
0x0050: 0x0026, # AMPERSAND
0x0051: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x0052: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x0053: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
0x0054: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
0x0055: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
0x0056: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x0057: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS
0x0058: 0x00ec, # LATIN SMALL LETTER I WITH GRAVE
0x0059: 0x00df, # LATIN SMALL LETTER SHARP S (GERMAN)
0x005a: 0x0021, # EXCLAMATION MARK
0x005b: 0x0024, # DOLLAR SIGN
0x005c: 0x002a, # ASTERISK
0x005d: 0x0029, # RIGHT PARENTHESIS
0x005e: 0x003b, # SEMICOLON
0x005f: 0x00ac, # NOT SIGN
0x0060: 0x002d, # HYPHEN-MINUS
0x0061: 0x002f, # SOLIDUS
0x0062: 0x00c2, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
0x0063: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x0064: 0x00c0, # LATIN CAPITAL LETTER A WITH GRAVE
0x0065: 0x00c1, # LATIN CAPITAL LETTER A WITH ACUTE
0x0066: 0x00c3, # LATIN CAPITAL LETTER A WITH TILDE
0x0067: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x0068: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
0x0069: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE
0x006a: 0x00a6, # BROKEN BAR
0x006b: 0x002c, # COMMA
0x006c: 0x0025, # PERCENT SIGN
0x006d: 0x005f, # LOW LINE
0x006e: 0x003e, # GREATER-THAN SIGN
0x006f: 0x003f, # QUESTION MARK
0x0070: 0x00f8, # LATIN SMALL LETTER O WITH STROKE
0x0071: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x0072: 0x00ca, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
0x0073: 0x00cb, # LATIN CAPITAL LETTER E WITH DIAERESIS
0x0074: 0x00c8, # LATIN CAPITAL LETTER E WITH GRAVE
0x0075: 0x00cd, # LATIN CAPITAL LETTER I WITH ACUTE
0x0076: 0x00ce, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
0x0077: 0x00cf, # LATIN CAPITAL LETTER I WITH DIAERESIS
0x0078: 0x00cc, # LATIN CAPITAL LETTER I WITH GRAVE
0x0079: 0x0060, # GRAVE ACCENT
0x007a: 0x003a, # COLON
0x007b: 0x0023, # NUMBER SIGN
0x007c: 0x0040, # COMMERCIAL AT
0x007d: 0x0027, # APOSTROPHE
0x007e: 0x003d, # EQUALS SIGN
0x007f: 0x0022, # QUOTATION MARK
0x0080: 0x00d8, # LATIN CAPITAL LETTER O WITH STROKE
0x0081: 0x0061, # LATIN SMALL LETTER A
0x0082: 0x0062, # LATIN SMALL LETTER B
0x0083: 0x0063, # LATIN SMALL LETTER C
0x0084: 0x0064, # LATIN SMALL LETTER D
0x0085: 0x0065, # LATIN SMALL LETTER E
0x0086: 0x0066, # LATIN SMALL LETTER F
0x0087: 0x0067, # LATIN SMALL LETTER G
0x0088: 0x0068, # LATIN SMALL LETTER H
0x0089: 0x0069, # LATIN SMALL LETTER I
0x008a: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x008b: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x008c: 0x00f0, # LATIN SMALL LETTER ETH (ICELANDIC)
0x008d: 0x00fd, # LATIN SMALL LETTER Y WITH ACUTE
0x008e: 0x00fe, # LATIN SMALL LETTER THORN (ICELANDIC)
0x008f: 0x00b1, # PLUS-MINUS SIGN
0x0090: 0x00b0, # DEGREE SIGN
0x0091: 0x006a, # LATIN SMALL LETTER J
0x0092: 0x006b, # LATIN SMALL LETTER K
0x0093: 0x006c, # LATIN SMALL LETTER L
0x0094: 0x006d, # LATIN SMALL LETTER M
0x0095: 0x006e, # LATIN SMALL LETTER N
0x0096: 0x006f, # LATIN SMALL LETTER O
0x0097: 0x0070, # LATIN SMALL LETTER P
0x0098: 0x0071, # LATIN SMALL LETTER Q
0x0099: 0x0072, # LATIN SMALL LETTER R
0x009a: 0x00aa, # FEMININE ORDINAL INDICATOR
0x009b: 0x00ba, # MASCULINE ORDINAL INDICATOR
0x009c: 0x00e6, # LATIN SMALL LIGATURE AE
0x009d: 0x00b8, # CEDILLA
0x009e: 0x00c6, # LATIN CAPITAL LIGATURE AE
0x009f: 0x00a4, # CURRENCY SIGN
0x00a0: 0x00b5, # MICRO SIGN
0x00a1: 0x007e, # TILDE
0x00a2: 0x0073, # LATIN SMALL LETTER S
0x00a3: 0x0074, # LATIN SMALL LETTER T
0x00a4: 0x0075, # LATIN SMALL LETTER U
0x00a5: 0x0076, # LATIN SMALL LETTER V
0x00a6: 0x0077, # LATIN SMALL LETTER W
0x00a7: 0x0078, # LATIN SMALL LETTER X
0x00a8: 0x0079, # LATIN SMALL LETTER Y
0x00a9: 0x007a, # LATIN SMALL LETTER Z
0x00aa: 0x00a1, # INVERTED EXCLAMATION MARK
0x00ab: 0x00bf, # INVERTED QUESTION MARK
0x00ac: 0x00d0, # LATIN CAPITAL LETTER ETH (ICELANDIC)
0x00ad: 0x00dd, # LATIN CAPITAL LETTER Y WITH ACUTE
0x00ae: 0x00de, # LATIN CAPITAL LETTER THORN (ICELANDIC)
0x00af: 0x00ae, # REGISTERED SIGN
0x00b0: 0x005e, # CIRCUMFLEX ACCENT
0x00b1: 0x00a3, # POUND SIGN
0x00b2: 0x00a5, # YEN SIGN
0x00b3: 0x00b7, # MIDDLE DOT
0x00b4: 0x00a9, # COPYRIGHT SIGN
0x00b5: 0x00a7, # SECTION SIGN
0x00b7: 0x00bc, # VULGAR FRACTION ONE QUARTER
0x00b8: 0x00bd, # VULGAR FRACTION ONE HALF
0x00b9: 0x00be, # VULGAR FRACTION THREE QUARTERS
0x00ba: 0x005b, # LEFT SQUARE BRACKET
0x00bb: 0x005d, # RIGHT SQUARE BRACKET
0x00bc: 0x00af, # MACRON
0x00bd: 0x00a8, # DIAERESIS
0x00be: 0x00b4, # ACUTE ACCENT
0x00bf: 0x00d7, # MULTIPLICATION SIGN
0x00c0: 0x007b, # LEFT CURLY BRACKET
0x00c1: 0x0041, # LATIN CAPITAL LETTER A
0x00c2: 0x0042, # LATIN CAPITAL LETTER B
0x00c3: 0x0043, # LATIN CAPITAL LETTER C
0x00c4: 0x0044, # LATIN CAPITAL LETTER D
0x00c5: 0x0045, # LATIN CAPITAL LETTER E
0x00c6: 0x0046, # LATIN CAPITAL LETTER F
0x00c7: 0x0047, # LATIN CAPITAL LETTER G
0x00c8: 0x0048, # LATIN CAPITAL LETTER H
0x00c9: 0x0049, # LATIN CAPITAL LETTER I
0x00ca: 0x00ad, # SOFT HYPHEN
0x00cb: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x00cc: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
0x00cd: 0x00f2, # LATIN SMALL LETTER O WITH GRAVE
0x00ce: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x00cf: 0x00f5, # LATIN SMALL LETTER O WITH TILDE
0x00d0: 0x007d, # RIGHT CURLY BRACKET
0x00d1: 0x004a, # LATIN CAPITAL LETTER J
0x00d2: 0x004b, # LATIN CAPITAL LETTER K
0x00d3: 0x004c, # LATIN CAPITAL LETTER L
0x00d4: 0x004d, # LATIN CAPITAL LETTER M
0x00d5: 0x004e, # LATIN CAPITAL LETTER N
0x00d6: 0x004f, # LATIN CAPITAL LETTER O
0x00d7: 0x0050, # LATIN CAPITAL LETTER P
0x00d8: 0x0051, # LATIN CAPITAL LETTER Q
0x00d9: 0x0052, # LATIN CAPITAL LETTER R
0x00da: 0x00b9, # SUPERSCRIPT ONE
0x00db: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x00dc: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x00dd: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE
0x00de: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
0x00df: 0x00ff, # LATIN SMALL LETTER Y WITH DIAERESIS
0x00e0: 0x005c, # REVERSE SOLIDUS
0x00e1: 0x00f7, # DIVISION SIGN
0x00e2: 0x0053, # LATIN CAPITAL LETTER S
0x00e3: 0x0054, # LATIN CAPITAL LETTER T
0x00e4: 0x0055, # LATIN CAPITAL LETTER U
0x00e5: 0x0056, # LATIN CAPITAL LETTER V
0x00e6: 0x0057, # LATIN CAPITAL LETTER W
0x00e7: 0x0058, # LATIN CAPITAL LETTER X
0x00e8: 0x0059, # LATIN CAPITAL LETTER Y
0x00e9: 0x005a, # LATIN CAPITAL LETTER Z
0x00ea: 0x00b2, # SUPERSCRIPT TWO
0x00eb: 0x00d4, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
0x00ec: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x00ed: 0x00d2, # LATIN CAPITAL LETTER O WITH GRAVE
0x00ee: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE
0x00ef: 0x00d5, # LATIN CAPITAL LETTER O WITH TILDE
0x00f0: 0x0030, # DIGIT ZERO
0x00f1: 0x0031, # DIGIT ONE
0x00f2: 0x0032, # DIGIT TWO
0x00f3: 0x0033, # DIGIT THREE
0x00f4: 0x0034, # DIGIT FOUR
0x00f5: 0x0035, # DIGIT FIVE
0x00f6: 0x0036, # DIGIT SIX
0x00f7: 0x0037, # DIGIT SEVEN
0x00f8: 0x0038, # DIGIT EIGHT
0x00f9: 0x0039, # DIGIT NINE
0x00fa: 0x00b3, # SUPERSCRIPT THREE
0x00fb: 0x00db, # LATIN CAPITAL LETTER U WITH CIRCUMFLEX
0x00fc: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x00fd: 0x00d9, # LATIN CAPITAL LETTER U WITH GRAVE
0x00fe: 0x00da, # LATIN CAPITAL LETTER U WITH ACUTE
0x00ff: 0x009f, # CONTROL
})
### Encoding Map
encoding_map = {}
for k,v in decoding_map.items():
encoding_map[v] = k
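
Every generated code-page module above follows the same shape: a decoding_map from byte values to Unicode code points, inverted into encoding_map. A self-contained illustration using three entries copied from the CP037 table (plain dict lookups, no codec machinery):

# Entries taken from the CP037 decoding_map above.
decoding_map = {0x40: 0x0020,   # SPACE
                0x81: 0x0061,   # LATIN SMALL LETTER A
                0x5b: 0x0024}   # DOLLAR SIGN
encoding_map = dict((v, k) for k, v in decoding_map.items())

ebcdic_bytes = [0x81, 0x40, 0x5b]
text = "".join(chr(decoding_map[b]) for b in ebcdic_bytes)
print(text)                                        # a $
print([hex(encoding_map[ord(c)]) for c in text])   # ['0x81', '0x40', '0x5b']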

+ 0
- 140
lib/jython/Lib/encodings/cp1006.py

@@ -1,140 +0,0 @@
""" Python Character Mapping Codec generated from 'CP1006.TXT' with gencodec.py.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_map)
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x00a1: 0x06f0, # EXTENDED ARABIC-INDIC DIGIT ZERO
0x00a2: 0x06f1, # EXTENDED ARABIC-INDIC DIGIT ONE
0x00a3: 0x06f2, # EXTENDED ARABIC-INDIC DIGIT TWO
0x00a4: 0x06f3, # EXTENDED ARABIC-INDIC DIGIT THREE
0x00a5: 0x06f4, # EXTENDED ARABIC-INDIC DIGIT FOUR
0x00a6: 0x06f5, # EXTENDED ARABIC-INDIC DIGIT FIVE
0x00a7: 0x06f6, # EXTENDED ARABIC-INDIC DIGIT SIX
0x00a8: 0x06f7, # EXTENDED ARABIC-INDIC DIGIT SEVEN
0x00a9: 0x06f8, # EXTENDED ARABIC-INDIC DIGIT EIGHT
0x00aa: 0x06f9, # EXTENDED ARABIC-INDIC DIGIT NINE
0x00ab: 0x060c, # ARABIC COMMA
0x00ac: 0x061b, # ARABIC SEMICOLON
0x00ae: 0x061f, # ARABIC QUESTION MARK
0x00af: 0xfe81, # ARABIC LETTER ALEF WITH MADDA ABOVE ISOLATED FORM
0x00b0: 0xfe8d, # ARABIC LETTER ALEF ISOLATED FORM
0x00b1: 0xfe8e, # ARABIC LETTER ALEF FINAL FORM
0x00b2: 0xfe8e, # ARABIC LETTER ALEF FINAL FORM
0x00b3: 0xfe8f, # ARABIC LETTER BEH ISOLATED FORM
0x00b4: 0xfe91, # ARABIC LETTER BEH INITIAL FORM
0x00b5: 0xfb56, # ARABIC LETTER PEH ISOLATED FORM
0x00b6: 0xfb58, # ARABIC LETTER PEH INITIAL FORM
0x00b7: 0xfe93, # ARABIC LETTER TEH MARBUTA ISOLATED FORM
0x00b8: 0xfe95, # ARABIC LETTER TEH ISOLATED FORM
0x00b9: 0xfe97, # ARABIC LETTER TEH INITIAL FORM
0x00ba: 0xfb66, # ARABIC LETTER TTEH ISOLATED FORM
0x00bb: 0xfb68, # ARABIC LETTER TTEH INITIAL FORM
0x00bc: 0xfe99, # ARABIC LETTER THEH ISOLATED FORM
0x00bd: 0xfe9b, # ARABIC LETTER THEH INITIAL FORM
0x00be: 0xfe9d, # ARABIC LETTER JEEM ISOLATED FORM
0x00bf: 0xfe9f, # ARABIC LETTER JEEM INITIAL FORM
0x00c0: 0xfb7a, # ARABIC LETTER TCHEH ISOLATED FORM
0x00c1: 0xfb7c, # ARABIC LETTER TCHEH INITIAL FORM
0x00c2: 0xfea1, # ARABIC LETTER HAH ISOLATED FORM
0x00c3: 0xfea3, # ARABIC LETTER HAH INITIAL FORM
0x00c4: 0xfea5, # ARABIC LETTER KHAH ISOLATED FORM
0x00c5: 0xfea7, # ARABIC LETTER KHAH INITIAL FORM
0x00c6: 0xfea9, # ARABIC LETTER DAL ISOLATED FORM
0x00c7: 0xfb84, # ARABIC LETTER DAHAL ISOLATED FORM
0x00c8: 0xfeab, # ARABIC LETTER THAL ISOLATED FORM
0x00c9: 0xfead, # ARABIC LETTER REH ISOLATED FORM
0x00ca: 0xfb8c, # ARABIC LETTER RREH ISOLATED FORM
0x00cb: 0xfeaf, # ARABIC LETTER ZAIN ISOLATED FORM
0x00cc: 0xfb8a, # ARABIC LETTER JEH ISOLATED FORM
0x00cd: 0xfeb1, # ARABIC LETTER SEEN ISOLATED FORM
0x00ce: 0xfeb3, # ARABIC LETTER SEEN INITIAL FORM
0x00cf: 0xfeb5, # ARABIC LETTER SHEEN ISOLATED FORM
0x00d0: 0xfeb7, # ARABIC LETTER SHEEN INITIAL FORM
0x00d1: 0xfeb9, # ARABIC LETTER SAD ISOLATED FORM
0x00d2: 0xfebb, # ARABIC LETTER SAD INITIAL FORM
0x00d3: 0xfebd, # ARABIC LETTER DAD ISOLATED FORM
0x00d4: 0xfebf, # ARABIC LETTER DAD INITIAL FORM
0x00d5: 0xfec1, # ARABIC LETTER TAH ISOLATED FORM
0x00d6: 0xfec5, # ARABIC LETTER ZAH ISOLATED FORM
0x00d7: 0xfec9, # ARABIC LETTER AIN ISOLATED FORM
0x00d8: 0xfeca, # ARABIC LETTER AIN FINAL FORM
0x00d9: 0xfecb, # ARABIC LETTER AIN INITIAL FORM
0x00da: 0xfecc, # ARABIC LETTER AIN MEDIAL FORM
0x00db: 0xfecd, # ARABIC LETTER GHAIN ISOLATED FORM
0x00dc: 0xfece, # ARABIC LETTER GHAIN FINAL FORM
0x00dd: 0xfecf, # ARABIC LETTER GHAIN INITIAL FORM
0x00de: 0xfed0, # ARABIC LETTER GHAIN MEDIAL FORM
0x00df: 0xfed1, # ARABIC LETTER FEH ISOLATED FORM
0x00e0: 0xfed3, # ARABIC LETTER FEH INITIAL FORM
0x00e1: 0xfed5, # ARABIC LETTER QAF ISOLATED FORM
0x00e2: 0xfed7, # ARABIC LETTER QAF INITIAL FORM
0x00e3: 0xfed9, # ARABIC LETTER KAF ISOLATED FORM
0x00e4: 0xfedb, # ARABIC LETTER KAF INITIAL FORM
0x00e5: 0xfb92, # ARABIC LETTER GAF ISOLATED FORM
0x00e6: 0xfb94, # ARABIC LETTER GAF INITIAL FORM
0x00e7: 0xfedd, # ARABIC LETTER LAM ISOLATED FORM
0x00e8: 0xfedf, # ARABIC LETTER LAM INITIAL FORM
0x00e9: 0xfee0, # ARABIC LETTER LAM MEDIAL FORM
0x00ea: 0xfee1, # ARABIC LETTER MEEM ISOLATED FORM
0x00eb: 0xfee3, # ARABIC LETTER MEEM INITIAL FORM
0x00ec: 0xfb9e, # ARABIC LETTER NOON GHUNNA ISOLATED FORM
0x00ed: 0xfee5, # ARABIC LETTER NOON ISOLATED FORM
0x00ee: 0xfee7, # ARABIC LETTER NOON INITIAL FORM
0x00ef: 0xfe85, # ARABIC LETTER WAW WITH HAMZA ABOVE ISOLATED FORM
0x00f0: 0xfeed, # ARABIC LETTER WAW ISOLATED FORM
0x00f1: 0xfba6, # ARABIC LETTER HEH GOAL ISOLATED FORM
0x00f2: 0xfba8, # ARABIC LETTER HEH GOAL INITIAL FORM
0x00f3: 0xfba9, # ARABIC LETTER HEH GOAL MEDIAL FORM
0x00f4: 0xfbaa, # ARABIC LETTER HEH DOACHASHMEE ISOLATED FORM
0x00f5: 0xfe80, # ARABIC LETTER HAMZA ISOLATED FORM
0x00f6: 0xfe89, # ARABIC LETTER YEH WITH HAMZA ABOVE ISOLATED FORM
0x00f7: 0xfe8a, # ARABIC LETTER YEH WITH HAMZA ABOVE FINAL FORM
0x00f8: 0xfe8b, # ARABIC LETTER YEH WITH HAMZA ABOVE INITIAL FORM
0x00f9: 0xfef1, # ARABIC LETTER YEH ISOLATED FORM
0x00fa: 0xfef2, # ARABIC LETTER YEH FINAL FORM
0x00fb: 0xfef3, # ARABIC LETTER YEH INITIAL FORM
0x00fc: 0xfbb0, # ARABIC LETTER YEH BARREE WITH HAMZA ABOVE ISOLATED FORM
0x00fd: 0xfbae, # ARABIC LETTER YEH BARREE ISOLATED FORM
0x00fe: 0xfe7c, # ARABIC SHADDA ISOLATED FORM
0x00ff: 0xfe7d, # ARABIC SHADDA MEDIAL FORM
})
### Encoding Map
encoding_map = {}
for k,v in decoding_map.items():
    encoding_map[v] = k
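All of the deleted codec modules in this commit follow the same gencodec.py template: an identity map for all 256 byte values, a table of overrides, and an encoding map produced by inverting the decoding map. As a rough, self-contained sketch of that pattern (using a tiny made-up two-entry mapping rather than the cp1006 table above, and skipping None entries when inverting), something like this still runs on a current Python:

import codecs

# Identity mapping for 0x00-0xFF, then a couple of overrides, mirroring the
# generated tables above; the override values here are purely illustrative.
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
    0x80: 0x20ac,  # EURO SIGN
    0x81: None,    # UNDEFINED
})

# The encoding map is the inverse of the decoding map (the generated files
# invert unconditionally; this sketch skips the None entries).
encoding_map = {}
for k, v in decoding_map.items():
    if v is not None:
        encoding_map[v] = k

text, _ = codecs.charmap_decode(b"A\x80", "strict", decoding_map)  # 'A\u20ac'
data, _ = codecs.charmap_encode(text, "strict", encoding_map)      # b'A\x80'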

+ 0
- 282
lib/jython/Lib/encodings/cp1026.py

@@ -1,282 +0,0 @@
""" Python Character Mapping Codec generated from 'CP1026.TXT' with gencodec.py.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_map)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_map)
class StreamWriter(Codec,codecs.StreamWriter):
    pass
class StreamReader(Codec,codecs.StreamReader):
    pass
### encodings module API
def getregentry():
    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0004: 0x009c, # CONTROL
0x0005: 0x0009, # HORIZONTAL TABULATION
0x0006: 0x0086, # CONTROL
0x0007: 0x007f, # DELETE
0x0008: 0x0097, # CONTROL
0x0009: 0x008d, # CONTROL
0x000a: 0x008e, # CONTROL
0x0014: 0x009d, # CONTROL
0x0015: 0x0085, # CONTROL
0x0016: 0x0008, # BACKSPACE
0x0017: 0x0087, # CONTROL
0x001a: 0x0092, # CONTROL
0x001b: 0x008f, # CONTROL
0x0020: 0x0080, # CONTROL
0x0021: 0x0081, # CONTROL
0x0022: 0x0082, # CONTROL
0x0023: 0x0083, # CONTROL
0x0024: 0x0084, # CONTROL
0x0025: 0x000a, # LINE FEED
0x0026: 0x0017, # END OF TRANSMISSION BLOCK
0x0027: 0x001b, # ESCAPE
0x0028: 0x0088, # CONTROL
0x0029: 0x0089, # CONTROL
0x002a: 0x008a, # CONTROL
0x002b: 0x008b, # CONTROL
0x002c: 0x008c, # CONTROL
0x002d: 0x0005, # ENQUIRY
0x002e: 0x0006, # ACKNOWLEDGE
0x002f: 0x0007, # BELL
0x0030: 0x0090, # CONTROL
0x0031: 0x0091, # CONTROL
0x0032: 0x0016, # SYNCHRONOUS IDLE
0x0033: 0x0093, # CONTROL
0x0034: 0x0094, # CONTROL
0x0035: 0x0095, # CONTROL
0x0036: 0x0096, # CONTROL
0x0037: 0x0004, # END OF TRANSMISSION
0x0038: 0x0098, # CONTROL
0x0039: 0x0099, # CONTROL
0x003a: 0x009a, # CONTROL
0x003b: 0x009b, # CONTROL
0x003c: 0x0014, # DEVICE CONTROL FOUR
0x003d: 0x0015, # NEGATIVE ACKNOWLEDGE
0x003e: 0x009e, # CONTROL
0x003f: 0x001a, # SUBSTITUTE
0x0040: 0x0020, # SPACE
0x0041: 0x00a0, # NO-BREAK SPACE
0x0042: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x0043: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
0x0044: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
0x0045: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
0x0046: 0x00e3, # LATIN SMALL LETTER A WITH TILDE
0x0047: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE
0x0048: 0x007b, # LEFT CURLY BRACKET
0x0049: 0x00f1, # LATIN SMALL LETTER N WITH TILDE
0x004a: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
0x004b: 0x002e, # FULL STOP
0x004c: 0x003c, # LESS-THAN SIGN
0x004d: 0x0028, # LEFT PARENTHESIS
0x004e: 0x002b, # PLUS SIGN
0x004f: 0x0021, # EXCLAMATION MARK
0x0050: 0x0026, # AMPERSAND
0x0051: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x0052: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x0053: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
0x0054: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
0x0055: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
0x0056: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x0057: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS
0x0058: 0x00ec, # LATIN SMALL LETTER I WITH GRAVE
0x0059: 0x00df, # LATIN SMALL LETTER SHARP S (GERMAN)
0x005a: 0x011e, # LATIN CAPITAL LETTER G WITH BREVE
0x005b: 0x0130, # LATIN CAPITAL LETTER I WITH DOT ABOVE
0x005c: 0x002a, # ASTERISK
0x005d: 0x0029, # RIGHT PARENTHESIS
0x005e: 0x003b, # SEMICOLON
0x005f: 0x005e, # CIRCUMFLEX ACCENT
0x0060: 0x002d, # HYPHEN-MINUS
0x0061: 0x002f, # SOLIDUS
0x0062: 0x00c2, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
0x0063: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x0064: 0x00c0, # LATIN CAPITAL LETTER A WITH GRAVE
0x0065: 0x00c1, # LATIN CAPITAL LETTER A WITH ACUTE
0x0066: 0x00c3, # LATIN CAPITAL LETTER A WITH TILDE
0x0067: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x0068: 0x005b, # LEFT SQUARE BRACKET
0x0069: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE
0x006a: 0x015f, # LATIN SMALL LETTER S WITH CEDILLA
0x006b: 0x002c, # COMMA
0x006c: 0x0025, # PERCENT SIGN
0x006d: 0x005f, # LOW LINE
0x006e: 0x003e, # GREATER-THAN SIGN
0x006f: 0x003f, # QUESTION MARK
0x0070: 0x00f8, # LATIN SMALL LETTER O WITH STROKE
0x0071: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x0072: 0x00ca, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
0x0073: 0x00cb, # LATIN CAPITAL LETTER E WITH DIAERESIS
0x0074: 0x00c8, # LATIN CAPITAL LETTER E WITH GRAVE
0x0075: 0x00cd, # LATIN CAPITAL LETTER I WITH ACUTE
0x0076: 0x00ce, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
0x0077: 0x00cf, # LATIN CAPITAL LETTER I WITH DIAERESIS
0x0078: 0x00cc, # LATIN CAPITAL LETTER I WITH GRAVE
0x0079: 0x0131, # LATIN SMALL LETTER DOTLESS I
0x007a: 0x003a, # COLON
0x007b: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x007c: 0x015e, # LATIN CAPITAL LETTER S WITH CEDILLA
0x007d: 0x0027, # APOSTROPHE
0x007e: 0x003d, # EQUALS SIGN
0x007f: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x0080: 0x00d8, # LATIN CAPITAL LETTER O WITH STROKE
0x0081: 0x0061, # LATIN SMALL LETTER A
0x0082: 0x0062, # LATIN SMALL LETTER B
0x0083: 0x0063, # LATIN SMALL LETTER C
0x0084: 0x0064, # LATIN SMALL LETTER D
0x0085: 0x0065, # LATIN SMALL LETTER E
0x0086: 0x0066, # LATIN SMALL LETTER F
0x0087: 0x0067, # LATIN SMALL LETTER G
0x0088: 0x0068, # LATIN SMALL LETTER H
0x0089: 0x0069, # LATIN SMALL LETTER I
0x008a: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x008b: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x008c: 0x007d, # RIGHT CURLY BRACKET
0x008d: 0x0060, # GRAVE ACCENT
0x008e: 0x00a6, # BROKEN BAR
0x008f: 0x00b1, # PLUS-MINUS SIGN
0x0090: 0x00b0, # DEGREE SIGN
0x0091: 0x006a, # LATIN SMALL LETTER J
0x0092: 0x006b, # LATIN SMALL LETTER K
0x0093: 0x006c, # LATIN SMALL LETTER L
0x0094: 0x006d, # LATIN SMALL LETTER M
0x0095: 0x006e, # LATIN SMALL LETTER N
0x0096: 0x006f, # LATIN SMALL LETTER O
0x0097: 0x0070, # LATIN SMALL LETTER P
0x0098: 0x0071, # LATIN SMALL LETTER Q
0x0099: 0x0072, # LATIN SMALL LETTER R
0x009a: 0x00aa, # FEMININE ORDINAL INDICATOR
0x009b: 0x00ba, # MASCULINE ORDINAL INDICATOR
0x009c: 0x00e6, # LATIN SMALL LIGATURE AE
0x009d: 0x00b8, # CEDILLA
0x009e: 0x00c6, # LATIN CAPITAL LIGATURE AE
0x009f: 0x00a4, # CURRENCY SIGN
0x00a0: 0x00b5, # MICRO SIGN
0x00a1: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
0x00a2: 0x0073, # LATIN SMALL LETTER S
0x00a3: 0x0074, # LATIN SMALL LETTER T
0x00a4: 0x0075, # LATIN SMALL LETTER U
0x00a5: 0x0076, # LATIN SMALL LETTER V
0x00a6: 0x0077, # LATIN SMALL LETTER W
0x00a7: 0x0078, # LATIN SMALL LETTER X
0x00a8: 0x0079, # LATIN SMALL LETTER Y
0x00a9: 0x007a, # LATIN SMALL LETTER Z
0x00aa: 0x00a1, # INVERTED EXCLAMATION MARK
0x00ab: 0x00bf, # INVERTED QUESTION MARK
0x00ac: 0x005d, # RIGHT SQUARE BRACKET
0x00ad: 0x0024, # DOLLAR SIGN
0x00ae: 0x0040, # COMMERCIAL AT
0x00af: 0x00ae, # REGISTERED SIGN
0x00b0: 0x00a2, # CENT SIGN
0x00b1: 0x00a3, # POUND SIGN
0x00b2: 0x00a5, # YEN SIGN
0x00b3: 0x00b7, # MIDDLE DOT
0x00b4: 0x00a9, # COPYRIGHT SIGN
0x00b5: 0x00a7, # SECTION SIGN
0x00b7: 0x00bc, # VULGAR FRACTION ONE QUARTER
0x00b8: 0x00bd, # VULGAR FRACTION ONE HALF
0x00b9: 0x00be, # VULGAR FRACTION THREE QUARTERS
0x00ba: 0x00ac, # NOT SIGN
0x00bb: 0x007c, # VERTICAL LINE
0x00bc: 0x00af, # MACRON
0x00bd: 0x00a8, # DIAERESIS
0x00be: 0x00b4, # ACUTE ACCENT
0x00bf: 0x00d7, # MULTIPLICATION SIGN
0x00c0: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
0x00c1: 0x0041, # LATIN CAPITAL LETTER A
0x00c2: 0x0042, # LATIN CAPITAL LETTER B
0x00c3: 0x0043, # LATIN CAPITAL LETTER C
0x00c4: 0x0044, # LATIN CAPITAL LETTER D
0x00c5: 0x0045, # LATIN CAPITAL LETTER E
0x00c6: 0x0046, # LATIN CAPITAL LETTER F
0x00c7: 0x0047, # LATIN CAPITAL LETTER G
0x00c8: 0x0048, # LATIN CAPITAL LETTER H
0x00c9: 0x0049, # LATIN CAPITAL LETTER I
0x00ca: 0x00ad, # SOFT HYPHEN
0x00cb: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x00cc: 0x007e, # TILDE
0x00cd: 0x00f2, # LATIN SMALL LETTER O WITH GRAVE
0x00ce: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x00cf: 0x00f5, # LATIN SMALL LETTER O WITH TILDE
0x00d0: 0x011f, # LATIN SMALL LETTER G WITH BREVE
0x00d1: 0x004a, # LATIN CAPITAL LETTER J
0x00d2: 0x004b, # LATIN CAPITAL LETTER K
0x00d3: 0x004c, # LATIN CAPITAL LETTER L
0x00d4: 0x004d, # LATIN CAPITAL LETTER M
0x00d5: 0x004e, # LATIN CAPITAL LETTER N
0x00d6: 0x004f, # LATIN CAPITAL LETTER O
0x00d7: 0x0050, # LATIN CAPITAL LETTER P
0x00d8: 0x0051, # LATIN CAPITAL LETTER Q
0x00d9: 0x0052, # LATIN CAPITAL LETTER R
0x00da: 0x00b9, # SUPERSCRIPT ONE
0x00db: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x00dc: 0x005c, # REVERSE SOLIDUS
0x00dd: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE
0x00de: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
0x00df: 0x00ff, # LATIN SMALL LETTER Y WITH DIAERESIS
0x00e0: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x00e1: 0x00f7, # DIVISION SIGN
0x00e2: 0x0053, # LATIN CAPITAL LETTER S
0x00e3: 0x0054, # LATIN CAPITAL LETTER T
0x00e4: 0x0055, # LATIN CAPITAL LETTER U
0x00e5: 0x0056, # LATIN CAPITAL LETTER V
0x00e6: 0x0057, # LATIN CAPITAL LETTER W
0x00e7: 0x0058, # LATIN CAPITAL LETTER X
0x00e8: 0x0059, # LATIN CAPITAL LETTER Y
0x00e9: 0x005a, # LATIN CAPITAL LETTER Z
0x00ea: 0x00b2, # SUPERSCRIPT TWO
0x00eb: 0x00d4, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
0x00ec: 0x0023, # NUMBER SIGN
0x00ed: 0x00d2, # LATIN CAPITAL LETTER O WITH GRAVE
0x00ee: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE
0x00ef: 0x00d5, # LATIN CAPITAL LETTER O WITH TILDE
0x00f0: 0x0030, # DIGIT ZERO
0x00f1: 0x0031, # DIGIT ONE
0x00f2: 0x0032, # DIGIT TWO
0x00f3: 0x0033, # DIGIT THREE
0x00f4: 0x0034, # DIGIT FOUR
0x00f5: 0x0035, # DIGIT FIVE
0x00f6: 0x0036, # DIGIT SIX
0x00f7: 0x0037, # DIGIT SEVEN
0x00f8: 0x0038, # DIGIT EIGHT
0x00f9: 0x0039, # DIGIT NINE
0x00fa: 0x00b3, # SUPERSCRIPT THREE
0x00fb: 0x00db, # LATIN CAPITAL LETTER U WITH CIRCUMFLEX
0x00fc: 0x0022, # QUOTATION MARK
0x00fd: 0x00d9, # LATIN CAPITAL LETTER U WITH GRAVE
0x00fe: 0x00da, # LATIN CAPITAL LETTER U WITH ACUTE
0x00ff: 0x009f, # CONTROL
})
### Encoding Map
encoding_map = {}
for k,v in decoding_map.items():
    encoding_map[v] = k

+ 0
- 125
lib/jython/Lib/encodings/cp1250.py

@@ -1,125 +0,0 @@
""" Python Character Mapping Codec generated from 'CP1250.TXT' with gencodec.py.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_map)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_map)
class StreamWriter(Codec,codecs.StreamWriter):
    pass
class StreamReader(Codec,codecs.StreamReader):
    pass
### encodings module API
def getregentry():
    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x20ac, # EURO SIGN
0x0081: None, # UNDEFINED
0x0082: 0x201a, # SINGLE LOW-9 QUOTATION MARK
0x0083: None, # UNDEFINED
0x0084: 0x201e, # DOUBLE LOW-9 QUOTATION MARK
0x0085: 0x2026, # HORIZONTAL ELLIPSIS
0x0086: 0x2020, # DAGGER
0x0087: 0x2021, # DOUBLE DAGGER
0x0088: None, # UNDEFINED
0x0089: 0x2030, # PER MILLE SIGN
0x008a: 0x0160, # LATIN CAPITAL LETTER S WITH CARON
0x008b: 0x2039, # SINGLE LEFT-POINTING ANGLE QUOTATION MARK
0x008c: 0x015a, # LATIN CAPITAL LETTER S WITH ACUTE
0x008d: 0x0164, # LATIN CAPITAL LETTER T WITH CARON
0x008e: 0x017d, # LATIN CAPITAL LETTER Z WITH CARON
0x008f: 0x0179, # LATIN CAPITAL LETTER Z WITH ACUTE
0x0090: None, # UNDEFINED
0x0091: 0x2018, # LEFT SINGLE QUOTATION MARK
0x0092: 0x2019, # RIGHT SINGLE QUOTATION MARK
0x0093: 0x201c, # LEFT DOUBLE QUOTATION MARK
0x0094: 0x201d, # RIGHT DOUBLE QUOTATION MARK
0x0095: 0x2022, # BULLET
0x0096: 0x2013, # EN DASH
0x0097: 0x2014, # EM DASH
0x0098: None, # UNDEFINED
0x0099: 0x2122, # TRADE MARK SIGN
0x009a: 0x0161, # LATIN SMALL LETTER S WITH CARON
0x009b: 0x203a, # SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
0x009c: 0x015b, # LATIN SMALL LETTER S WITH ACUTE
0x009d: 0x0165, # LATIN SMALL LETTER T WITH CARON
0x009e: 0x017e, # LATIN SMALL LETTER Z WITH CARON
0x009f: 0x017a, # LATIN SMALL LETTER Z WITH ACUTE
0x00a1: 0x02c7, # CARON
0x00a2: 0x02d8, # BREVE
0x00a3: 0x0141, # LATIN CAPITAL LETTER L WITH STROKE
0x00a5: 0x0104, # LATIN CAPITAL LETTER A WITH OGONEK
0x00aa: 0x015e, # LATIN CAPITAL LETTER S WITH CEDILLA
0x00af: 0x017b, # LATIN CAPITAL LETTER Z WITH DOT ABOVE
0x00b2: 0x02db, # OGONEK
0x00b3: 0x0142, # LATIN SMALL LETTER L WITH STROKE
0x00b9: 0x0105, # LATIN SMALL LETTER A WITH OGONEK
0x00ba: 0x015f, # LATIN SMALL LETTER S WITH CEDILLA
0x00bc: 0x013d, # LATIN CAPITAL LETTER L WITH CARON
0x00bd: 0x02dd, # DOUBLE ACUTE ACCENT
0x00be: 0x013e, # LATIN SMALL LETTER L WITH CARON
0x00bf: 0x017c, # LATIN SMALL LETTER Z WITH DOT ABOVE
0x00c0: 0x0154, # LATIN CAPITAL LETTER R WITH ACUTE
0x00c3: 0x0102, # LATIN CAPITAL LETTER A WITH BREVE
0x00c5: 0x0139, # LATIN CAPITAL LETTER L WITH ACUTE
0x00c6: 0x0106, # LATIN CAPITAL LETTER C WITH ACUTE
0x00c8: 0x010c, # LATIN CAPITAL LETTER C WITH CARON
0x00ca: 0x0118, # LATIN CAPITAL LETTER E WITH OGONEK
0x00cc: 0x011a, # LATIN CAPITAL LETTER E WITH CARON
0x00cf: 0x010e, # LATIN CAPITAL LETTER D WITH CARON
0x00d0: 0x0110, # LATIN CAPITAL LETTER D WITH STROKE
0x00d1: 0x0143, # LATIN CAPITAL LETTER N WITH ACUTE
0x00d2: 0x0147, # LATIN CAPITAL LETTER N WITH CARON
0x00d5: 0x0150, # LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
0x00d8: 0x0158, # LATIN CAPITAL LETTER R WITH CARON
0x00d9: 0x016e, # LATIN CAPITAL LETTER U WITH RING ABOVE
0x00db: 0x0170, # LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
0x00de: 0x0162, # LATIN CAPITAL LETTER T WITH CEDILLA
0x00e0: 0x0155, # LATIN SMALL LETTER R WITH ACUTE
0x00e3: 0x0103, # LATIN SMALL LETTER A WITH BREVE
0x00e5: 0x013a, # LATIN SMALL LETTER L WITH ACUTE
0x00e6: 0x0107, # LATIN SMALL LETTER C WITH ACUTE
0x00e8: 0x010d, # LATIN SMALL LETTER C WITH CARON
0x00ea: 0x0119, # LATIN SMALL LETTER E WITH OGONEK
0x00ec: 0x011b, # LATIN SMALL LETTER E WITH CARON
0x00ef: 0x010f, # LATIN SMALL LETTER D WITH CARON
0x00f0: 0x0111, # LATIN SMALL LETTER D WITH STROKE
0x00f1: 0x0144, # LATIN SMALL LETTER N WITH ACUTE
0x00f2: 0x0148, # LATIN SMALL LETTER N WITH CARON
0x00f5: 0x0151, # LATIN SMALL LETTER O WITH DOUBLE ACUTE
0x00f8: 0x0159, # LATIN SMALL LETTER R WITH CARON
0x00f9: 0x016f, # LATIN SMALL LETTER U WITH RING ABOVE
0x00fb: 0x0171, # LATIN SMALL LETTER U WITH DOUBLE ACUTE
0x00fe: 0x0163, # LATIN SMALL LETTER T WITH CEDILLA
0x00ff: 0x02d9, # DOT ABOVE
})
### Encoding Map
encoding_map = {}
for k,v in decoding_map.items():
    encoding_map[v] = k

+ 0
- 159
lib/jython/Lib/encodings/cp1251.py

@@ -1,159 +0,0 @@
""" Python Character Mapping Codec generated from 'CP1251.TXT' with gencodec.py.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_map)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_map)
class StreamWriter(Codec,codecs.StreamWriter):
    pass
class StreamReader(Codec,codecs.StreamReader):
    pass
### encodings module API
def getregentry():
    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x0402, # CYRILLIC CAPITAL LETTER DJE
0x0081: 0x0403, # CYRILLIC CAPITAL LETTER GJE
0x0082: 0x201a, # SINGLE LOW-9 QUOTATION MARK
0x0083: 0x0453, # CYRILLIC SMALL LETTER GJE
0x0084: 0x201e, # DOUBLE LOW-9 QUOTATION MARK
0x0085: 0x2026, # HORIZONTAL ELLIPSIS
0x0086: 0x2020, # DAGGER
0x0087: 0x2021, # DOUBLE DAGGER
0x0088: 0x20ac, # EURO SIGN
0x0089: 0x2030, # PER MILLE SIGN
0x008a: 0x0409, # CYRILLIC CAPITAL LETTER LJE
0x008b: 0x2039, # SINGLE LEFT-POINTING ANGLE QUOTATION MARK
0x008c: 0x040a, # CYRILLIC CAPITAL LETTER NJE
0x008d: 0x040c, # CYRILLIC CAPITAL LETTER KJE
0x008e: 0x040b, # CYRILLIC CAPITAL LETTER TSHE
0x008f: 0x040f, # CYRILLIC CAPITAL LETTER DZHE
0x0090: 0x0452, # CYRILLIC SMALL LETTER DJE
0x0091: 0x2018, # LEFT SINGLE QUOTATION MARK
0x0092: 0x2019, # RIGHT SINGLE QUOTATION MARK
0x0093: 0x201c, # LEFT DOUBLE QUOTATION MARK
0x0094: 0x201d, # RIGHT DOUBLE QUOTATION MARK
0x0095: 0x2022, # BULLET
0x0096: 0x2013, # EN DASH
0x0097: 0x2014, # EM DASH
0x0098: None, # UNDEFINED
0x0099: 0x2122, # TRADE MARK SIGN
0x009a: 0x0459, # CYRILLIC SMALL LETTER LJE
0x009b: 0x203a, # SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
0x009c: 0x045a, # CYRILLIC SMALL LETTER NJE
0x009d: 0x045c, # CYRILLIC SMALL LETTER KJE
0x009e: 0x045b, # CYRILLIC SMALL LETTER TSHE
0x009f: 0x045f, # CYRILLIC SMALL LETTER DZHE
0x00a1: 0x040e, # CYRILLIC CAPITAL LETTER SHORT U
0x00a2: 0x045e, # CYRILLIC SMALL LETTER SHORT U
0x00a3: 0x0408, # CYRILLIC CAPITAL LETTER JE
0x00a5: 0x0490, # CYRILLIC CAPITAL LETTER GHE WITH UPTURN
0x00a8: 0x0401, # CYRILLIC CAPITAL LETTER IO
0x00aa: 0x0404, # CYRILLIC CAPITAL LETTER UKRAINIAN IE
0x00af: 0x0407, # CYRILLIC CAPITAL LETTER YI
0x00b2: 0x0406, # CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
0x00b3: 0x0456, # CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
0x00b4: 0x0491, # CYRILLIC SMALL LETTER GHE WITH UPTURN
0x00b8: 0x0451, # CYRILLIC SMALL LETTER IO
0x00b9: 0x2116, # NUMERO SIGN
0x00ba: 0x0454, # CYRILLIC SMALL LETTER UKRAINIAN IE
0x00bc: 0x0458, # CYRILLIC SMALL LETTER JE
0x00bd: 0x0405, # CYRILLIC CAPITAL LETTER DZE
0x00be: 0x0455, # CYRILLIC SMALL LETTER DZE
0x00bf: 0x0457, # CYRILLIC SMALL LETTER YI
0x00c0: 0x0410, # CYRILLIC CAPITAL LETTER A
0x00c1: 0x0411, # CYRILLIC CAPITAL LETTER BE
0x00c2: 0x0412, # CYRILLIC CAPITAL LETTER VE
0x00c3: 0x0413, # CYRILLIC CAPITAL LETTER GHE
0x00c4: 0x0414, # CYRILLIC CAPITAL LETTER DE
0x00c5: 0x0415, # CYRILLIC CAPITAL LETTER IE
0x00c6: 0x0416, # CYRILLIC CAPITAL LETTER ZHE
0x00c7: 0x0417, # CYRILLIC CAPITAL LETTER ZE
0x00c8: 0x0418, # CYRILLIC CAPITAL LETTER I
0x00c9: 0x0419, # CYRILLIC CAPITAL LETTER SHORT I
0x00ca: 0x041a, # CYRILLIC CAPITAL LETTER KA
0x00cb: 0x041b, # CYRILLIC CAPITAL LETTER EL
0x00cc: 0x041c, # CYRILLIC CAPITAL LETTER EM
0x00cd: 0x041d, # CYRILLIC CAPITAL LETTER EN
0x00ce: 0x041e, # CYRILLIC CAPITAL LETTER O
0x00cf: 0x041f, # CYRILLIC CAPITAL LETTER PE
0x00d0: 0x0420, # CYRILLIC CAPITAL LETTER ER
0x00d1: 0x0421, # CYRILLIC CAPITAL LETTER ES
0x00d2: 0x0422, # CYRILLIC CAPITAL LETTER TE
0x00d3: 0x0423, # CYRILLIC CAPITAL LETTER U
0x00d4: 0x0424, # CYRILLIC CAPITAL LETTER EF
0x00d5: 0x0425, # CYRILLIC CAPITAL LETTER HA
0x00d6: 0x0426, # CYRILLIC CAPITAL LETTER TSE
0x00d7: 0x0427, # CYRILLIC CAPITAL LETTER CHE
0x00d8: 0x0428, # CYRILLIC CAPITAL LETTER SHA
0x00d9: 0x0429, # CYRILLIC CAPITAL LETTER SHCHA
0x00da: 0x042a, # CYRILLIC CAPITAL LETTER HARD SIGN
0x00db: 0x042b, # CYRILLIC CAPITAL LETTER YERU
0x00dc: 0x042c, # CYRILLIC CAPITAL LETTER SOFT SIGN
0x00dd: 0x042d, # CYRILLIC CAPITAL LETTER E
0x00de: 0x042e, # CYRILLIC CAPITAL LETTER YU
0x00df: 0x042f, # CYRILLIC CAPITAL LETTER YA
0x00e0: 0x0430, # CYRILLIC SMALL LETTER A
0x00e1: 0x0431, # CYRILLIC SMALL LETTER BE
0x00e2: 0x0432, # CYRILLIC SMALL LETTER VE
0x00e3: 0x0433, # CYRILLIC SMALL LETTER GHE
0x00e4: 0x0434, # CYRILLIC SMALL LETTER DE
0x00e5: 0x0435, # CYRILLIC SMALL LETTER IE
0x00e6: 0x0436, # CYRILLIC SMALL LETTER ZHE
0x00e7: 0x0437, # CYRILLIC SMALL LETTER ZE
0x00e8: 0x0438, # CYRILLIC SMALL LETTER I
0x00e9: 0x0439, # CYRILLIC SMALL LETTER SHORT I
0x00ea: 0x043a, # CYRILLIC SMALL LETTER KA
0x00eb: 0x043b, # CYRILLIC SMALL LETTER EL
0x00ec: 0x043c, # CYRILLIC SMALL LETTER EM
0x00ed: 0x043d, # CYRILLIC SMALL LETTER EN
0x00ee: 0x043e, # CYRILLIC SMALL LETTER O
0x00ef: 0x043f, # CYRILLIC SMALL LETTER PE
0x00f0: 0x0440, # CYRILLIC SMALL LETTER ER
0x00f1: 0x0441, # CYRILLIC SMALL LETTER ES
0x00f2: 0x0442, # CYRILLIC SMALL LETTER TE
0x00f3: 0x0443, # CYRILLIC SMALL LETTER U
0x00f4: 0x0444, # CYRILLIC SMALL LETTER EF
0x00f5: 0x0445, # CYRILLIC SMALL LETTER HA
0x00f6: 0x0446, # CYRILLIC SMALL LETTER TSE
0x00f7: 0x0447, # CYRILLIC SMALL LETTER CHE
0x00f8: 0x0448, # CYRILLIC SMALL LETTER SHA
0x00f9: 0x0449, # CYRILLIC SMALL LETTER SHCHA
0x00fa: 0x044a, # CYRILLIC SMALL LETTER HARD SIGN
0x00fb: 0x044b, # CYRILLIC SMALL LETTER YERU
0x00fc: 0x044c, # CYRILLIC SMALL LETTER SOFT SIGN
0x00fd: 0x044d, # CYRILLIC SMALL LETTER E
0x00fe: 0x044e, # CYRILLIC SMALL LETTER YU
0x00ff: 0x044f, # CYRILLIC SMALL LETTER YA
})
### Encoding Map
encoding_map = {}
for k,v in decoding_map.items():
    encoding_map[v] = k

+ 0
- 78
lib/jython/Lib/encodings/cp1252.py

@@ -1,78 +0,0 @@
""" Python Character Mapping Codec generated from 'CP1252.TXT' with gencodec.py.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_map)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_map)
class StreamWriter(Codec,codecs.StreamWriter):
    pass
class StreamReader(Codec,codecs.StreamReader):
    pass
### encodings module API
def getregentry():
    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x20ac, # EURO SIGN
0x0081: None, # UNDEFINED
0x0082: 0x201a, # SINGLE LOW-9 QUOTATION MARK
0x0083: 0x0192, # LATIN SMALL LETTER F WITH HOOK
0x0084: 0x201e, # DOUBLE LOW-9 QUOTATION MARK
0x0085: 0x2026, # HORIZONTAL ELLIPSIS
0x0086: 0x2020, # DAGGER
0x0087: 0x2021, # DOUBLE DAGGER
0x0088: 0x02c6, # MODIFIER LETTER CIRCUMFLEX ACCENT
0x0089: 0x2030, # PER MILLE SIGN
0x008a: 0x0160, # LATIN CAPITAL LETTER S WITH CARON
0x008b: 0x2039, # SINGLE LEFT-POINTING ANGLE QUOTATION MARK
0x008c: 0x0152, # LATIN CAPITAL LIGATURE OE
0x008d: None, # UNDEFINED
0x008e: 0x017d, # LATIN CAPITAL LETTER Z WITH CARON
0x008f: None, # UNDEFINED
0x0090: None, # UNDEFINED
0x0091: 0x2018, # LEFT SINGLE QUOTATION MARK
0x0092: 0x2019, # RIGHT SINGLE QUOTATION MARK
0x0093: 0x201c, # LEFT DOUBLE QUOTATION MARK
0x0094: 0x201d, # RIGHT DOUBLE QUOTATION MARK
0x0095: 0x2022, # BULLET
0x0096: 0x2013, # EN DASH
0x0097: 0x2014, # EM DASH
0x0098: 0x02dc, # SMALL TILDE
0x0099: 0x2122, # TRADE MARK SIGN
0x009a: 0x0161, # LATIN SMALL LETTER S WITH CARON
0x009b: 0x203a, # SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
0x009c: 0x0153, # LATIN SMALL LIGATURE OE
0x009d: None, # UNDEFINED
0x009e: 0x017e, # LATIN SMALL LETTER Z WITH CARON
0x009f: 0x0178, # LATIN CAPITAL LETTER Y WITH DIAERESIS
})
### Encoding Map
encoding_map = {}
for k,v in decoding_map.items():
    encoding_map[v] = k
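The None entries marked UNDEFINED in tables such as the cp1252 map above stand for byte positions with no Unicode assignment. Under the default errors='strict' they should surface as a decode error, while errors='replace' substitutes U+FFFD; a small standalone sketch of that behaviour (not using the deleted module itself) might look like:

import codecs

decoding_map = codecs.make_identity_dict(range(256))
decoding_map[0x81] = None  # UNDEFINED, as in the cp1252 table above

try:
    codecs.charmap_decode(b"\x81", "strict", decoding_map)
except UnicodeDecodeError as exc:
    print("strict:", exc)          # decoding an undefined byte fails

text, _ = codecs.charmap_decode(b"\x81", "replace", decoding_map)
print("replace:", repr(text))      # '\ufffd'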

+ 0
- 153
lib/jython/Lib/encodings/cp1253.py

@@ -1,153 +0,0 @@
""" Python Character Mapping Codec generated from 'CP1253.TXT' with gencodec.py.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_map)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_map)
class StreamWriter(Codec,codecs.StreamWriter):
    pass
class StreamReader(Codec,codecs.StreamReader):
    pass
### encodings module API
def getregentry():
    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x20ac, # EURO SIGN
0x0081: None, # UNDEFINED
0x0082: 0x201a, # SINGLE LOW-9 QUOTATION MARK
0x0083: 0x0192, # LATIN SMALL LETTER F WITH HOOK
0x0084: 0x201e, # DOUBLE LOW-9 QUOTATION MARK
0x0085: 0x2026, # HORIZONTAL ELLIPSIS
0x0086: 0x2020, # DAGGER
0x0087: 0x2021, # DOUBLE DAGGER
0x0088: None, # UNDEFINED
0x0089: 0x2030, # PER MILLE SIGN
0x008a: None, # UNDEFINED
0x008b: 0x2039, # SINGLE LEFT-POINTING ANGLE QUOTATION MARK
0x008c: None, # UNDEFINED
0x008d: None, # UNDEFINED
0x008e: None, # UNDEFINED
0x008f: None, # UNDEFINED
0x0090: None, # UNDEFINED
0x0091: 0x2018, # LEFT SINGLE QUOTATION MARK
0x0092: 0x2019, # RIGHT SINGLE QUOTATION MARK
0x0093: 0x201c, # LEFT DOUBLE QUOTATION MARK
0x0094: 0x201d, # RIGHT DOUBLE QUOTATION MARK
0x0095: 0x2022, # BULLET
0x0096: 0x2013, # EN DASH
0x0097: 0x2014, # EM DASH
0x0098: None, # UNDEFINED
0x0099: 0x2122, # TRADE MARK SIGN
0x009a: None, # UNDEFINED
0x009b: 0x203a, # SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
0x009c: None, # UNDEFINED
0x009d: None, # UNDEFINED
0x009e: None, # UNDEFINED
0x009f: None, # UNDEFINED
0x00a1: 0x0385, # GREEK DIALYTIKA TONOS
0x00a2: 0x0386, # GREEK CAPITAL LETTER ALPHA WITH TONOS
0x00aa: None, # UNDEFINED
0x00af: 0x2015, # HORIZONTAL BAR
0x00b4: 0x0384, # GREEK TONOS
0x00b8: 0x0388, # GREEK CAPITAL LETTER EPSILON WITH TONOS
0x00b9: 0x0389, # GREEK CAPITAL LETTER ETA WITH TONOS
0x00ba: 0x038a, # GREEK CAPITAL LETTER IOTA WITH TONOS
0x00bc: 0x038c, # GREEK CAPITAL LETTER OMICRON WITH TONOS
0x00be: 0x038e, # GREEK CAPITAL LETTER UPSILON WITH TONOS
0x00bf: 0x038f, # GREEK CAPITAL LETTER OMEGA WITH TONOS
0x00c0: 0x0390, # GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS
0x00c1: 0x0391, # GREEK CAPITAL LETTER ALPHA
0x00c2: 0x0392, # GREEK CAPITAL LETTER BETA
0x00c3: 0x0393, # GREEK CAPITAL LETTER GAMMA
0x00c4: 0x0394, # GREEK CAPITAL LETTER DELTA
0x00c5: 0x0395, # GREEK CAPITAL LETTER EPSILON
0x00c6: 0x0396, # GREEK CAPITAL LETTER ZETA
0x00c7: 0x0397, # GREEK CAPITAL LETTER ETA
0x00c8: 0x0398, # GREEK CAPITAL LETTER THETA
0x00c9: 0x0399, # GREEK CAPITAL LETTER IOTA
0x00ca: 0x039a, # GREEK CAPITAL LETTER KAPPA
0x00cb: 0x039b, # GREEK CAPITAL LETTER LAMDA
0x00cc: 0x039c, # GREEK CAPITAL LETTER MU
0x00cd: 0x039d, # GREEK CAPITAL LETTER NU
0x00ce: 0x039e, # GREEK CAPITAL LETTER XI
0x00cf: 0x039f, # GREEK CAPITAL LETTER OMICRON
0x00d0: 0x03a0, # GREEK CAPITAL LETTER PI
0x00d1: 0x03a1, # GREEK CAPITAL LETTER RHO
0x00d2: None, # UNDEFINED
0x00d3: 0x03a3, # GREEK CAPITAL LETTER SIGMA
0x00d4: 0x03a4, # GREEK CAPITAL LETTER TAU
0x00d5: 0x03a5, # GREEK CAPITAL LETTER UPSILON
0x00d6: 0x03a6, # GREEK CAPITAL LETTER PHI
0x00d7: 0x03a7, # GREEK CAPITAL LETTER CHI
0x00d8: 0x03a8, # GREEK CAPITAL LETTER PSI
0x00d9: 0x03a9, # GREEK CAPITAL LETTER OMEGA
0x00da: 0x03aa, # GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
0x00db: 0x03ab, # GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
0x00dc: 0x03ac, # GREEK SMALL LETTER ALPHA WITH TONOS
0x00dd: 0x03ad, # GREEK SMALL LETTER EPSILON WITH TONOS
0x00de: 0x03ae, # GREEK SMALL LETTER ETA WITH TONOS
0x00df: 0x03af, # GREEK SMALL LETTER IOTA WITH TONOS
0x00e0: 0x03b0, # GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS
0x00e1: 0x03b1, # GREEK SMALL LETTER ALPHA
0x00e2: 0x03b2, # GREEK SMALL LETTER BETA
0x00e3: 0x03b3, # GREEK SMALL LETTER GAMMA
0x00e4: 0x03b4, # GREEK SMALL LETTER DELTA
0x00e5: 0x03b5, # GREEK SMALL LETTER EPSILON
0x00e6: 0x03b6, # GREEK SMALL LETTER ZETA
0x00e7: 0x03b7, # GREEK SMALL LETTER ETA
0x00e8: 0x03b8, # GREEK SMALL LETTER THETA
0x00e9: 0x03b9, # GREEK SMALL LETTER IOTA
0x00ea: 0x03ba, # GREEK SMALL LETTER KAPPA
0x00eb: 0x03bb, # GREEK SMALL LETTER LAMDA
0x00ec: 0x03bc, # GREEK SMALL LETTER MU
0x00ed: 0x03bd, # GREEK SMALL LETTER NU
0x00ee: 0x03be, # GREEK SMALL LETTER XI
0x00ef: 0x03bf, # GREEK SMALL LETTER OMICRON
0x00f0: 0x03c0, # GREEK SMALL LETTER PI
0x00f1: 0x03c1, # GREEK SMALL LETTER RHO
0x00f2: 0x03c2, # GREEK SMALL LETTER FINAL SIGMA
0x00f3: 0x03c3, # GREEK SMALL LETTER SIGMA
0x00f4: 0x03c4, # GREEK SMALL LETTER TAU
0x00f5: 0x03c5, # GREEK SMALL LETTER UPSILON
0x00f6: 0x03c6, # GREEK SMALL LETTER PHI
0x00f7: 0x03c7, # GREEK SMALL LETTER CHI
0x00f8: 0x03c8, # GREEK SMALL LETTER PSI
0x00f9: 0x03c9, # GREEK SMALL LETTER OMEGA
0x00fa: 0x03ca, # GREEK SMALL LETTER IOTA WITH DIALYTIKA
0x00fb: 0x03cb, # GREEK SMALL LETTER UPSILON WITH DIALYTIKA
0x00fc: 0x03cc, # GREEK SMALL LETTER OMICRON WITH TONOS
0x00fd: 0x03cd, # GREEK SMALL LETTER UPSILON WITH TONOS
0x00fe: 0x03ce, # GREEK SMALL LETTER OMEGA WITH TONOS
0x00ff: None, # UNDEFINED
})
### Encoding Map
encoding_map = {}
for k,v in decoding_map.items():
    encoding_map[v] = k

+ 0
- 84
lib/jython/Lib/encodings/cp1254.py

@@ -1,84 +0,0 @@
""" Python Character Mapping Codec generated from 'CP1254.TXT' with gencodec.py.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_map)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_map)
class StreamWriter(Codec,codecs.StreamWriter):
    pass
class StreamReader(Codec,codecs.StreamReader):
    pass
### encodings module API
def getregentry():
    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x20ac, # EURO SIGN
0x0081: None, # UNDEFINED
0x0082: 0x201a, # SINGLE LOW-9 QUOTATION MARK
0x0083: 0x0192, # LATIN SMALL LETTER F WITH HOOK
0x0084: 0x201e, # DOUBLE LOW-9 QUOTATION MARK
0x0085: 0x2026, # HORIZONTAL ELLIPSIS
0x0086: 0x2020, # DAGGER
0x0087: 0x2021, # DOUBLE DAGGER
0x0088: 0x02c6, # MODIFIER LETTER CIRCUMFLEX ACCENT
0x0089: 0x2030, # PER MILLE SIGN
0x008a: 0x0160, # LATIN CAPITAL LETTER S WITH CARON
0x008b: 0x2039, # SINGLE LEFT-POINTING ANGLE QUOTATION MARK
0x008c: 0x0152, # LATIN CAPITAL LIGATURE OE
0x008d: None, # UNDEFINED
0x008e: None, # UNDEFINED
0x008f: None, # UNDEFINED
0x0090: None, # UNDEFINED
0x0091: 0x2018, # LEFT SINGLE QUOTATION MARK
0x0092: 0x2019, # RIGHT SINGLE QUOTATION MARK
0x0093: 0x201c, # LEFT DOUBLE QUOTATION MARK
0x0094: 0x201d, # RIGHT DOUBLE QUOTATION MARK
0x0095: 0x2022, # BULLET
0x0096: 0x2013, # EN DASH
0x0097: 0x2014, # EM DASH
0x0098: 0x02dc, # SMALL TILDE
0x0099: 0x2122, # TRADE MARK SIGN
0x009a: 0x0161, # LATIN SMALL LETTER S WITH CARON
0x009b: 0x203a, # SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
0x009c: 0x0153, # LATIN SMALL LIGATURE OE
0x009d: None, # UNDEFINED
0x009e: None, # UNDEFINED
0x009f: 0x0178, # LATIN CAPITAL LETTER Y WITH DIAERESIS
0x00d0: 0x011e, # LATIN CAPITAL LETTER G WITH BREVE
0x00dd: 0x0130, # LATIN CAPITAL LETTER I WITH DOT ABOVE
0x00de: 0x015e, # LATIN CAPITAL LETTER S WITH CEDILLA
0x00f0: 0x011f, # LATIN SMALL LETTER G WITH BREVE
0x00fd: 0x0131, # LATIN SMALL LETTER DOTLESS I
0x00fe: 0x015f, # LATIN SMALL LETTER S WITH CEDILLA
})
### Encoding Map
encoding_map = {}
for k,v in decoding_map.items():
    encoding_map[v] = k

+ 0
- 145
lib/jython/Lib/encodings/cp1255.py

@@ -1,145 +0,0 @@
""" Python Character Mapping Codec generated from 'CP1255.TXT' with gencodec.py.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_map)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_map)
class StreamWriter(Codec,codecs.StreamWriter):
    pass
class StreamReader(Codec,codecs.StreamReader):
    pass
### encodings module API
def getregentry():
    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x20ac, # EURO SIGN
0x0081: None, # UNDEFINED
0x0082: 0x201a, # SINGLE LOW-9 QUOTATION MARK
0x0083: 0x0192, # LATIN SMALL LETTER F WITH HOOK
0x0084: 0x201e, # DOUBLE LOW-9 QUOTATION MARK
0x0085: 0x2026, # HORIZONTAL ELLIPSIS
0x0086: 0x2020, # DAGGER
0x0087: 0x2021, # DOUBLE DAGGER
0x0088: 0x02c6, # MODIFIER LETTER CIRCUMFLEX ACCENT
0x0089: 0x2030, # PER MILLE SIGN
0x008a: None, # UNDEFINED
0x008b: 0x2039, # SINGLE LEFT-POINTING ANGLE QUOTATION MARK
0x008c: None, # UNDEFINED
0x008d: None, # UNDEFINED
0x008e: None, # UNDEFINED
0x008f: None, # UNDEFINED
0x0090: None, # UNDEFINED
0x0091: 0x2018, # LEFT SINGLE QUOTATION MARK
0x0092: 0x2019, # RIGHT SINGLE QUOTATION MARK
0x0093: 0x201c, # LEFT DOUBLE QUOTATION MARK
0x0094: 0x201d, # RIGHT DOUBLE QUOTATION MARK
0x0095: 0x2022, # BULLET
0x0096: 0x2013, # EN DASH
0x0097: 0x2014, # EM DASH
0x0098: 0x02dc, # SMALL TILDE
0x0099: 0x2122, # TRADE MARK SIGN
0x009a: None, # UNDEFINED
0x009b: 0x203a, # SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
0x009c: None, # UNDEFINED
0x009d: None, # UNDEFINED
0x009e: None, # UNDEFINED
0x009f: None, # UNDEFINED
0x00a4: 0x20aa, # NEW SHEQEL SIGN
0x00aa: 0x00d7, # MULTIPLICATION SIGN
0x00ba: 0x00f7, # DIVISION SIGN
0x00c0: 0x05b0, # HEBREW POINT SHEVA
0x00c1: 0x05b1, # HEBREW POINT HATAF SEGOL
0x00c2: 0x05b2, # HEBREW POINT HATAF PATAH
0x00c3: 0x05b3, # HEBREW POINT HATAF QAMATS
0x00c4: 0x05b4, # HEBREW POINT HIRIQ
0x00c5: 0x05b5, # HEBREW POINT TSERE
0x00c6: 0x05b6, # HEBREW POINT SEGOL
0x00c7: 0x05b7, # HEBREW POINT PATAH
0x00c8: 0x05b8, # HEBREW POINT QAMATS
0x00c9: 0x05b9, # HEBREW POINT HOLAM
0x00ca: None, # UNDEFINED
0x00cb: 0x05bb, # HEBREW POINT QUBUTS
0x00cc: 0x05bc, # HEBREW POINT DAGESH OR MAPIQ
0x00cd: 0x05bd, # HEBREW POINT METEG
0x00ce: 0x05be, # HEBREW PUNCTUATION MAQAF
0x00cf: 0x05bf, # HEBREW POINT RAFE
0x00d0: 0x05c0, # HEBREW PUNCTUATION PASEQ
0x00d1: 0x05c1, # HEBREW POINT SHIN DOT
0x00d2: 0x05c2, # HEBREW POINT SIN DOT
0x00d3: 0x05c3, # HEBREW PUNCTUATION SOF PASUQ
0x00d4: 0x05f0, # HEBREW LIGATURE YIDDISH DOUBLE VAV
0x00d5: 0x05f1, # HEBREW LIGATURE YIDDISH VAV YOD
0x00d6: 0x05f2, # HEBREW LIGATURE YIDDISH DOUBLE YOD
0x00d7: 0x05f3, # HEBREW PUNCTUATION GERESH
0x00d8: 0x05f4, # HEBREW PUNCTUATION GERSHAYIM
0x00d9: None, # UNDEFINED
0x00da: None, # UNDEFINED
0x00db: None, # UNDEFINED
0x00dc: None, # UNDEFINED
0x00dd: None, # UNDEFINED
0x00de: None, # UNDEFINED
0x00df: None, # UNDEFINED
0x00e0: 0x05d0, # HEBREW LETTER ALEF
0x00e1: 0x05d1, # HEBREW LETTER BET
0x00e2: 0x05d2, # HEBREW LETTER GIMEL
0x00e3: 0x05d3, # HEBREW LETTER DALET
0x00e4: 0x05d4, # HEBREW LETTER HE
0x00e5: 0x05d5, # HEBREW LETTER VAV
0x00e6: 0x05d6, # HEBREW LETTER ZAYIN
0x00e7: 0x05d7, # HEBREW LETTER HET
0x00e8: 0x05d8, # HEBREW LETTER TET
0x00e9: 0x05d9, # HEBREW LETTER YOD
0x00ea: 0x05da, # HEBREW LETTER FINAL KAF
0x00eb: 0x05db, # HEBREW LETTER KAF
0x00ec: 0x05dc, # HEBREW LETTER LAMED
0x00ed: 0x05dd, # HEBREW LETTER FINAL MEM
0x00ee: 0x05de, # HEBREW LETTER MEM
0x00ef: 0x05df, # HEBREW LETTER FINAL NUN
0x00f0: 0x05e0, # HEBREW LETTER NUN
0x00f1: 0x05e1, # HEBREW LETTER SAMEKH
0x00f2: 0x05e2, # HEBREW LETTER AYIN
0x00f3: 0x05e3, # HEBREW LETTER FINAL PE
0x00f4: 0x05e4, # HEBREW LETTER PE
0x00f5: 0x05e5, # HEBREW LETTER FINAL TSADI
0x00f6: 0x05e6, # HEBREW LETTER TSADI
0x00f7: 0x05e7, # HEBREW LETTER QOF
0x00f8: 0x05e8, # HEBREW LETTER RESH
0x00f9: 0x05e9, # HEBREW LETTER SHIN
0x00fa: 0x05ea, # HEBREW LETTER TAV
0x00fb: None, # UNDEFINED
0x00fc: None, # UNDEFINED
0x00fd: 0x200e, # LEFT-TO-RIGHT MARK
0x00fe: 0x200f, # RIGHT-TO-LEFT MARK
0x00ff: None, # UNDEFINED
})
### Encoding Map
encoding_map = {}
for k,v in decoding_map.items():
    encoding_map[v] = k

+ 0
- 131
lib/jython/Lib/encodings/cp1256.py

@@ -1,131 +0,0 @@
""" Python Character Mapping Codec generated from 'CP1256.TXT' with gencodec.py.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_map)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_map)
class StreamWriter(Codec,codecs.StreamWriter):
    pass
class StreamReader(Codec,codecs.StreamReader):
    pass
### encodings module API
def getregentry():
    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x20ac, # EURO SIGN
0x0081: 0x067e, # ARABIC LETTER PEH
0x0082: 0x201a, # SINGLE LOW-9 QUOTATION MARK
0x0083: 0x0192, # LATIN SMALL LETTER F WITH HOOK
0x0084: 0x201e, # DOUBLE LOW-9 QUOTATION MARK
0x0085: 0x2026, # HORIZONTAL ELLIPSIS
0x0086: 0x2020, # DAGGER
0x0087: 0x2021, # DOUBLE DAGGER
0x0088: 0x02c6, # MODIFIER LETTER CIRCUMFLEX ACCENT
0x0089: 0x2030, # PER MILLE SIGN
0x008a: 0x0679, # ARABIC LETTER TTEH
0x008b: 0x2039, # SINGLE LEFT-POINTING ANGLE QUOTATION MARK
0x008c: 0x0152, # LATIN CAPITAL LIGATURE OE
0x008d: 0x0686, # ARABIC LETTER TCHEH
0x008e: 0x0698, # ARABIC LETTER JEH
0x008f: 0x0688, # ARABIC LETTER DDAL
0x0090: 0x06af, # ARABIC LETTER GAF
0x0091: 0x2018, # LEFT SINGLE QUOTATION MARK
0x0092: 0x2019, # RIGHT SINGLE QUOTATION MARK
0x0093: 0x201c, # LEFT DOUBLE QUOTATION MARK
0x0094: 0x201d, # RIGHT DOUBLE QUOTATION MARK
0x0095: 0x2022, # BULLET
0x0096: 0x2013, # EN DASH
0x0097: 0x2014, # EM DASH
0x0098: 0x06a9, # ARABIC LETTER KEHEH
0x0099: 0x2122, # TRADE MARK SIGN
0x009a: 0x0691, # ARABIC LETTER RREH
0x009b: 0x203a, # SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
0x009c: 0x0153, # LATIN SMALL LIGATURE OE
0x009d: 0x200c, # ZERO WIDTH NON-JOINER
0x009e: 0x200d, # ZERO WIDTH JOINER
0x009f: 0x06ba, # ARABIC LETTER NOON GHUNNA
0x00a1: 0x060c, # ARABIC COMMA
0x00aa: 0x06be, # ARABIC LETTER HEH DOACHASHMEE
0x00ba: 0x061b, # ARABIC SEMICOLON
0x00bf: 0x061f, # ARABIC QUESTION MARK
0x00c0: 0x06c1, # ARABIC LETTER HEH GOAL
0x00c1: 0x0621, # ARABIC LETTER HAMZA
0x00c2: 0x0622, # ARABIC LETTER ALEF WITH MADDA ABOVE
0x00c3: 0x0623, # ARABIC LETTER ALEF WITH HAMZA ABOVE
0x00c4: 0x0624, # ARABIC LETTER WAW WITH HAMZA ABOVE
0x00c5: 0x0625, # ARABIC LETTER ALEF WITH HAMZA BELOW
0x00c6: 0x0626, # ARABIC LETTER YEH WITH HAMZA ABOVE
0x00c7: 0x0627, # ARABIC LETTER ALEF
0x00c8: 0x0628, # ARABIC LETTER BEH
0x00c9: 0x0629, # ARABIC LETTER TEH MARBUTA
0x00ca: 0x062a, # ARABIC LETTER TEH
0x00cb: 0x062b, # ARABIC LETTER THEH
0x00cc: 0x062c, # ARABIC LETTER JEEM
0x00cd: 0x062d, # ARABIC LETTER HAH
0x00ce: 0x062e, # ARABIC LETTER KHAH
0x00cf: 0x062f, # ARABIC LETTER DAL
0x00d0: 0x0630, # ARABIC LETTER THAL
0x00d1: 0x0631, # ARABIC LETTER REH
0x00d2: 0x0632, # ARABIC LETTER ZAIN
0x00d3: 0x0633, # ARABIC LETTER SEEN
0x00d4: 0x0634, # ARABIC LETTER SHEEN
0x00d5: 0x0635, # ARABIC LETTER SAD
0x00d6: 0x0636, # ARABIC LETTER DAD
0x00d8: 0x0637, # ARABIC LETTER TAH
0x00d9: 0x0638, # ARABIC LETTER ZAH
0x00da: 0x0639, # ARABIC LETTER AIN
0x00db: 0x063a, # ARABIC LETTER GHAIN
0x00dc: 0x0640, # ARABIC TATWEEL
0x00dd: 0x0641, # ARABIC LETTER FEH
0x00de: 0x0642, # ARABIC LETTER QAF
0x00df: 0x0643, # ARABIC LETTER KAF
0x00e1: 0x0644, # ARABIC LETTER LAM
0x00e3: 0x0645, # ARABIC LETTER MEEM
0x00e4: 0x0646, # ARABIC LETTER NOON
0x00e5: 0x0647, # ARABIC LETTER HEH
0x00e6: 0x0648, # ARABIC LETTER WAW
0x00ec: 0x0649, # ARABIC LETTER ALEF MAKSURA
0x00ed: 0x064a, # ARABIC LETTER YEH
0x00f0: 0x064b, # ARABIC FATHATAN
0x00f1: 0x064c, # ARABIC DAMMATAN
0x00f2: 0x064d, # ARABIC KASRATAN
0x00f3: 0x064e, # ARABIC FATHA
0x00f5: 0x064f, # ARABIC DAMMA
0x00f6: 0x0650, # ARABIC KASRA
0x00f8: 0x0651, # ARABIC SHADDA
0x00fa: 0x0652, # ARABIC SUKUN
0x00fd: 0x200e, # LEFT-TO-RIGHT MARK
0x00fe: 0x200f, # RIGHT-TO-LEFT MARK
0x00ff: 0x06d2, # ARABIC LETTER YEH BARREE
})
### Encoding Map
encoding_map = {}
for k,v in decoding_map.items():
    encoding_map[v] = k

+ 0
- 133
lib/jython/Lib/encodings/cp1257.py

@@ -1,133 +0,0 @@
""" Python Character Mapping Codec generated from 'CP1257.TXT' with gencodec.py.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_map)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_map)
class StreamWriter(Codec,codecs.StreamWriter):
    pass
class StreamReader(Codec,codecs.StreamReader):
    pass
### encodings module API
def getregentry():
    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x20ac, # EURO SIGN
0x0081: None, # UNDEFINED
0x0082: 0x201a, # SINGLE LOW-9 QUOTATION MARK
0x0083: None, # UNDEFINED
0x0084: 0x201e, # DOUBLE LOW-9 QUOTATION MARK
0x0085: 0x2026, # HORIZONTAL ELLIPSIS
0x0086: 0x2020, # DAGGER
0x0087: 0x2021, # DOUBLE DAGGER
0x0088: None, # UNDEFINED
0x0089: 0x2030, # PER MILLE SIGN
0x008a: None, # UNDEFINED
0x008b: 0x2039, # SINGLE LEFT-POINTING ANGLE QUOTATION MARK
0x008c: None, # UNDEFINED
0x008d: 0x00a8, # DIAERESIS
0x008e: 0x02c7, # CARON
0x008f: 0x00b8, # CEDILLA
0x0090: None, # UNDEFINED
0x0091: 0x2018, # LEFT SINGLE QUOTATION MARK
0x0092: 0x2019, # RIGHT SINGLE QUOTATION MARK
0x0093: 0x201c, # LEFT DOUBLE QUOTATION MARK
0x0094: 0x201d, # RIGHT DOUBLE QUOTATION MARK
0x0095: 0x2022, # BULLET
0x0096: 0x2013, # EN DASH
0x0097: 0x2014, # EM DASH
0x0098: None, # UNDEFINED
0x0099: 0x2122, # TRADE MARK SIGN
0x009a: None, # UNDEFINED
0x009b: 0x203a, # SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
0x009c: None, # UNDEFINED
0x009d: 0x00af, # MACRON
0x009e: 0x02db, # OGONEK
0x009f: None, # UNDEFINED
0x00a1: None, # UNDEFINED
0x00a5: None, # UNDEFINED
0x00a8: 0x00d8, # LATIN CAPITAL LETTER O WITH STROKE
0x00aa: 0x0156, # LATIN CAPITAL LETTER R WITH CEDILLA
0x00af: 0x00c6, # LATIN CAPITAL LETTER AE
0x00b8: 0x00f8, # LATIN SMALL LETTER O WITH STROKE
0x00ba: 0x0157, # LATIN SMALL LETTER R WITH CEDILLA
0x00bf: 0x00e6, # LATIN SMALL LETTER AE
0x00c0: 0x0104, # LATIN CAPITAL LETTER A WITH OGONEK
0x00c1: 0x012e, # LATIN CAPITAL LETTER I WITH OGONEK
0x00c2: 0x0100, # LATIN CAPITAL LETTER A WITH MACRON
0x00c3: 0x0106, # LATIN CAPITAL LETTER C WITH ACUTE
0x00c6: 0x0118, # LATIN CAPITAL LETTER E WITH OGONEK
0x00c7: 0x0112, # LATIN CAPITAL LETTER E WITH MACRON
0x00c8: 0x010c, # LATIN CAPITAL LETTER C WITH CARON
0x00ca: 0x0179, # LATIN CAPITAL LETTER Z WITH ACUTE
0x00cb: 0x0116, # LATIN CAPITAL LETTER E WITH DOT ABOVE
0x00cc: 0x0122, # LATIN CAPITAL LETTER G WITH CEDILLA
0x00cd: 0x0136, # LATIN CAPITAL LETTER K WITH CEDILLA
0x00ce: 0x012a, # LATIN CAPITAL LETTER I WITH MACRON
0x00cf: 0x013b, # LATIN CAPITAL LETTER L WITH CEDILLA
0x00d0: 0x0160, # LATIN CAPITAL LETTER S WITH CARON
0x00d1: 0x0143, # LATIN CAPITAL LETTER N WITH ACUTE
0x00d2: 0x0145, # LATIN CAPITAL LETTER N WITH CEDILLA
0x00d4: 0x014c, # LATIN CAPITAL LETTER O WITH MACRON
0x00d8: 0x0172, # LATIN CAPITAL LETTER U WITH OGONEK
0x00d9: 0x0141, # LATIN CAPITAL LETTER L WITH STROKE
0x00da: 0x015a, # LATIN CAPITAL LETTER S WITH ACUTE
0x00db: 0x016a, # LATIN CAPITAL LETTER U WITH MACRON
0x00dd: 0x017b, # LATIN CAPITAL LETTER Z WITH DOT ABOVE
0x00de: 0x017d, # LATIN CAPITAL LETTER Z WITH CARON
0x00e0: 0x0105, # LATIN SMALL LETTER A WITH OGONEK
0x00e1: 0x012f, # LATIN SMALL LETTER I WITH OGONEK
0x00e2: 0x0101, # LATIN SMALL LETTER A WITH MACRON
0x00e3: 0x0107, # LATIN SMALL LETTER C WITH ACUTE
0x00e6: 0x0119, # LATIN SMALL LETTER E WITH OGONEK
0x00e7: 0x0113, # LATIN SMALL LETTER E WITH MACRON
0x00e8: 0x010d, # LATIN SMALL LETTER C WITH CARON
0x00ea: 0x017a, # LATIN SMALL LETTER Z WITH ACUTE
0x00eb: 0x0117, # LATIN SMALL LETTER E WITH DOT ABOVE
0x00ec: 0x0123, # LATIN SMALL LETTER G WITH CEDILLA
0x00ed: 0x0137, # LATIN SMALL LETTER K WITH CEDILLA
0x00ee: 0x012b, # LATIN SMALL LETTER I WITH MACRON
0x00ef: 0x013c, # LATIN SMALL LETTER L WITH CEDILLA
0x00f0: 0x0161, # LATIN SMALL LETTER S WITH CARON
0x00f1: 0x0144, # LATIN SMALL LETTER N WITH ACUTE
0x00f2: 0x0146, # LATIN SMALL LETTER N WITH CEDILLA
0x00f4: 0x014d, # LATIN SMALL LETTER O WITH MACRON
0x00f8: 0x0173, # LATIN SMALL LETTER U WITH OGONEK
0x00f9: 0x0142, # LATIN SMALL LETTER L WITH STROKE
0x00fa: 0x015b, # LATIN SMALL LETTER S WITH ACUTE
0x00fb: 0x016b, # LATIN SMALL LETTER U WITH MACRON
0x00fd: 0x017c, # LATIN SMALL LETTER Z WITH DOT ABOVE
0x00fe: 0x017e, # LATIN SMALL LETTER Z WITH CARON
0x00ff: 0x02d9, # DOT ABOVE
})
### Encoding Map
encoding_map = {}
for k,v in decoding_map.items():
    encoding_map[v] = k

+ 0
- 92
lib/jython/Lib/encodings/cp1258.py

@@ -1,92 +0,0 @@
""" Python Character Mapping Codec generated from 'CP1258.TXT' with gencodec.py.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_map)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_map)
class StreamWriter(Codec,codecs.StreamWriter):
    pass
class StreamReader(Codec,codecs.StreamReader):
    pass
### encodings module API
def getregentry():
    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x20ac, # EURO SIGN
0x0081: None, # UNDEFINED
0x0082: 0x201a, # SINGLE LOW-9 QUOTATION MARK
0x0083: 0x0192, # LATIN SMALL LETTER F WITH HOOK
0x0084: 0x201e, # DOUBLE LOW-9 QUOTATION MARK
0x0085: 0x2026, # HORIZONTAL ELLIPSIS
0x0086: 0x2020, # DAGGER
0x0087: 0x2021, # DOUBLE DAGGER
0x0088: 0x02c6, # MODIFIER LETTER CIRCUMFLEX ACCENT
0x0089: 0x2030, # PER MILLE SIGN
0x008a: None, # UNDEFINED
0x008b: 0x2039, # SINGLE LEFT-POINTING ANGLE QUOTATION MARK
0x008c: 0x0152, # LATIN CAPITAL LIGATURE OE
0x008d: None, # UNDEFINED
0x008e: None, # UNDEFINED
0x008f: None, # UNDEFINED
0x0090: None, # UNDEFINED
0x0091: 0x2018, # LEFT SINGLE QUOTATION MARK
0x0092: 0x2019, # RIGHT SINGLE QUOTATION MARK
0x0093: 0x201c, # LEFT DOUBLE QUOTATION MARK
0x0094: 0x201d, # RIGHT DOUBLE QUOTATION MARK
0x0095: 0x2022, # BULLET
0x0096: 0x2013, # EN DASH
0x0097: 0x2014, # EM DASH
0x0098: 0x02dc, # SMALL TILDE
0x0099: 0x2122, # TRADE MARK SIGN
0x009a: None, # UNDEFINED
0x009b: 0x203a, # SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
0x009c: 0x0153, # LATIN SMALL LIGATURE OE
0x009d: None, # UNDEFINED
0x009e: None, # UNDEFINED
0x009f: 0x0178, # LATIN CAPITAL LETTER Y WITH DIAERESIS
0x00c3: 0x0102, # LATIN CAPITAL LETTER A WITH BREVE
0x00cc: 0x0300, # COMBINING GRAVE ACCENT
0x00d0: 0x0110, # LATIN CAPITAL LETTER D WITH STROKE
0x00d2: 0x0309, # COMBINING HOOK ABOVE
0x00d5: 0x01a0, # LATIN CAPITAL LETTER O WITH HORN
0x00dd: 0x01af, # LATIN CAPITAL LETTER U WITH HORN
0x00de: 0x0303, # COMBINING TILDE
0x00e3: 0x0103, # LATIN SMALL LETTER A WITH BREVE
0x00ec: 0x0301, # COMBINING ACUTE ACCENT
0x00f0: 0x0111, # LATIN SMALL LETTER D WITH STROKE
0x00f2: 0x0323, # COMBINING DOT BELOW
0x00f5: 0x01a1, # LATIN SMALL LETTER O WITH HORN
0x00fd: 0x01b0, # LATIN SMALL LETTER U WITH HORN
0x00fe: 0x20ab, # DONG SIGN
})
### Encoding Map
encoding_map = {}
for k,v in decoding_map.items():
    encoding_map[v] = k

+ 0
- 282
lib/jython/Lib/encodings/cp424.py

@@ -1,282 +0,0 @@
""" Python Character Mapping Codec generated from 'CP424.TXT' with gencodec.py.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_map)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_map)
class StreamWriter(Codec,codecs.StreamWriter):
    pass
class StreamReader(Codec,codecs.StreamReader):
    pass
### encodings module API
def getregentry():
    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0004: 0x009c, # SELECT
0x0005: 0x0009, # HORIZONTAL TABULATION
0x0006: 0x0086, # REQUIRED NEW LINE
0x0007: 0x007f, # DELETE
0x0008: 0x0097, # GRAPHIC ESCAPE
0x0009: 0x008d, # SUPERSCRIPT
0x000a: 0x008e, # REPEAT
0x0014: 0x009d, # RESTORE/ENABLE PRESENTATION
0x0015: 0x0085, # NEW LINE
0x0016: 0x0008, # BACKSPACE
0x0017: 0x0087, # PROGRAM OPERATOR COMMUNICATION
0x001a: 0x0092, # UNIT BACK SPACE
0x001b: 0x008f, # CUSTOMER USE ONE
0x0020: 0x0080, # DIGIT SELECT
0x0021: 0x0081, # START OF SIGNIFICANCE
0x0022: 0x0082, # FIELD SEPARATOR
0x0023: 0x0083, # WORD UNDERSCORE
0x0024: 0x0084, # BYPASS OR INHIBIT PRESENTATION
0x0025: 0x000a, # LINE FEED
0x0026: 0x0017, # END OF TRANSMISSION BLOCK
0x0027: 0x001b, # ESCAPE
0x0028: 0x0088, # SET ATTRIBUTE
0x0029: 0x0089, # START FIELD EXTENDED
0x002a: 0x008a, # SET MODE OR SWITCH
0x002b: 0x008b, # CONTROL SEQUENCE PREFIX
0x002c: 0x008c, # MODIFY FIELD ATTRIBUTE
0x002d: 0x0005, # ENQUIRY
0x002e: 0x0006, # ACKNOWLEDGE
0x002f: 0x0007, # BELL
0x0030: 0x0090, # <reserved>
0x0031: 0x0091, # <reserved>
0x0032: 0x0016, # SYNCHRONOUS IDLE
0x0033: 0x0093, # INDEX RETURN
0x0034: 0x0094, # PRESENTATION POSITION
0x0035: 0x0095, # TRANSPARENT
0x0036: 0x0096, # NUMERIC BACKSPACE
0x0037: 0x0004, # END OF TRANSMISSION
0x0038: 0x0098, # SUBSCRIPT
0x0039: 0x0099, # INDENT TABULATION
0x003a: 0x009a, # REVERSE FORM FEED
0x003b: 0x009b, # CUSTOMER USE THREE
0x003c: 0x0014, # DEVICE CONTROL FOUR
0x003d: 0x0015, # NEGATIVE ACKNOWLEDGE
0x003e: 0x009e, # <reserved>
0x003f: 0x001a, # SUBSTITUTE
0x0040: 0x0020, # SPACE
0x0041: 0x05d0, # HEBREW LETTER ALEF
0x0042: 0x05d1, # HEBREW LETTER BET
0x0043: 0x05d2, # HEBREW LETTER GIMEL
0x0044: 0x05d3, # HEBREW LETTER DALET
0x0045: 0x05d4, # HEBREW LETTER HE
0x0046: 0x05d5, # HEBREW LETTER VAV
0x0047: 0x05d6, # HEBREW LETTER ZAYIN
0x0048: 0x05d7, # HEBREW LETTER HET
0x0049: 0x05d8, # HEBREW LETTER TET
0x004a: 0x00a2, # CENT SIGN
0x004b: 0x002e, # FULL STOP
0x004c: 0x003c, # LESS-THAN SIGN
0x004d: 0x0028, # LEFT PARENTHESIS
0x004e: 0x002b, # PLUS SIGN
0x004f: 0x007c, # VERTICAL LINE
0x0050: 0x0026, # AMPERSAND
0x0051: 0x05d9, # HEBREW LETTER YOD
0x0052: 0x05da, # HEBREW LETTER FINAL KAF
0x0053: 0x05db, # HEBREW LETTER KAF
0x0054: 0x05dc, # HEBREW LETTER LAMED
0x0055: 0x05dd, # HEBREW LETTER FINAL MEM
0x0056: 0x05de, # HEBREW LETTER MEM
0x0057: 0x05df, # HEBREW LETTER FINAL NUN
0x0058: 0x05e0, # HEBREW LETTER NUN
0x0059: 0x05e1, # HEBREW LETTER SAMEKH
0x005a: 0x0021, # EXCLAMATION MARK
0x005b: 0x0024, # DOLLAR SIGN
0x005c: 0x002a, # ASTERISK
0x005d: 0x0029, # RIGHT PARENTHESIS
0x005e: 0x003b, # SEMICOLON
0x005f: 0x00ac, # NOT SIGN
0x0060: 0x002d, # HYPHEN-MINUS
0x0061: 0x002f, # SOLIDUS
0x0062: 0x05e2, # HEBREW LETTER AYIN
0x0063: 0x05e3, # HEBREW LETTER FINAL PE
0x0064: 0x05e4, # HEBREW LETTER PE
0x0065: 0x05e5, # HEBREW LETTER FINAL TSADI
0x0066: 0x05e6, # HEBREW LETTER TSADI
0x0067: 0x05e7, # HEBREW LETTER QOF
0x0068: 0x05e8, # HEBREW LETTER RESH
0x0069: 0x05e9, # HEBREW LETTER SHIN
0x006a: 0x00a6, # BROKEN BAR
0x006b: 0x002c, # COMMA
0x006c: 0x0025, # PERCENT SIGN
0x006d: 0x005f, # LOW LINE
0x006e: 0x003e, # GREATER-THAN SIGN
0x006f: 0x003f, # QUESTION MARK
0x0070: None, # UNDEFINED
0x0071: 0x05ea, # HEBREW LETTER TAV
0x0072: None, # UNDEFINED
0x0073: None, # UNDEFINED
0x0074: 0x00a0, # NO-BREAK SPACE
0x0075: None, # UNDEFINED
0x0076: None, # UNDEFINED
0x0077: None, # UNDEFINED
0x0078: 0x2017, # DOUBLE LOW LINE
0x0079: 0x0060, # GRAVE ACCENT
0x007a: 0x003a, # COLON
0x007b: 0x0023, # NUMBER SIGN
0x007c: 0x0040, # COMMERCIAL AT
0x007d: 0x0027, # APOSTROPHE
0x007e: 0x003d, # EQUALS SIGN
0x007f: 0x0022, # QUOTATION MARK
0x0080: None, # UNDEFINED
0x0081: 0x0061, # LATIN SMALL LETTER A
0x0082: 0x0062, # LATIN SMALL LETTER B
0x0083: 0x0063, # LATIN SMALL LETTER C
0x0084: 0x0064, # LATIN SMALL LETTER D
0x0085: 0x0065, # LATIN SMALL LETTER E
0x0086: 0x0066, # LATIN SMALL LETTER F
0x0087: 0x0067, # LATIN SMALL LETTER G
0x0088: 0x0068, # LATIN SMALL LETTER H
0x0089: 0x0069, # LATIN SMALL LETTER I
0x008a: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x008b: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x008c: None, # UNDEFINED
0x008d: None, # UNDEFINED
0x008e: None, # UNDEFINED
0x008f: 0x00b1, # PLUS-MINUS SIGN
0x0090: 0x00b0, # DEGREE SIGN
0x0091: 0x006a, # LATIN SMALL LETTER J
0x0092: 0x006b, # LATIN SMALL LETTER K
0x0093: 0x006c, # LATIN SMALL LETTER L
0x0094: 0x006d, # LATIN SMALL LETTER M
0x0095: 0x006e, # LATIN SMALL LETTER N
0x0096: 0x006f, # LATIN SMALL LETTER O
0x0097: 0x0070, # LATIN SMALL LETTER P
0x0098: 0x0071, # LATIN SMALL LETTER Q
0x0099: 0x0072, # LATIN SMALL LETTER R
0x009a: None, # UNDEFINED
0x009b: None, # UNDEFINED
0x009c: None, # UNDEFINED
0x009d: 0x00b8, # CEDILLA
0x009e: None, # UNDEFINED
0x009f: 0x00a4, # CURRENCY SIGN
0x00a0: 0x00b5, # MICRO SIGN
0x00a1: 0x007e, # TILDE
0x00a2: 0x0073, # LATIN SMALL LETTER S
0x00a3: 0x0074, # LATIN SMALL LETTER T
0x00a4: 0x0075, # LATIN SMALL LETTER U
0x00a5: 0x0076, # LATIN SMALL LETTER V
0x00a6: 0x0077, # LATIN SMALL LETTER W
0x00a7: 0x0078, # LATIN SMALL LETTER X
0x00a8: 0x0079, # LATIN SMALL LETTER Y
0x00a9: 0x007a, # LATIN SMALL LETTER Z
0x00aa: None, # UNDEFINED
0x00ab: None, # UNDEFINED
0x00ac: None, # UNDEFINED
0x00ad: None, # UNDEFINED
0x00ae: None, # UNDEFINED
0x00af: 0x00ae, # REGISTERED SIGN
0x00b0: 0x005e, # CIRCUMFLEX ACCENT
0x00b1: 0x00a3, # POUND SIGN
0x00b2: 0x00a5, # YEN SIGN
0x00b3: 0x00b7, # MIDDLE DOT
0x00b4: 0x00a9, # COPYRIGHT SIGN
0x00b5: 0x00a7, # SECTION SIGN
0x00b7: 0x00bc, # VULGAR FRACTION ONE QUARTER
0x00b8: 0x00bd, # VULGAR FRACTION ONE HALF
0x00b9: 0x00be, # VULGAR FRACTION THREE QUARTERS
0x00ba: 0x005b, # LEFT SQUARE BRACKET
0x00bb: 0x005d, # RIGHT SQUARE BRACKET
0x00bc: 0x00af, # MACRON
0x00bd: 0x00a8, # DIAERESIS
0x00be: 0x00b4, # ACUTE ACCENT
0x00bf: 0x00d7, # MULTIPLICATION SIGN
0x00c0: 0x007b, # LEFT CURLY BRACKET
0x00c1: 0x0041, # LATIN CAPITAL LETTER A
0x00c2: 0x0042, # LATIN CAPITAL LETTER B
0x00c3: 0x0043, # LATIN CAPITAL LETTER C
0x00c4: 0x0044, # LATIN CAPITAL LETTER D
0x00c5: 0x0045, # LATIN CAPITAL LETTER E
0x00c6: 0x0046, # LATIN CAPITAL LETTER F
0x00c7: 0x0047, # LATIN CAPITAL LETTER G
0x00c8: 0x0048, # LATIN CAPITAL LETTER H
0x00c9: 0x0049, # LATIN CAPITAL LETTER I
0x00ca: 0x00ad, # SOFT HYPHEN
0x00cb: None, # UNDEFINED
0x00cc: None, # UNDEFINED
0x00cd: None, # UNDEFINED
0x00ce: None, # UNDEFINED
0x00cf: None, # UNDEFINED
0x00d0: 0x007d, # RIGHT CURLY BRACKET
0x00d1: 0x004a, # LATIN CAPITAL LETTER J
0x00d2: 0x004b, # LATIN CAPITAL LETTER K
0x00d3: 0x004c, # LATIN CAPITAL LETTER L
0x00d4: 0x004d, # LATIN CAPITAL LETTER M
0x00d5: 0x004e, # LATIN CAPITAL LETTER N
0x00d6: 0x004f, # LATIN CAPITAL LETTER O
0x00d7: 0x0050, # LATIN CAPITAL LETTER P
0x00d8: 0x0051, # LATIN CAPITAL LETTER Q
0x00d9: 0x0052, # LATIN CAPITAL LETTER R
0x00da: 0x00b9, # SUPERSCRIPT ONE
0x00db: None, # UNDEFINED
0x00dc: None, # UNDEFINED
0x00dd: None, # UNDEFINED
0x00de: None, # UNDEFINED
0x00df: None, # UNDEFINED
0x00e0: 0x005c, # REVERSE SOLIDUS
0x00e1: 0x00f7, # DIVISION SIGN
0x00e2: 0x0053, # LATIN CAPITAL LETTER S
0x00e3: 0x0054, # LATIN CAPITAL LETTER T
0x00e4: 0x0055, # LATIN CAPITAL LETTER U
0x00e5: 0x0056, # LATIN CAPITAL LETTER V
0x00e6: 0x0057, # LATIN CAPITAL LETTER W
0x00e7: 0x0058, # LATIN CAPITAL LETTER X
0x00e8: 0x0059, # LATIN CAPITAL LETTER Y
0x00e9: 0x005a, # LATIN CAPITAL LETTER Z
0x00ea: 0x00b2, # SUPERSCRIPT TWO
0x00eb: None, # UNDEFINED
0x00ec: None, # UNDEFINED
0x00ed: None, # UNDEFINED
0x00ee: None, # UNDEFINED
0x00ef: None, # UNDEFINED
0x00f0: 0x0030, # DIGIT ZERO
0x00f1: 0x0031, # DIGIT ONE
0x00f2: 0x0032, # DIGIT TWO
0x00f3: 0x0033, # DIGIT THREE
0x00f4: 0x0034, # DIGIT FOUR
0x00f5: 0x0035, # DIGIT FIVE
0x00f6: 0x0036, # DIGIT SIX
0x00f7: 0x0037, # DIGIT SEVEN
0x00f8: 0x0038, # DIGIT EIGHT
0x00f9: 0x0039, # DIGIT NINE
0x00fa: 0x00b3, # SUPERSCRIPT THREE
0x00fb: None, # UNDEFINED
0x00fc: None, # UNDEFINED
0x00fd: None, # UNDEFINED
0x00fe: None, # UNDEFINED
0x00ff: 0x009f, # EIGHT ONES
})
### Encoding Map
encoding_map = {}
for k,v in decoding_map.items():
    encoding_map[v] = k
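Every module in this directory ends with the same inversion loop as above: the decoding map (byte value to Unicode ordinal, with None marking an UNDEFINED position) is flipped into the table that codecs.charmap_encode expects. A minimal sketch of that idiom with a hypothetical three-entry excerpt, not taken from any of the deleted files:

import codecs

# Hypothetical excerpt: keys are byte values, values are Unicode ordinals,
# None marks an undefined slot.
decoding_map = {0x41: 0x0041, 0x81: 0x00fc, 0x8c: None}
encoding_map = {}
for k, v in decoding_map.items():
    encoding_map[v] = k
# None also ends up as a key, which is harmless: charmap_encode only ever
# looks up integer ordinals.
codecs.charmap_encode('\u00fc', 'strict', encoding_map)    # (b'\x81', 1)
codecs.charmap_decode(b'\x81', 'strict', decoding_map)     # ('\u00fc', 1)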

+ 0
- 174
lib/jython/Lib/encodings/cp437.py

@@ -1,174 +0,0 @@
""" Python Character Mapping Codec generated from 'CP437.TXT' with gencodec.py.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_map)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_map)
class StreamWriter(Codec,codecs.StreamWriter):
    pass
class StreamReader(Codec,codecs.StreamReader):
    pass
### encodings module API
def getregentry():
    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
0x0086: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE
0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
0x008b: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS
0x008c: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x008d: 0x00ec, # LATIN SMALL LETTER I WITH GRAVE
0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x008f: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x0091: 0x00e6, # LATIN SMALL LIGATURE AE
0x0092: 0x00c6, # LATIN CAPITAL LIGATURE AE
0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
0x0095: 0x00f2, # LATIN SMALL LETTER O WITH GRAVE
0x0096: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x0097: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE
0x0098: 0x00ff, # LATIN SMALL LETTER Y WITH DIAERESIS
0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x009b: 0x00a2, # CENT SIGN
0x009c: 0x00a3, # POUND SIGN
0x009d: 0x00a5, # YEN SIGN
0x009e: 0x20a7, # PESETA SIGN
0x009f: 0x0192, # LATIN SMALL LETTER F WITH HOOK
0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
0x00a4: 0x00f1, # LATIN SMALL LETTER N WITH TILDE
0x00a5: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE
0x00a6: 0x00aa, # FEMININE ORDINAL INDICATOR
0x00a7: 0x00ba, # MASCULINE ORDINAL INDICATOR
0x00a8: 0x00bf, # INVERTED QUESTION MARK
0x00a9: 0x2310, # REVERSED NOT SIGN
0x00aa: 0x00ac, # NOT SIGN
0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x258c, # LEFT HALF BLOCK
0x00de: 0x2590, # RIGHT HALF BLOCK
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x03b1, # GREEK SMALL LETTER ALPHA
0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
0x00e2: 0x0393, # GREEK CAPITAL LETTER GAMMA
0x00e3: 0x03c0, # GREEK SMALL LETTER PI
0x00e4: 0x03a3, # GREEK CAPITAL LETTER SIGMA
0x00e5: 0x03c3, # GREEK SMALL LETTER SIGMA
0x00e6: 0x00b5, # MICRO SIGN
0x00e7: 0x03c4, # GREEK SMALL LETTER TAU
0x00e8: 0x03a6, # GREEK CAPITAL LETTER PHI
0x00e9: 0x0398, # GREEK CAPITAL LETTER THETA
0x00ea: 0x03a9, # GREEK CAPITAL LETTER OMEGA
0x00eb: 0x03b4, # GREEK SMALL LETTER DELTA
0x00ec: 0x221e, # INFINITY
0x00ed: 0x03c6, # GREEK SMALL LETTER PHI
0x00ee: 0x03b5, # GREEK SMALL LETTER EPSILON
0x00ef: 0x2229, # INTERSECTION
0x00f0: 0x2261, # IDENTICAL TO
0x00f1: 0x00b1, # PLUS-MINUS SIGN
0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO
0x00f3: 0x2264, # LESS-THAN OR EQUAL TO
0x00f4: 0x2320, # TOP HALF INTEGRAL
0x00f5: 0x2321, # BOTTOM HALF INTEGRAL
0x00f6: 0x00f7, # DIVISION SIGN
0x00f7: 0x2248, # ALMOST EQUAL TO
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x2219, # BULLET OPERATOR
0x00fa: 0x00b7, # MIDDLE DOT
0x00fb: 0x221a, # SQUARE ROOT
0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N
0x00fd: 0x00b2, # SUPERSCRIPT TWO
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Encoding Map
encoding_map = {}
for k,v in decoding_map.items():
    encoding_map[v] = k
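The table above can be spot-checked against the cp437 codec that modern CPython still ships; the illustrative round-trip below assumes the stock CPython codec, not the deleted Jython copy:

# Illustrative round-trip through CPython's built-in cp437 codec.
assert b'\xb0'.decode('cp437') == '\u2591'     # 0x00b0 -> LIGHT SHADE
assert b'\xe1'.decode('cp437') == '\u00df'     # 0x00e1 -> LATIN SMALL LETTER SHARP S
assert '\u2591\u00df'.encode('cp437') == b'\xb0\xe1'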

+ 0
- 282
lib/jython/Lib/encodings/cp500.py

@@ -1,282 +0,0 @@
""" Python Character Mapping Codec generated from 'CP500.TXT' with gencodec.py.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_map)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_map)
class StreamWriter(Codec,codecs.StreamWriter):
    pass
class StreamReader(Codec,codecs.StreamReader):
    pass
### encodings module API
def getregentry():
    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0004: 0x009c, # CONTROL
0x0005: 0x0009, # HORIZONTAL TABULATION
0x0006: 0x0086, # CONTROL
0x0007: 0x007f, # DELETE
0x0008: 0x0097, # CONTROL
0x0009: 0x008d, # CONTROL
0x000a: 0x008e, # CONTROL
0x0014: 0x009d, # CONTROL
0x0015: 0x0085, # CONTROL
0x0016: 0x0008, # BACKSPACE
0x0017: 0x0087, # CONTROL
0x001a: 0x0092, # CONTROL
0x001b: 0x008f, # CONTROL
0x0020: 0x0080, # CONTROL
0x0021: 0x0081, # CONTROL
0x0022: 0x0082, # CONTROL
0x0023: 0x0083, # CONTROL
0x0024: 0x0084, # CONTROL
0x0025: 0x000a, # LINE FEED
0x0026: 0x0017, # END OF TRANSMISSION BLOCK
0x0027: 0x001b, # ESCAPE
0x0028: 0x0088, # CONTROL
0x0029: 0x0089, # CONTROL
0x002a: 0x008a, # CONTROL
0x002b: 0x008b, # CONTROL
0x002c: 0x008c, # CONTROL
0x002d: 0x0005, # ENQUIRY
0x002e: 0x0006, # ACKNOWLEDGE
0x002f: 0x0007, # BELL
0x0030: 0x0090, # CONTROL
0x0031: 0x0091, # CONTROL
0x0032: 0x0016, # SYNCHRONOUS IDLE
0x0033: 0x0093, # CONTROL
0x0034: 0x0094, # CONTROL
0x0035: 0x0095, # CONTROL
0x0036: 0x0096, # CONTROL
0x0037: 0x0004, # END OF TRANSMISSION
0x0038: 0x0098, # CONTROL
0x0039: 0x0099, # CONTROL
0x003a: 0x009a, # CONTROL
0x003b: 0x009b, # CONTROL
0x003c: 0x0014, # DEVICE CONTROL FOUR
0x003d: 0x0015, # NEGATIVE ACKNOWLEDGE
0x003e: 0x009e, # CONTROL
0x003f: 0x001a, # SUBSTITUTE
0x0040: 0x0020, # SPACE
0x0041: 0x00a0, # NO-BREAK SPACE
0x0042: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x0043: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
0x0044: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
0x0045: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
0x0046: 0x00e3, # LATIN SMALL LETTER A WITH TILDE
0x0047: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE
0x0048: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
0x0049: 0x00f1, # LATIN SMALL LETTER N WITH TILDE
0x004a: 0x005b, # LEFT SQUARE BRACKET
0x004b: 0x002e, # FULL STOP
0x004c: 0x003c, # LESS-THAN SIGN
0x004d: 0x0028, # LEFT PARENTHESIS
0x004e: 0x002b, # PLUS SIGN
0x004f: 0x0021, # EXCLAMATION MARK
0x0050: 0x0026, # AMPERSAND
0x0051: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x0052: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x0053: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
0x0054: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
0x0055: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
0x0056: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x0057: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS
0x0058: 0x00ec, # LATIN SMALL LETTER I WITH GRAVE
0x0059: 0x00df, # LATIN SMALL LETTER SHARP S (GERMAN)
0x005a: 0x005d, # RIGHT SQUARE BRACKET
0x005b: 0x0024, # DOLLAR SIGN
0x005c: 0x002a, # ASTERISK
0x005d: 0x0029, # RIGHT PARENTHESIS
0x005e: 0x003b, # SEMICOLON
0x005f: 0x005e, # CIRCUMFLEX ACCENT
0x0060: 0x002d, # HYPHEN-MINUS
0x0061: 0x002f, # SOLIDUS
0x0062: 0x00c2, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
0x0063: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x0064: 0x00c0, # LATIN CAPITAL LETTER A WITH GRAVE
0x0065: 0x00c1, # LATIN CAPITAL LETTER A WITH ACUTE
0x0066: 0x00c3, # LATIN CAPITAL LETTER A WITH TILDE
0x0067: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x0068: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
0x0069: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE
0x006a: 0x00a6, # BROKEN BAR
0x006b: 0x002c, # COMMA
0x006c: 0x0025, # PERCENT SIGN
0x006d: 0x005f, # LOW LINE
0x006e: 0x003e, # GREATER-THAN SIGN
0x006f: 0x003f, # QUESTION MARK
0x0070: 0x00f8, # LATIN SMALL LETTER O WITH STROKE
0x0071: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x0072: 0x00ca, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
0x0073: 0x00cb, # LATIN CAPITAL LETTER E WITH DIAERESIS
0x0074: 0x00c8, # LATIN CAPITAL LETTER E WITH GRAVE
0x0075: 0x00cd, # LATIN CAPITAL LETTER I WITH ACUTE
0x0076: 0x00ce, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
0x0077: 0x00cf, # LATIN CAPITAL LETTER I WITH DIAERESIS
0x0078: 0x00cc, # LATIN CAPITAL LETTER I WITH GRAVE
0x0079: 0x0060, # GRAVE ACCENT
0x007a: 0x003a, # COLON
0x007b: 0x0023, # NUMBER SIGN
0x007c: 0x0040, # COMMERCIAL AT
0x007d: 0x0027, # APOSTROPHE
0x007e: 0x003d, # EQUALS SIGN
0x007f: 0x0022, # QUOTATION MARK
0x0080: 0x00d8, # LATIN CAPITAL LETTER O WITH STROKE
0x0081: 0x0061, # LATIN SMALL LETTER A
0x0082: 0x0062, # LATIN SMALL LETTER B
0x0083: 0x0063, # LATIN SMALL LETTER C
0x0084: 0x0064, # LATIN SMALL LETTER D
0x0085: 0x0065, # LATIN SMALL LETTER E
0x0086: 0x0066, # LATIN SMALL LETTER F
0x0087: 0x0067, # LATIN SMALL LETTER G
0x0088: 0x0068, # LATIN SMALL LETTER H
0x0089: 0x0069, # LATIN SMALL LETTER I
0x008a: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x008b: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x008c: 0x00f0, # LATIN SMALL LETTER ETH (ICELANDIC)
0x008d: 0x00fd, # LATIN SMALL LETTER Y WITH ACUTE
0x008e: 0x00fe, # LATIN SMALL LETTER THORN (ICELANDIC)
0x008f: 0x00b1, # PLUS-MINUS SIGN
0x0090: 0x00b0, # DEGREE SIGN
0x0091: 0x006a, # LATIN SMALL LETTER J
0x0092: 0x006b, # LATIN SMALL LETTER K
0x0093: 0x006c, # LATIN SMALL LETTER L
0x0094: 0x006d, # LATIN SMALL LETTER M
0x0095: 0x006e, # LATIN SMALL LETTER N
0x0096: 0x006f, # LATIN SMALL LETTER O
0x0097: 0x0070, # LATIN SMALL LETTER P
0x0098: 0x0071, # LATIN SMALL LETTER Q
0x0099: 0x0072, # LATIN SMALL LETTER R
0x009a: 0x00aa, # FEMININE ORDINAL INDICATOR
0x009b: 0x00ba, # MASCULINE ORDINAL INDICATOR
0x009c: 0x00e6, # LATIN SMALL LIGATURE AE
0x009d: 0x00b8, # CEDILLA
0x009e: 0x00c6, # LATIN CAPITAL LIGATURE AE
0x009f: 0x00a4, # CURRENCY SIGN
0x00a0: 0x00b5, # MICRO SIGN
0x00a1: 0x007e, # TILDE
0x00a2: 0x0073, # LATIN SMALL LETTER S
0x00a3: 0x0074, # LATIN SMALL LETTER T
0x00a4: 0x0075, # LATIN SMALL LETTER U
0x00a5: 0x0076, # LATIN SMALL LETTER V
0x00a6: 0x0077, # LATIN SMALL LETTER W
0x00a7: 0x0078, # LATIN SMALL LETTER X
0x00a8: 0x0079, # LATIN SMALL LETTER Y
0x00a9: 0x007a, # LATIN SMALL LETTER Z
0x00aa: 0x00a1, # INVERTED EXCLAMATION MARK
0x00ab: 0x00bf, # INVERTED QUESTION MARK
0x00ac: 0x00d0, # LATIN CAPITAL LETTER ETH (ICELANDIC)
0x00ad: 0x00dd, # LATIN CAPITAL LETTER Y WITH ACUTE
0x00ae: 0x00de, # LATIN CAPITAL LETTER THORN (ICELANDIC)
0x00af: 0x00ae, # REGISTERED SIGN
0x00b0: 0x00a2, # CENT SIGN
0x00b1: 0x00a3, # POUND SIGN
0x00b2: 0x00a5, # YEN SIGN
0x00b3: 0x00b7, # MIDDLE DOT
0x00b4: 0x00a9, # COPYRIGHT SIGN
0x00b5: 0x00a7, # SECTION SIGN
0x00b7: 0x00bc, # VULGAR FRACTION ONE QUARTER
0x00b8: 0x00bd, # VULGAR FRACTION ONE HALF
0x00b9: 0x00be, # VULGAR FRACTION THREE QUARTERS
0x00ba: 0x00ac, # NOT SIGN
0x00bb: 0x007c, # VERTICAL LINE
0x00bc: 0x00af, # MACRON
0x00bd: 0x00a8, # DIAERESIS
0x00be: 0x00b4, # ACUTE ACCENT
0x00bf: 0x00d7, # MULTIPLICATION SIGN
0x00c0: 0x007b, # LEFT CURLY BRACKET
0x00c1: 0x0041, # LATIN CAPITAL LETTER A
0x00c2: 0x0042, # LATIN CAPITAL LETTER B
0x00c3: 0x0043, # LATIN CAPITAL LETTER C
0x00c4: 0x0044, # LATIN CAPITAL LETTER D
0x00c5: 0x0045, # LATIN CAPITAL LETTER E
0x00c6: 0x0046, # LATIN CAPITAL LETTER F
0x00c7: 0x0047, # LATIN CAPITAL LETTER G
0x00c8: 0x0048, # LATIN CAPITAL LETTER H
0x00c9: 0x0049, # LATIN CAPITAL LETTER I
0x00ca: 0x00ad, # SOFT HYPHEN
0x00cb: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x00cc: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
0x00cd: 0x00f2, # LATIN SMALL LETTER O WITH GRAVE
0x00ce: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x00cf: 0x00f5, # LATIN SMALL LETTER O WITH TILDE
0x00d0: 0x007d, # RIGHT CURLY BRACKET
0x00d1: 0x004a, # LATIN CAPITAL LETTER J
0x00d2: 0x004b, # LATIN CAPITAL LETTER K
0x00d3: 0x004c, # LATIN CAPITAL LETTER L
0x00d4: 0x004d, # LATIN CAPITAL LETTER M
0x00d5: 0x004e, # LATIN CAPITAL LETTER N
0x00d6: 0x004f, # LATIN CAPITAL LETTER O
0x00d7: 0x0050, # LATIN CAPITAL LETTER P
0x00d8: 0x0051, # LATIN CAPITAL LETTER Q
0x00d9: 0x0052, # LATIN CAPITAL LETTER R
0x00da: 0x00b9, # SUPERSCRIPT ONE
0x00db: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x00dc: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x00dd: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE
0x00de: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
0x00df: 0x00ff, # LATIN SMALL LETTER Y WITH DIAERESIS
0x00e0: 0x005c, # REVERSE SOLIDUS
0x00e1: 0x00f7, # DIVISION SIGN
0x00e2: 0x0053, # LATIN CAPITAL LETTER S
0x00e3: 0x0054, # LATIN CAPITAL LETTER T
0x00e4: 0x0055, # LATIN CAPITAL LETTER U
0x00e5: 0x0056, # LATIN CAPITAL LETTER V
0x00e6: 0x0057, # LATIN CAPITAL LETTER W
0x00e7: 0x0058, # LATIN CAPITAL LETTER X
0x00e8: 0x0059, # LATIN CAPITAL LETTER Y
0x00e9: 0x005a, # LATIN CAPITAL LETTER Z
0x00ea: 0x00b2, # SUPERSCRIPT TWO
0x00eb: 0x00d4, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
0x00ec: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x00ed: 0x00d2, # LATIN CAPITAL LETTER O WITH GRAVE
0x00ee: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE
0x00ef: 0x00d5, # LATIN CAPITAL LETTER O WITH TILDE
0x00f0: 0x0030, # DIGIT ZERO
0x00f1: 0x0031, # DIGIT ONE
0x00f2: 0x0032, # DIGIT TWO
0x00f3: 0x0033, # DIGIT THREE
0x00f4: 0x0034, # DIGIT FOUR
0x00f5: 0x0035, # DIGIT FIVE
0x00f6: 0x0036, # DIGIT SIX
0x00f7: 0x0037, # DIGIT SEVEN
0x00f8: 0x0038, # DIGIT EIGHT
0x00f9: 0x0039, # DIGIT NINE
0x00fa: 0x00b3, # SUPERSCRIPT THREE
0x00fb: 0x00db, # LATIN CAPITAL LETTER U WITH CIRCUMFLEX
0x00fc: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x00fd: 0x00d9, # LATIN CAPITAL LETTER U WITH GRAVE
0x00fe: 0x00da, # LATIN CAPITAL LETTER U WITH ACUTE
0x00ff: 0x009f, # CONTROL
})
### Encoding Map
encoding_map = {}
for k,v in decoding_map.items():
    encoding_map[v] = k
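getregentry() above handed the registry a plain (encoder, decoder, StreamReader, StreamWriter) tuple, the pre-CodecInfo convention of the encodings package. A rough modern analogue, sketched with a hypothetical two-entry mapping and a made-up codec name rather than the deleted modules:

import codecs

_dec = {0x00: 0x0041, 0x01: 0x00e9}        # hypothetical toy decoding map
_enc = {v: k for k, v in _dec.items()}     # inverted, as in the modules above

def _search(name):
    # Search function: return a CodecInfo for our toy name, None otherwise.
    if name != 'toy_charmap':
        return None
    return codecs.CodecInfo(
        name='toy_charmap',
        encode=lambda s, errors='strict': codecs.charmap_encode(s, errors, _enc),
        decode=lambda b, errors='strict': codecs.charmap_decode(b, errors, _dec),
    )

codecs.register(_search)
assert b'\x00\x01'.decode('toy_charmap') == 'A\u00e9'
assert 'A\u00e9'.encode('toy_charmap') == b'\x00\x01'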

+ 0
- 174
lib/jython/Lib/encodings/cp737.py

@@ -1,174 +0,0 @@
""" Python Character Mapping Codec generated from 'CP737.TXT' with gencodec.py.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_map)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_map)
class StreamWriter(Codec,codecs.StreamWriter):
    pass
class StreamReader(Codec,codecs.StreamReader):
    pass
### encodings module API
def getregentry():
    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x0391, # GREEK CAPITAL LETTER ALPHA
0x0081: 0x0392, # GREEK CAPITAL LETTER BETA
0x0082: 0x0393, # GREEK CAPITAL LETTER GAMMA
0x0083: 0x0394, # GREEK CAPITAL LETTER DELTA
0x0084: 0x0395, # GREEK CAPITAL LETTER EPSILON
0x0085: 0x0396, # GREEK CAPITAL LETTER ZETA
0x0086: 0x0397, # GREEK CAPITAL LETTER ETA
0x0087: 0x0398, # GREEK CAPITAL LETTER THETA
0x0088: 0x0399, # GREEK CAPITAL LETTER IOTA
0x0089: 0x039a, # GREEK CAPITAL LETTER KAPPA
0x008a: 0x039b, # GREEK CAPITAL LETTER LAMDA
0x008b: 0x039c, # GREEK CAPITAL LETTER MU
0x008c: 0x039d, # GREEK CAPITAL LETTER NU
0x008d: 0x039e, # GREEK CAPITAL LETTER XI
0x008e: 0x039f, # GREEK CAPITAL LETTER OMICRON
0x008f: 0x03a0, # GREEK CAPITAL LETTER PI
0x0090: 0x03a1, # GREEK CAPITAL LETTER RHO
0x0091: 0x03a3, # GREEK CAPITAL LETTER SIGMA
0x0092: 0x03a4, # GREEK CAPITAL LETTER TAU
0x0093: 0x03a5, # GREEK CAPITAL LETTER UPSILON
0x0094: 0x03a6, # GREEK CAPITAL LETTER PHI
0x0095: 0x03a7, # GREEK CAPITAL LETTER CHI
0x0096: 0x03a8, # GREEK CAPITAL LETTER PSI
0x0097: 0x03a9, # GREEK CAPITAL LETTER OMEGA
0x0098: 0x03b1, # GREEK SMALL LETTER ALPHA
0x0099: 0x03b2, # GREEK SMALL LETTER BETA
0x009a: 0x03b3, # GREEK SMALL LETTER GAMMA
0x009b: 0x03b4, # GREEK SMALL LETTER DELTA
0x009c: 0x03b5, # GREEK SMALL LETTER EPSILON
0x009d: 0x03b6, # GREEK SMALL LETTER ZETA
0x009e: 0x03b7, # GREEK SMALL LETTER ETA
0x009f: 0x03b8, # GREEK SMALL LETTER THETA
0x00a0: 0x03b9, # GREEK SMALL LETTER IOTA
0x00a1: 0x03ba, # GREEK SMALL LETTER KAPPA
0x00a2: 0x03bb, # GREEK SMALL LETTER LAMDA
0x00a3: 0x03bc, # GREEK SMALL LETTER MU
0x00a4: 0x03bd, # GREEK SMALL LETTER NU
0x00a5: 0x03be, # GREEK SMALL LETTER XI
0x00a6: 0x03bf, # GREEK SMALL LETTER OMICRON
0x00a7: 0x03c0, # GREEK SMALL LETTER PI
0x00a8: 0x03c1, # GREEK SMALL LETTER RHO
0x00a9: 0x03c3, # GREEK SMALL LETTER SIGMA
0x00aa: 0x03c2, # GREEK SMALL LETTER FINAL SIGMA
0x00ab: 0x03c4, # GREEK SMALL LETTER TAU
0x00ac: 0x03c5, # GREEK SMALL LETTER UPSILON
0x00ad: 0x03c6, # GREEK SMALL LETTER PHI
0x00ae: 0x03c7, # GREEK SMALL LETTER CHI
0x00af: 0x03c8, # GREEK SMALL LETTER PSI
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x258c, # LEFT HALF BLOCK
0x00de: 0x2590, # RIGHT HALF BLOCK
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x03c9, # GREEK SMALL LETTER OMEGA
0x00e1: 0x03ac, # GREEK SMALL LETTER ALPHA WITH TONOS
0x00e2: 0x03ad, # GREEK SMALL LETTER EPSILON WITH TONOS
0x00e3: 0x03ae, # GREEK SMALL LETTER ETA WITH TONOS
0x00e4: 0x03ca, # GREEK SMALL LETTER IOTA WITH DIALYTIKA
0x00e5: 0x03af, # GREEK SMALL LETTER IOTA WITH TONOS
0x00e6: 0x03cc, # GREEK SMALL LETTER OMICRON WITH TONOS
0x00e7: 0x03cd, # GREEK SMALL LETTER UPSILON WITH TONOS
0x00e8: 0x03cb, # GREEK SMALL LETTER UPSILON WITH DIALYTIKA
0x00e9: 0x03ce, # GREEK SMALL LETTER OMEGA WITH TONOS
0x00ea: 0x0386, # GREEK CAPITAL LETTER ALPHA WITH TONOS
0x00eb: 0x0388, # GREEK CAPITAL LETTER EPSILON WITH TONOS
0x00ec: 0x0389, # GREEK CAPITAL LETTER ETA WITH TONOS
0x00ed: 0x038a, # GREEK CAPITAL LETTER IOTA WITH TONOS
0x00ee: 0x038c, # GREEK CAPITAL LETTER OMICRON WITH TONOS
0x00ef: 0x038e, # GREEK CAPITAL LETTER UPSILON WITH TONOS
0x00f0: 0x038f, # GREEK CAPITAL LETTER OMEGA WITH TONOS
0x00f1: 0x00b1, # PLUS-MINUS SIGN
0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO
0x00f3: 0x2264, # LESS-THAN OR EQUAL TO
0x00f4: 0x03aa, # GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
0x00f5: 0x03ab, # GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
0x00f6: 0x00f7, # DIVISION SIGN
0x00f7: 0x2248, # ALMOST EQUAL TO
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x2219, # BULLET OPERATOR
0x00fa: 0x00b7, # MIDDLE DOT
0x00fb: 0x221a, # SQUARE ROOT
0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N
0x00fd: 0x00b2, # SUPERSCRIPT TWO
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Encoding Map
encoding_map = {}
for k,v in decoding_map.items():
    encoding_map[v] = k

+ 0
- 174
lib/jython/Lib/encodings/cp775.py

@@ -1,174 +0,0 @@
""" Python Character Mapping Codec generated from 'CP775.TXT' with gencodec.py.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_map)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_map)
class StreamWriter(Codec,codecs.StreamWriter):
    pass
class StreamReader(Codec,codecs.StreamReader):
    pass
### encodings module API
def getregentry():
    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x0106, # LATIN CAPITAL LETTER C WITH ACUTE
0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x0083: 0x0101, # LATIN SMALL LETTER A WITH MACRON
0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
0x0085: 0x0123, # LATIN SMALL LETTER G WITH CEDILLA
0x0086: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE
0x0087: 0x0107, # LATIN SMALL LETTER C WITH ACUTE
0x0088: 0x0142, # LATIN SMALL LETTER L WITH STROKE
0x0089: 0x0113, # LATIN SMALL LETTER E WITH MACRON
0x008a: 0x0156, # LATIN CAPITAL LETTER R WITH CEDILLA
0x008b: 0x0157, # LATIN SMALL LETTER R WITH CEDILLA
0x008c: 0x012b, # LATIN SMALL LETTER I WITH MACRON
0x008d: 0x0179, # LATIN CAPITAL LETTER Z WITH ACUTE
0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x008f: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x0091: 0x00e6, # LATIN SMALL LIGATURE AE
0x0092: 0x00c6, # LATIN CAPITAL LIGATURE AE
0x0093: 0x014d, # LATIN SMALL LETTER O WITH MACRON
0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
0x0095: 0x0122, # LATIN CAPITAL LETTER G WITH CEDILLA
0x0096: 0x00a2, # CENT SIGN
0x0097: 0x015a, # LATIN CAPITAL LETTER S WITH ACUTE
0x0098: 0x015b, # LATIN SMALL LETTER S WITH ACUTE
0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x009b: 0x00f8, # LATIN SMALL LETTER O WITH STROKE
0x009c: 0x00a3, # POUND SIGN
0x009d: 0x00d8, # LATIN CAPITAL LETTER O WITH STROKE
0x009e: 0x00d7, # MULTIPLICATION SIGN
0x009f: 0x00a4, # CURRENCY SIGN
0x00a0: 0x0100, # LATIN CAPITAL LETTER A WITH MACRON
0x00a1: 0x012a, # LATIN CAPITAL LETTER I WITH MACRON
0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x00a3: 0x017b, # LATIN CAPITAL LETTER Z WITH DOT ABOVE
0x00a4: 0x017c, # LATIN SMALL LETTER Z WITH DOT ABOVE
0x00a5: 0x017a, # LATIN SMALL LETTER Z WITH ACUTE
0x00a6: 0x201d, # RIGHT DOUBLE QUOTATION MARK
0x00a7: 0x00a6, # BROKEN BAR
0x00a8: 0x00a9, # COPYRIGHT SIGN
0x00a9: 0x00ae, # REGISTERED SIGN
0x00aa: 0x00ac, # NOT SIGN
0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
0x00ad: 0x0141, # LATIN CAPITAL LETTER L WITH STROKE
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x0104, # LATIN CAPITAL LETTER A WITH OGONEK
0x00b6: 0x010c, # LATIN CAPITAL LETTER C WITH CARON
0x00b7: 0x0118, # LATIN CAPITAL LETTER E WITH OGONEK
0x00b8: 0x0116, # LATIN CAPITAL LETTER E WITH DOT ABOVE
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x012e, # LATIN CAPITAL LETTER I WITH OGONEK
0x00be: 0x0160, # LATIN CAPITAL LETTER S WITH CARON
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x0172, # LATIN CAPITAL LETTER U WITH OGONEK
0x00c7: 0x016a, # LATIN CAPITAL LETTER U WITH MACRON
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x017d, # LATIN CAPITAL LETTER Z WITH CARON
0x00d0: 0x0105, # LATIN SMALL LETTER A WITH OGONEK
0x00d1: 0x010d, # LATIN SMALL LETTER C WITH CARON
0x00d2: 0x0119, # LATIN SMALL LETTER E WITH OGONEK
0x00d3: 0x0117, # LATIN SMALL LETTER E WITH DOT ABOVE
0x00d4: 0x012f, # LATIN SMALL LETTER I WITH OGONEK
0x00d5: 0x0161, # LATIN SMALL LETTER S WITH CARON
0x00d6: 0x0173, # LATIN SMALL LETTER U WITH OGONEK
0x00d7: 0x016b, # LATIN SMALL LETTER U WITH MACRON
0x00d8: 0x017e, # LATIN SMALL LETTER Z WITH CARON
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x258c, # LEFT HALF BLOCK
0x00de: 0x2590, # RIGHT HALF BLOCK
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE
0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S (GERMAN)
0x00e2: 0x014c, # LATIN CAPITAL LETTER O WITH MACRON
0x00e3: 0x0143, # LATIN CAPITAL LETTER N WITH ACUTE
0x00e4: 0x00f5, # LATIN SMALL LETTER O WITH TILDE
0x00e5: 0x00d5, # LATIN CAPITAL LETTER O WITH TILDE
0x00e6: 0x00b5, # MICRO SIGN
0x00e7: 0x0144, # LATIN SMALL LETTER N WITH ACUTE
0x00e8: 0x0136, # LATIN CAPITAL LETTER K WITH CEDILLA
0x00e9: 0x0137, # LATIN SMALL LETTER K WITH CEDILLA
0x00ea: 0x013b, # LATIN CAPITAL LETTER L WITH CEDILLA
0x00eb: 0x013c, # LATIN SMALL LETTER L WITH CEDILLA
0x00ec: 0x0146, # LATIN SMALL LETTER N WITH CEDILLA
0x00ed: 0x0112, # LATIN CAPITAL LETTER E WITH MACRON
0x00ee: 0x0145, # LATIN CAPITAL LETTER N WITH CEDILLA
0x00ef: 0x2019, # RIGHT SINGLE QUOTATION MARK
0x00f0: 0x00ad, # SOFT HYPHEN
0x00f1: 0x00b1, # PLUS-MINUS SIGN
0x00f2: 0x201c, # LEFT DOUBLE QUOTATION MARK
0x00f3: 0x00be, # VULGAR FRACTION THREE QUARTERS
0x00f4: 0x00b6, # PILCROW SIGN
0x00f5: 0x00a7, # SECTION SIGN
0x00f6: 0x00f7, # DIVISION SIGN
0x00f7: 0x201e, # DOUBLE LOW-9 QUOTATION MARK
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x2219, # BULLET OPERATOR
0x00fa: 0x00b7, # MIDDLE DOT
0x00fb: 0x00b9, # SUPERSCRIPT ONE
0x00fc: 0x00b3, # SUPERSCRIPT THREE
0x00fd: 0x00b2, # SUPERSCRIPT TWO
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Encoding Map
encoding_map = {}
for k,v in decoding_map.items():
    encoding_map[v] = k

+ 0
- 174
lib/jython/Lib/encodings/cp850.py

@@ -1,174 +0,0 @@
""" Python Character Mapping Codec generated from 'CP850.TXT' with gencodec.py.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_map)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_map)
class StreamWriter(Codec,codecs.StreamWriter):
    pass
class StreamReader(Codec,codecs.StreamReader):
    pass
### encodings module API
def getregentry():
    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
0x0086: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE
0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
0x008b: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS
0x008c: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x008d: 0x00ec, # LATIN SMALL LETTER I WITH GRAVE
0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x008f: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x0091: 0x00e6, # LATIN SMALL LIGATURE AE
0x0092: 0x00c6, # LATIN CAPITAL LIGATURE AE
0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
0x0095: 0x00f2, # LATIN SMALL LETTER O WITH GRAVE
0x0096: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x0097: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE
0x0098: 0x00ff, # LATIN SMALL LETTER Y WITH DIAERESIS
0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x009b: 0x00f8, # LATIN SMALL LETTER O WITH STROKE
0x009c: 0x00a3, # POUND SIGN
0x009d: 0x00d8, # LATIN CAPITAL LETTER O WITH STROKE
0x009e: 0x00d7, # MULTIPLICATION SIGN
0x009f: 0x0192, # LATIN SMALL LETTER F WITH HOOK
0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
0x00a4: 0x00f1, # LATIN SMALL LETTER N WITH TILDE
0x00a5: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE
0x00a6: 0x00aa, # FEMININE ORDINAL INDICATOR
0x00a7: 0x00ba, # MASCULINE ORDINAL INDICATOR
0x00a8: 0x00bf, # INVERTED QUESTION MARK
0x00a9: 0x00ae, # REGISTERED SIGN
0x00aa: 0x00ac, # NOT SIGN
0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x00c1, # LATIN CAPITAL LETTER A WITH ACUTE
0x00b6: 0x00c2, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
0x00b7: 0x00c0, # LATIN CAPITAL LETTER A WITH GRAVE
0x00b8: 0x00a9, # COPYRIGHT SIGN
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x00a2, # CENT SIGN
0x00be: 0x00a5, # YEN SIGN
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x00e3, # LATIN SMALL LETTER A WITH TILDE
0x00c7: 0x00c3, # LATIN CAPITAL LETTER A WITH TILDE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x00a4, # CURRENCY SIGN
0x00d0: 0x00f0, # LATIN SMALL LETTER ETH
0x00d1: 0x00d0, # LATIN CAPITAL LETTER ETH
0x00d2: 0x00ca, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
0x00d3: 0x00cb, # LATIN CAPITAL LETTER E WITH DIAERESIS
0x00d4: 0x00c8, # LATIN CAPITAL LETTER E WITH GRAVE
0x00d5: 0x0131, # LATIN SMALL LETTER DOTLESS I
0x00d6: 0x00cd, # LATIN CAPITAL LETTER I WITH ACUTE
0x00d7: 0x00ce, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
0x00d8: 0x00cf, # LATIN CAPITAL LETTER I WITH DIAERESIS
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x00a6, # BROKEN BAR
0x00de: 0x00cc, # LATIN CAPITAL LETTER I WITH GRAVE
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE
0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
0x00e2: 0x00d4, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
0x00e3: 0x00d2, # LATIN CAPITAL LETTER O WITH GRAVE
0x00e4: 0x00f5, # LATIN SMALL LETTER O WITH TILDE
0x00e5: 0x00d5, # LATIN CAPITAL LETTER O WITH TILDE
0x00e6: 0x00b5, # MICRO SIGN
0x00e7: 0x00fe, # LATIN SMALL LETTER THORN
0x00e8: 0x00de, # LATIN CAPITAL LETTER THORN
0x00e9: 0x00da, # LATIN CAPITAL LETTER U WITH ACUTE
0x00ea: 0x00db, # LATIN CAPITAL LETTER U WITH CIRCUMFLEX
0x00eb: 0x00d9, # LATIN CAPITAL LETTER U WITH GRAVE
0x00ec: 0x00fd, # LATIN SMALL LETTER Y WITH ACUTE
0x00ed: 0x00dd, # LATIN CAPITAL LETTER Y WITH ACUTE
0x00ee: 0x00af, # MACRON
0x00ef: 0x00b4, # ACUTE ACCENT
0x00f0: 0x00ad, # SOFT HYPHEN
0x00f1: 0x00b1, # PLUS-MINUS SIGN
0x00f2: 0x2017, # DOUBLE LOW LINE
0x00f3: 0x00be, # VULGAR FRACTION THREE QUARTERS
0x00f4: 0x00b6, # PILCROW SIGN
0x00f5: 0x00a7, # SECTION SIGN
0x00f6: 0x00f7, # DIVISION SIGN
0x00f7: 0x00b8, # CEDILLA
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x00a8, # DIAERESIS
0x00fa: 0x00b7, # MIDDLE DOT
0x00fb: 0x00b9, # SUPERSCRIPT ONE
0x00fc: 0x00b3, # SUPERSCRIPT THREE
0x00fd: 0x00b2, # SUPERSCRIPT TWO
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Encoding Map
encoding_map = {}
for k,v in decoding_map.items():
    encoding_map[v] = k
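The StreamReader and StreamWriter subclasses above add no behaviour of their own; wrapping a byte stream with them is all that is needed for incremental text I/O. A hedged sketch using the cp850 codec built into modern CPython rather than the deleted module:

import codecs, io

# Assumption: exercises CPython's stock cp850 codec, not the Jython file above.
raw = io.BytesIO('\u00e9\u2502'.encode('cp850'))   # 0x82 and 0xb3 in CP850
reader = codecs.getreader('cp850')(raw)            # a StreamReader instance
assert reader.read() == '\u00e9\u2502'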

+ 0
- 174
lib/jython/Lib/encodings/cp852.py

@@ -1,174 +0,0 @@
""" Python Character Mapping Codec generated from 'CP852.TXT' with gencodec.py.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_map)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_map)
class StreamWriter(Codec,codecs.StreamWriter):
    pass
class StreamReader(Codec,codecs.StreamReader):
    pass
### encodings module API
def getregentry():
    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
0x0085: 0x016f, # LATIN SMALL LETTER U WITH RING ABOVE
0x0086: 0x0107, # LATIN SMALL LETTER C WITH ACUTE
0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
0x0088: 0x0142, # LATIN SMALL LETTER L WITH STROKE
0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
0x008a: 0x0150, # LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
0x008b: 0x0151, # LATIN SMALL LETTER O WITH DOUBLE ACUTE
0x008c: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x008d: 0x0179, # LATIN CAPITAL LETTER Z WITH ACUTE
0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x008f: 0x0106, # LATIN CAPITAL LETTER C WITH ACUTE
0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x0091: 0x0139, # LATIN CAPITAL LETTER L WITH ACUTE
0x0092: 0x013a, # LATIN SMALL LETTER L WITH ACUTE
0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
0x0095: 0x013d, # LATIN CAPITAL LETTER L WITH CARON
0x0096: 0x013e, # LATIN SMALL LETTER L WITH CARON
0x0097: 0x015a, # LATIN CAPITAL LETTER S WITH ACUTE
0x0098: 0x015b, # LATIN SMALL LETTER S WITH ACUTE
0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x009b: 0x0164, # LATIN CAPITAL LETTER T WITH CARON
0x009c: 0x0165, # LATIN SMALL LETTER T WITH CARON
0x009d: 0x0141, # LATIN CAPITAL LETTER L WITH STROKE
0x009e: 0x00d7, # MULTIPLICATION SIGN
0x009f: 0x010d, # LATIN SMALL LETTER C WITH CARON
0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
0x00a4: 0x0104, # LATIN CAPITAL LETTER A WITH OGONEK
0x00a5: 0x0105, # LATIN SMALL LETTER A WITH OGONEK
0x00a6: 0x017d, # LATIN CAPITAL LETTER Z WITH CARON
0x00a7: 0x017e, # LATIN SMALL LETTER Z WITH CARON
0x00a8: 0x0118, # LATIN CAPITAL LETTER E WITH OGONEK
0x00a9: 0x0119, # LATIN SMALL LETTER E WITH OGONEK
0x00aa: 0x00ac, # NOT SIGN
0x00ab: 0x017a, # LATIN SMALL LETTER Z WITH ACUTE
0x00ac: 0x010c, # LATIN CAPITAL LETTER C WITH CARON
0x00ad: 0x015f, # LATIN SMALL LETTER S WITH CEDILLA
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x00c1, # LATIN CAPITAL LETTER A WITH ACUTE
0x00b6: 0x00c2, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
0x00b7: 0x011a, # LATIN CAPITAL LETTER E WITH CARON
0x00b8: 0x015e, # LATIN CAPITAL LETTER S WITH CEDILLA
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x017b, # LATIN CAPITAL LETTER Z WITH DOT ABOVE
0x00be: 0x017c, # LATIN SMALL LETTER Z WITH DOT ABOVE
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x0102, # LATIN CAPITAL LETTER A WITH BREVE
0x00c7: 0x0103, # LATIN SMALL LETTER A WITH BREVE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x00a4, # CURRENCY SIGN
0x00d0: 0x0111, # LATIN SMALL LETTER D WITH STROKE
0x00d1: 0x0110, # LATIN CAPITAL LETTER D WITH STROKE
0x00d2: 0x010e, # LATIN CAPITAL LETTER D WITH CARON
0x00d3: 0x00cb, # LATIN CAPITAL LETTER E WITH DIAERESIS
0x00d4: 0x010f, # LATIN SMALL LETTER D WITH CARON
0x00d5: 0x0147, # LATIN CAPITAL LETTER N WITH CARON
0x00d6: 0x00cd, # LATIN CAPITAL LETTER I WITH ACUTE
0x00d7: 0x00ce, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
0x00d8: 0x011b, # LATIN SMALL LETTER E WITH CARON
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x0162, # LATIN CAPITAL LETTER T WITH CEDILLA
0x00de: 0x016e, # LATIN CAPITAL LETTER U WITH RING ABOVE
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE
0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
0x00e2: 0x00d4, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
0x00e3: 0x0143, # LATIN CAPITAL LETTER N WITH ACUTE
0x00e4: 0x0144, # LATIN SMALL LETTER N WITH ACUTE
0x00e5: 0x0148, # LATIN SMALL LETTER N WITH CARON
0x00e6: 0x0160, # LATIN CAPITAL LETTER S WITH CARON
0x00e7: 0x0161, # LATIN SMALL LETTER S WITH CARON
0x00e8: 0x0154, # LATIN CAPITAL LETTER R WITH ACUTE
0x00e9: 0x00da, # LATIN CAPITAL LETTER U WITH ACUTE
0x00ea: 0x0155, # LATIN SMALL LETTER R WITH ACUTE
0x00eb: 0x0170, # LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
0x00ec: 0x00fd, # LATIN SMALL LETTER Y WITH ACUTE
0x00ed: 0x00dd, # LATIN CAPITAL LETTER Y WITH ACUTE
0x00ee: 0x0163, # LATIN SMALL LETTER T WITH CEDILLA
0x00ef: 0x00b4, # ACUTE ACCENT
0x00f0: 0x00ad, # SOFT HYPHEN
0x00f1: 0x02dd, # DOUBLE ACUTE ACCENT
0x00f2: 0x02db, # OGONEK
0x00f3: 0x02c7, # CARON
0x00f4: 0x02d8, # BREVE
0x00f5: 0x00a7, # SECTION SIGN
0x00f6: 0x00f7, # DIVISION SIGN
0x00f7: 0x00b8, # CEDILLA
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x00a8, # DIAERESIS
0x00fa: 0x02d9, # DOT ABOVE
0x00fb: 0x0171, # LATIN SMALL LETTER U WITH DOUBLE ACUTE
0x00fc: 0x0158, # LATIN CAPITAL LETTER R WITH CARON
0x00fd: 0x0159, # LATIN SMALL LETTER R WITH CARON
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Encoding Map
encoding_map = {}
for k,v in decoding_map.items():
    encoding_map[v] = k

+ 0
- 174
lib/jython/Lib/encodings/cp855.py

@@ -1,174 +0,0 @@
""" Python Character Mapping Codec generated from 'CP855.TXT' with gencodec.py.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_map)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_map)
class StreamWriter(Codec,codecs.StreamWriter):
    pass
class StreamReader(Codec,codecs.StreamReader):
    pass
### encodings module API
def getregentry():
    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x0452, # CYRILLIC SMALL LETTER DJE
0x0081: 0x0402, # CYRILLIC CAPITAL LETTER DJE
0x0082: 0x0453, # CYRILLIC SMALL LETTER GJE
0x0083: 0x0403, # CYRILLIC CAPITAL LETTER GJE
0x0084: 0x0451, # CYRILLIC SMALL LETTER IO
0x0085: 0x0401, # CYRILLIC CAPITAL LETTER IO
0x0086: 0x0454, # CYRILLIC SMALL LETTER UKRAINIAN IE
0x0087: 0x0404, # CYRILLIC CAPITAL LETTER UKRAINIAN IE
0x0088: 0x0455, # CYRILLIC SMALL LETTER DZE
0x0089: 0x0405, # CYRILLIC CAPITAL LETTER DZE
0x008a: 0x0456, # CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
0x008b: 0x0406, # CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
0x008c: 0x0457, # CYRILLIC SMALL LETTER YI
0x008d: 0x0407, # CYRILLIC CAPITAL LETTER YI
0x008e: 0x0458, # CYRILLIC SMALL LETTER JE
0x008f: 0x0408, # CYRILLIC CAPITAL LETTER JE
0x0090: 0x0459, # CYRILLIC SMALL LETTER LJE
0x0091: 0x0409, # CYRILLIC CAPITAL LETTER LJE
0x0092: 0x045a, # CYRILLIC SMALL LETTER NJE
0x0093: 0x040a, # CYRILLIC CAPITAL LETTER NJE
0x0094: 0x045b, # CYRILLIC SMALL LETTER TSHE
0x0095: 0x040b, # CYRILLIC CAPITAL LETTER TSHE
0x0096: 0x045c, # CYRILLIC SMALL LETTER KJE
0x0097: 0x040c, # CYRILLIC CAPITAL LETTER KJE
0x0098: 0x045e, # CYRILLIC SMALL LETTER SHORT U
0x0099: 0x040e, # CYRILLIC CAPITAL LETTER SHORT U
0x009a: 0x045f, # CYRILLIC SMALL LETTER DZHE
0x009b: 0x040f, # CYRILLIC CAPITAL LETTER DZHE
0x009c: 0x044e, # CYRILLIC SMALL LETTER YU
0x009d: 0x042e, # CYRILLIC CAPITAL LETTER YU
0x009e: 0x044a, # CYRILLIC SMALL LETTER HARD SIGN
0x009f: 0x042a, # CYRILLIC CAPITAL LETTER HARD SIGN
0x00a0: 0x0430, # CYRILLIC SMALL LETTER A
0x00a1: 0x0410, # CYRILLIC CAPITAL LETTER A
0x00a2: 0x0431, # CYRILLIC SMALL LETTER BE
0x00a3: 0x0411, # CYRILLIC CAPITAL LETTER BE
0x00a4: 0x0446, # CYRILLIC SMALL LETTER TSE
0x00a5: 0x0426, # CYRILLIC CAPITAL LETTER TSE
0x00a6: 0x0434, # CYRILLIC SMALL LETTER DE
0x00a7: 0x0414, # CYRILLIC CAPITAL LETTER DE
0x00a8: 0x0435, # CYRILLIC SMALL LETTER IE
0x00a9: 0x0415, # CYRILLIC CAPITAL LETTER IE
0x00aa: 0x0444, # CYRILLIC SMALL LETTER EF
0x00ab: 0x0424, # CYRILLIC CAPITAL LETTER EF
0x00ac: 0x0433, # CYRILLIC SMALL LETTER GHE
0x00ad: 0x0413, # CYRILLIC CAPITAL LETTER GHE
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x0445, # CYRILLIC SMALL LETTER HA
0x00b6: 0x0425, # CYRILLIC CAPITAL LETTER HA
0x00b7: 0x0438, # CYRILLIC SMALL LETTER I
0x00b8: 0x0418, # CYRILLIC CAPITAL LETTER I
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x0439, # CYRILLIC SMALL LETTER SHORT I
0x00be: 0x0419, # CYRILLIC CAPITAL LETTER SHORT I
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x043a, # CYRILLIC SMALL LETTER KA
0x00c7: 0x041a, # CYRILLIC CAPITAL LETTER KA
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x00a4, # CURRENCY SIGN
0x00d0: 0x043b, # CYRILLIC SMALL LETTER EL
0x00d1: 0x041b, # CYRILLIC CAPITAL LETTER EL
0x00d2: 0x043c, # CYRILLIC SMALL LETTER EM
0x00d3: 0x041c, # CYRILLIC CAPITAL LETTER EM
0x00d4: 0x043d, # CYRILLIC SMALL LETTER EN
0x00d5: 0x041d, # CYRILLIC CAPITAL LETTER EN
0x00d6: 0x043e, # CYRILLIC SMALL LETTER O
0x00d7: 0x041e, # CYRILLIC CAPITAL LETTER O
0x00d8: 0x043f, # CYRILLIC SMALL LETTER PE
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x041f, # CYRILLIC CAPITAL LETTER PE
0x00de: 0x044f, # CYRILLIC SMALL LETTER YA
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x042f, # CYRILLIC CAPITAL LETTER YA
0x00e1: 0x0440, # CYRILLIC SMALL LETTER ER
0x00e2: 0x0420, # CYRILLIC CAPITAL LETTER ER
0x00e3: 0x0441, # CYRILLIC SMALL LETTER ES
0x00e4: 0x0421, # CYRILLIC CAPITAL LETTER ES
0x00e5: 0x0442, # CYRILLIC SMALL LETTER TE
0x00e6: 0x0422, # CYRILLIC CAPITAL LETTER TE
0x00e7: 0x0443, # CYRILLIC SMALL LETTER U
0x00e8: 0x0423, # CYRILLIC CAPITAL LETTER U
0x00e9: 0x0436, # CYRILLIC SMALL LETTER ZHE
0x00ea: 0x0416, # CYRILLIC CAPITAL LETTER ZHE
0x00eb: 0x0432, # CYRILLIC SMALL LETTER VE
0x00ec: 0x0412, # CYRILLIC CAPITAL LETTER VE
0x00ed: 0x044c, # CYRILLIC SMALL LETTER SOFT SIGN
0x00ee: 0x042c, # CYRILLIC CAPITAL LETTER SOFT SIGN
0x00ef: 0x2116, # NUMERO SIGN
0x00f0: 0x00ad, # SOFT HYPHEN
0x00f1: 0x044b, # CYRILLIC SMALL LETTER YERU
0x00f2: 0x042b, # CYRILLIC CAPITAL LETTER YERU
0x00f3: 0x0437, # CYRILLIC SMALL LETTER ZE
0x00f4: 0x0417, # CYRILLIC CAPITAL LETTER ZE
0x00f5: 0x0448, # CYRILLIC SMALL LETTER SHA
0x00f6: 0x0428, # CYRILLIC CAPITAL LETTER SHA
0x00f7: 0x044d, # CYRILLIC SMALL LETTER E
0x00f8: 0x042d, # CYRILLIC CAPITAL LETTER E
0x00f9: 0x0449, # CYRILLIC SMALL LETTER SHCHA
0x00fa: 0x0429, # CYRILLIC CAPITAL LETTER SHCHA
0x00fb: 0x0447, # CYRILLIC SMALL LETTER CHE
0x00fc: 0x0427, # CYRILLIC CAPITAL LETTER CHE
0x00fd: 0x00a7, # SECTION SIGN
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Encoding Map
encoding_map = {}
for k,v in decoding_map.items():
    encoding_map[v] = k

+ 0
- 174
lib/jython/Lib/encodings/cp856.py

@@ -1,174 +0,0 @@
""" Python Character Mapping Codec generated from 'CP856.TXT' with gencodec.py.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_map)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_map)
class StreamWriter(Codec,codecs.StreamWriter):
    pass
class StreamReader(Codec,codecs.StreamReader):
    pass
### encodings module API
def getregentry():
    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x05d0, # HEBREW LETTER ALEF
0x0081: 0x05d1, # HEBREW LETTER BET
0x0082: 0x05d2, # HEBREW LETTER GIMEL
0x0083: 0x05d3, # HEBREW LETTER DALET
0x0084: 0x05d4, # HEBREW LETTER HE
0x0085: 0x05d5, # HEBREW LETTER VAV
0x0086: 0x05d6, # HEBREW LETTER ZAYIN
0x0087: 0x05d7, # HEBREW LETTER HET
0x0088: 0x05d8, # HEBREW LETTER TET
0x0089: 0x05d9, # HEBREW LETTER YOD
0x008a: 0x05da, # HEBREW LETTER FINAL KAF
0x008b: 0x05db, # HEBREW LETTER KAF
0x008c: 0x05dc, # HEBREW LETTER LAMED
0x008d: 0x05dd, # HEBREW LETTER FINAL MEM
0x008e: 0x05de, # HEBREW LETTER MEM
0x008f: 0x05df, # HEBREW LETTER FINAL NUN
0x0090: 0x05e0, # HEBREW LETTER NUN
0x0091: 0x05e1, # HEBREW LETTER SAMEKH
0x0092: 0x05e2, # HEBREW LETTER AYIN
0x0093: 0x05e3, # HEBREW LETTER FINAL PE
0x0094: 0x05e4, # HEBREW LETTER PE
0x0095: 0x05e5, # HEBREW LETTER FINAL TSADI
0x0096: 0x05e6, # HEBREW LETTER TSADI
0x0097: 0x05e7, # HEBREW LETTER QOF
0x0098: 0x05e8, # HEBREW LETTER RESH
0x0099: 0x05e9, # HEBREW LETTER SHIN
0x009a: 0x05ea, # HEBREW LETTER TAV
0x009b: None, # UNDEFINED
0x009c: 0x00a3, # POUND SIGN
0x009d: None, # UNDEFINED
0x009e: 0x00d7, # MULTIPLICATION SIGN
0x009f: None, # UNDEFINED
0x00a0: None, # UNDEFINED
0x00a1: None, # UNDEFINED
0x00a2: None, # UNDEFINED
0x00a3: None, # UNDEFINED
0x00a4: None, # UNDEFINED
0x00a5: None, # UNDEFINED
0x00a6: None, # UNDEFINED
0x00a7: None, # UNDEFINED
0x00a8: None, # UNDEFINED
0x00a9: 0x00ae, # REGISTERED SIGN
0x00aa: 0x00ac, # NOT SIGN
0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
0x00ad: None, # UNDEFINED
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: None, # UNDEFINED
0x00b6: None, # UNDEFINED
0x00b7: None, # UNDEFINED
0x00b8: 0x00a9, # COPYRIGHT SIGN
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x00a2, # CENT SIGN
0x00be: 0x00a5, # YEN SIGN
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: None, # UNDEFINED
0x00c7: None, # UNDEFINED
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x00a4, # CURRENCY SIGN
0x00d0: None, # UNDEFINED
0x00d1: None, # UNDEFINED
0x00d2: None, # UNDEFINED
0x00d3: None, # UNDEFINED
0x00d4: None, # UNDEFINED
0x00d5: None, # UNDEFINED
0x00d6: None, # UNDEFINED
0x00d7: None, # UNDEFINED
0x00d8: None, # UNDEFINED
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x00a6, # BROKEN BAR
0x00de: None, # UNDEFINED
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: None, # UNDEFINED
0x00e1: None, # UNDEFINED
0x00e2: None, # UNDEFINED
0x00e3: None, # UNDEFINED
0x00e4: None, # UNDEFINED
0x00e5: None, # UNDEFINED
0x00e6: 0x00b5, # MICRO SIGN
0x00e7: None, # UNDEFINED
0x00e8: None, # UNDEFINED
0x00e9: None, # UNDEFINED
0x00ea: None, # UNDEFINED
0x00eb: None, # UNDEFINED
0x00ec: None, # UNDEFINED
0x00ed: None, # UNDEFINED
0x00ee: 0x00af, # MACRON
0x00ef: 0x00b4, # ACUTE ACCENT
0x00f0: 0x00ad, # SOFT HYPHEN
0x00f1: 0x00b1, # PLUS-MINUS SIGN
0x00f2: 0x2017, # DOUBLE LOW LINE
0x00f3: 0x00be, # VULGAR FRACTION THREE QUARTERS
0x00f4: 0x00b6, # PILCROW SIGN
0x00f5: 0x00a7, # SECTION SIGN
0x00f6: 0x00f7, # DIVISION SIGN
0x00f7: 0x00b8, # CEDILLA
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x00a8, # DIAERESIS
0x00fa: 0x00b7, # MIDDLE DOT
0x00fb: 0x00b9, # SUPERSCRIPT ONE
0x00fc: 0x00b3, # SUPERSCRIPT THREE
0x00fd: 0x00b2, # SUPERSCRIPT TWO
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Encoding Map
encoding_map = {}
for k,v in decoding_map.items():
    encoding_map[v] = k
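
For context on what each of these generated modules did: the module-level decoding_map/encoding_map pair is simply handed to the generic codecs.charmap_decode/codecs.charmap_encode helpers. A minimal self-contained sketch of that round trip (the one-entry sample map below is made up for illustration, not taken from the deleted files):

```python
import codecs

# Tiny charmap in the same style as the generated cp*.py modules in this diff
# (illustrative only; the real tables cover the whole 0x80-0xFF range).
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
    0x0080: 0x05d0,  # HEBREW LETTER ALEF
})
encoding_map = {v: k for k, v in decoding_map.items()}

# bytes -> str through the decoding map ...
text, _ = codecs.charmap_decode(b"A\x80", "strict", decoding_map)
assert text == "A\u05d0"

# ... and str -> bytes through the inverted map.
raw, _ = codecs.charmap_encode(text, "strict", encoding_map)
assert raw == b"A\x80"
```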

+ 0
- 173
lib/jython/Lib/encodings/cp857.py

@@ -1,173 +0,0 @@
""" Python Character Mapping Codec generated from 'CP857.TXT' with gencodec.py.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_map)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_map)
class StreamWriter(Codec,codecs.StreamWriter):
    pass
class StreamReader(Codec,codecs.StreamReader):
    pass
### encodings module API
def getregentry():
    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
0x0086: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE
0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
0x008b: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS
0x008c: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x008d: 0x0131, # LATIN SMALL LETTER DOTLESS I
0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x008f: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x0091: 0x00e6, # LATIN SMALL LIGATURE AE
0x0092: 0x00c6, # LATIN CAPITAL LIGATURE AE
0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
0x0095: 0x00f2, # LATIN SMALL LETTER O WITH GRAVE
0x0096: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x0097: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE
0x0098: 0x0130, # LATIN CAPITAL LETTER I WITH DOT ABOVE
0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x009b: 0x00f8, # LATIN SMALL LETTER O WITH STROKE
0x009c: 0x00a3, # POUND SIGN
0x009d: 0x00d8, # LATIN CAPITAL LETTER O WITH STROKE
0x009e: 0x015e, # LATIN CAPITAL LETTER S WITH CEDILLA
0x009f: 0x015f, # LATIN SMALL LETTER S WITH CEDILLA
0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
0x00a4: 0x00f1, # LATIN SMALL LETTER N WITH TILDE
0x00a5: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE
0x00a6: 0x011e, # LATIN CAPITAL LETTER G WITH BREVE
0x00a7: 0x011f, # LATIN SMALL LETTER G WITH BREVE
0x00a8: 0x00bf, # INVERTED QUESTION MARK
0x00a9: 0x00ae, # REGISTERED SIGN
0x00aa: 0x00ac, # NOT SIGN
0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x00c1, # LATIN CAPITAL LETTER A WITH ACUTE
0x00b6: 0x00c2, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
0x00b7: 0x00c0, # LATIN CAPITAL LETTER A WITH GRAVE
0x00b8: 0x00a9, # COPYRIGHT SIGN
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x00a2, # CENT SIGN
0x00be: 0x00a5, # YEN SIGN
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x00e3, # LATIN SMALL LETTER A WITH TILDE
0x00c7: 0x00c3, # LATIN CAPITAL LETTER A WITH TILDE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x00a4, # CURRENCY SIGN
0x00d0: 0x00ba, # MASCULINE ORDINAL INDICATOR
0x00d1: 0x00aa, # FEMININE ORDINAL INDICATOR
0x00d2: 0x00ca, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
0x00d3: 0x00cb, # LATIN CAPITAL LETTER E WITH DIAERESIS
0x00d4: 0x00c8, # LATIN CAPITAL LETTER E WITH GRAVE
0x00d5: None, # UNDEFINED
0x00d6: 0x00cd, # LATIN CAPITAL LETTER I WITH ACUTE
0x00d7: 0x00ce, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
0x00d8: 0x00cf, # LATIN CAPITAL LETTER I WITH DIAERESIS
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x00a6, # BROKEN BAR
0x00de: 0x00cc, # LATIN CAPITAL LETTER I WITH GRAVE
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE
0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
0x00e2: 0x00d4, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
0x00e3: 0x00d2, # LATIN CAPITAL LETTER O WITH GRAVE
0x00e4: 0x00f5, # LATIN SMALL LETTER O WITH TILDE
0x00e5: 0x00d5, # LATIN CAPITAL LETTER O WITH TILDE
0x00e6: 0x00b5, # MICRO SIGN
0x00e7: None, # UNDEFINED
0x00e8: 0x00d7, # MULTIPLICATION SIGN
0x00e9: 0x00da, # LATIN CAPITAL LETTER U WITH ACUTE
0x00ea: 0x00db, # LATIN CAPITAL LETTER U WITH CIRCUMFLEX
0x00eb: 0x00d9, # LATIN CAPITAL LETTER U WITH GRAVE
0x00ed: 0x00ff, # LATIN SMALL LETTER Y WITH DIAERESIS
0x00ee: 0x00af, # MACRON
0x00ef: 0x00b4, # ACUTE ACCENT
0x00f0: 0x00ad, # SOFT HYPHEN
0x00f1: 0x00b1, # PLUS-MINUS SIGN
0x00f2: None, # UNDEFINED
0x00f3: 0x00be, # VULGAR FRACTION THREE QUARTERS
0x00f4: 0x00b6, # PILCROW SIGN
0x00f5: 0x00a7, # SECTION SIGN
0x00f6: 0x00f7, # DIVISION SIGN
0x00f7: 0x00b8, # CEDILLA
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x00a8, # DIAERESIS
0x00fa: 0x00b7, # MIDDLE DOT
0x00fb: 0x00b9, # SUPERSCRIPT ONE
0x00fc: 0x00b3, # SUPERSCRIPT THREE
0x00fd: 0x00b2, # SUPERSCRIPT TWO
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Encoding Map
encoding_map = {}
for k,v in decoding_map.items():
    encoding_map[v] = k
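
Several entries in these tables map to None (the UNDEFINED slots, e.g. 0x00d5 in cp857 above). With charmap_decode that is what makes such a byte raise UnicodeDecodeError under 'strict' and come back as U+FFFD under 'replace'. A small sketch with a made-up one-entry map (not from the deleted files):

```python
import codecs

decoding_map = codecs.make_identity_dict(range(256))
decoding_map[0xD5] = None  # mark 0xD5 as UNDEFINED, as the tables above do

try:
    codecs.charmap_decode(b"\xd5", "strict", decoding_map)
except UnicodeDecodeError as exc:
    print("strict:", exc.reason)   # character maps to <undefined>

text, _ = codecs.charmap_decode(b"\xd5", "replace", decoding_map)
print("replace:", repr(text))      # '\ufffd'
```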

+ 0
- 174
lib/jython/Lib/encodings/cp860.py

@@ -1,174 +0,0 @@
""" Python Character Mapping Codec generated from 'CP860.TXT' with gencodec.py.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_map)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_map)
class StreamWriter(Codec,codecs.StreamWriter):
    pass
class StreamReader(Codec,codecs.StreamReader):
    pass
### encodings module API
def getregentry():
    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x0084: 0x00e3, # LATIN SMALL LETTER A WITH TILDE
0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
0x0086: 0x00c1, # LATIN CAPITAL LETTER A WITH ACUTE
0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x0089: 0x00ca, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
0x008b: 0x00cd, # LATIN CAPITAL LETTER I WITH ACUTE
0x008c: 0x00d4, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
0x008d: 0x00ec, # LATIN SMALL LETTER I WITH GRAVE
0x008e: 0x00c3, # LATIN CAPITAL LETTER A WITH TILDE
0x008f: 0x00c2, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x0091: 0x00c0, # LATIN CAPITAL LETTER A WITH GRAVE
0x0092: 0x00c8, # LATIN CAPITAL LETTER E WITH GRAVE
0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x0094: 0x00f5, # LATIN SMALL LETTER O WITH TILDE
0x0095: 0x00f2, # LATIN SMALL LETTER O WITH GRAVE
0x0096: 0x00da, # LATIN CAPITAL LETTER U WITH ACUTE
0x0097: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE
0x0098: 0x00cc, # LATIN CAPITAL LETTER I WITH GRAVE
0x0099: 0x00d5, # LATIN CAPITAL LETTER O WITH TILDE
0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x009b: 0x00a2, # CENT SIGN
0x009c: 0x00a3, # POUND SIGN
0x009d: 0x00d9, # LATIN CAPITAL LETTER U WITH GRAVE
0x009e: 0x20a7, # PESETA SIGN
0x009f: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE
0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
0x00a4: 0x00f1, # LATIN SMALL LETTER N WITH TILDE
0x00a5: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE
0x00a6: 0x00aa, # FEMININE ORDINAL INDICATOR
0x00a7: 0x00ba, # MASCULINE ORDINAL INDICATOR
0x00a8: 0x00bf, # INVERTED QUESTION MARK
0x00a9: 0x00d2, # LATIN CAPITAL LETTER O WITH GRAVE
0x00aa: 0x00ac, # NOT SIGN
0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x258c, # LEFT HALF BLOCK
0x00de: 0x2590, # RIGHT HALF BLOCK
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x03b1, # GREEK SMALL LETTER ALPHA
0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
0x00e2: 0x0393, # GREEK CAPITAL LETTER GAMMA
0x00e3: 0x03c0, # GREEK SMALL LETTER PI
0x00e4: 0x03a3, # GREEK CAPITAL LETTER SIGMA
0x00e5: 0x03c3, # GREEK SMALL LETTER SIGMA
0x00e6: 0x00b5, # MICRO SIGN
0x00e7: 0x03c4, # GREEK SMALL LETTER TAU
0x00e8: 0x03a6, # GREEK CAPITAL LETTER PHI
0x00e9: 0x0398, # GREEK CAPITAL LETTER THETA
0x00ea: 0x03a9, # GREEK CAPITAL LETTER OMEGA
0x00eb: 0x03b4, # GREEK SMALL LETTER DELTA
0x00ec: 0x221e, # INFINITY
0x00ed: 0x03c6, # GREEK SMALL LETTER PHI
0x00ee: 0x03b5, # GREEK SMALL LETTER EPSILON
0x00ef: 0x2229, # INTERSECTION
0x00f0: 0x2261, # IDENTICAL TO
0x00f1: 0x00b1, # PLUS-MINUS SIGN
0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO
0x00f3: 0x2264, # LESS-THAN OR EQUAL TO
0x00f4: 0x2320, # TOP HALF INTEGRAL
0x00f5: 0x2321, # BOTTOM HALF INTEGRAL
0x00f6: 0x00f7, # DIVISION SIGN
0x00f7: 0x2248, # ALMOST EQUAL TO
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x2219, # BULLET OPERATOR
0x00fa: 0x00b7, # MIDDLE DOT
0x00fb: 0x221a, # SQUARE ROOT
0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N
0x00fd: 0x00b2, # SUPERSCRIPT TWO
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Encoding Map
encoding_map = {}
for k,v in decoding_map.items():
    encoding_map[v] = k
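
The getregentry() hook in each of these modules is what the stdlib encodings package's search function calls to obtain the codec for the matching encoding name. Outside that package, an equivalent charmap codec can be wired up by hand; a rough sketch using the modern codecs.CodecInfo API (the codec name cp860_demo is made up here):

```python
import codecs

decoding_map = codecs.make_identity_dict(range(256))
encoding_map = {v: k for k, v in decoding_map.items()}

def _search(name):
    # Only answer for the made-up codec name; return None for everything else.
    if name != "cp860_demo":
        return None
    return codecs.CodecInfo(
        name="cp860_demo",
        encode=lambda s, errors="strict": codecs.charmap_encode(s, errors, encoding_map),
        decode=lambda b, errors="strict": codecs.charmap_decode(b, errors, decoding_map),
    )

codecs.register(_search)
print(b"abc".decode("cp860_demo"))  # 'abc' via the identity mapping
```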

+ 0
- 174
lib/jython/Lib/encodings/cp861.py

@@ -1,174 +0,0 @@
""" Python Character Mapping Codec generated from 'CP861.TXT' with gencodec.py.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_map)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_map)
class StreamWriter(Codec,codecs.StreamWriter):
    pass
class StreamReader(Codec,codecs.StreamReader):
    pass
### encodings module API
def getregentry():
    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
0x0086: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE
0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
0x008b: 0x00d0, # LATIN CAPITAL LETTER ETH
0x008c: 0x00f0, # LATIN SMALL LETTER ETH
0x008d: 0x00de, # LATIN CAPITAL LETTER THORN
0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x008f: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x0091: 0x00e6, # LATIN SMALL LIGATURE AE
0x0092: 0x00c6, # LATIN CAPITAL LIGATURE AE
0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
0x0095: 0x00fe, # LATIN SMALL LETTER THORN
0x0096: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x0097: 0x00dd, # LATIN CAPITAL LETTER Y WITH ACUTE
0x0098: 0x00fd, # LATIN SMALL LETTER Y WITH ACUTE
0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x009b: 0x00f8, # LATIN SMALL LETTER O WITH STROKE
0x009c: 0x00a3, # POUND SIGN
0x009d: 0x00d8, # LATIN CAPITAL LETTER O WITH STROKE
0x009e: 0x20a7, # PESETA SIGN
0x009f: 0x0192, # LATIN SMALL LETTER F WITH HOOK
0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
0x00a4: 0x00c1, # LATIN CAPITAL LETTER A WITH ACUTE
0x00a5: 0x00cd, # LATIN CAPITAL LETTER I WITH ACUTE
0x00a6: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE
0x00a7: 0x00da, # LATIN CAPITAL LETTER U WITH ACUTE
0x00a8: 0x00bf, # INVERTED QUESTION MARK
0x00a9: 0x2310, # REVERSED NOT SIGN
0x00aa: 0x00ac, # NOT SIGN
0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x258c, # LEFT HALF BLOCK
0x00de: 0x2590, # RIGHT HALF BLOCK
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x03b1, # GREEK SMALL LETTER ALPHA
0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
0x00e2: 0x0393, # GREEK CAPITAL LETTER GAMMA
0x00e3: 0x03c0, # GREEK SMALL LETTER PI
0x00e4: 0x03a3, # GREEK CAPITAL LETTER SIGMA
0x00e5: 0x03c3, # GREEK SMALL LETTER SIGMA
0x00e6: 0x00b5, # MICRO SIGN
0x00e7: 0x03c4, # GREEK SMALL LETTER TAU
0x00e8: 0x03a6, # GREEK CAPITAL LETTER PHI
0x00e9: 0x0398, # GREEK CAPITAL LETTER THETA
0x00ea: 0x03a9, # GREEK CAPITAL LETTER OMEGA
0x00eb: 0x03b4, # GREEK SMALL LETTER DELTA
0x00ec: 0x221e, # INFINITY
0x00ed: 0x03c6, # GREEK SMALL LETTER PHI
0x00ee: 0x03b5, # GREEK SMALL LETTER EPSILON
0x00ef: 0x2229, # INTERSECTION
0x00f0: 0x2261, # IDENTICAL TO
0x00f1: 0x00b1, # PLUS-MINUS SIGN
0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO
0x00f3: 0x2264, # LESS-THAN OR EQUAL TO
0x00f4: 0x2320, # TOP HALF INTEGRAL
0x00f5: 0x2321, # BOTTOM HALF INTEGRAL
0x00f6: 0x00f7, # DIVISION SIGN
0x00f7: 0x2248, # ALMOST EQUAL TO
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x2219, # BULLET OPERATOR
0x00fa: 0x00b7, # MIDDLE DOT
0x00fb: 0x221a, # SQUARE ROOT
0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N
0x00fd: 0x00b2, # SUPERSCRIPT TWO
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Encoding Map
encoding_map = {}
for k,v in decoding_map.items():
    encoding_map[v] = k

+ 0
- 174
lib/jython/Lib/encodings/cp862.py

@@ -1,174 +0,0 @@
""" Python Character Mapping Codec generated from 'CP862.TXT' with gencodec.py.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_map)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_map)
class StreamWriter(Codec,codecs.StreamWriter):
    pass
class StreamReader(Codec,codecs.StreamReader):
    pass
### encodings module API
def getregentry():
    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x05d0, # HEBREW LETTER ALEF
0x0081: 0x05d1, # HEBREW LETTER BET
0x0082: 0x05d2, # HEBREW LETTER GIMEL
0x0083: 0x05d3, # HEBREW LETTER DALET
0x0084: 0x05d4, # HEBREW LETTER HE
0x0085: 0x05d5, # HEBREW LETTER VAV
0x0086: 0x05d6, # HEBREW LETTER ZAYIN
0x0087: 0x05d7, # HEBREW LETTER HET
0x0088: 0x05d8, # HEBREW LETTER TET
0x0089: 0x05d9, # HEBREW LETTER YOD
0x008a: 0x05da, # HEBREW LETTER FINAL KAF
0x008b: 0x05db, # HEBREW LETTER KAF
0x008c: 0x05dc, # HEBREW LETTER LAMED
0x008d: 0x05dd, # HEBREW LETTER FINAL MEM
0x008e: 0x05de, # HEBREW LETTER MEM
0x008f: 0x05df, # HEBREW LETTER FINAL NUN
0x0090: 0x05e0, # HEBREW LETTER NUN
0x0091: 0x05e1, # HEBREW LETTER SAMEKH
0x0092: 0x05e2, # HEBREW LETTER AYIN
0x0093: 0x05e3, # HEBREW LETTER FINAL PE
0x0094: 0x05e4, # HEBREW LETTER PE
0x0095: 0x05e5, # HEBREW LETTER FINAL TSADI
0x0096: 0x05e6, # HEBREW LETTER TSADI
0x0097: 0x05e7, # HEBREW LETTER QOF
0x0098: 0x05e8, # HEBREW LETTER RESH
0x0099: 0x05e9, # HEBREW LETTER SHIN
0x009a: 0x05ea, # HEBREW LETTER TAV
0x009b: 0x00a2, # CENT SIGN
0x009c: 0x00a3, # POUND SIGN
0x009d: 0x00a5, # YEN SIGN
0x009e: 0x20a7, # PESETA SIGN
0x009f: 0x0192, # LATIN SMALL LETTER F WITH HOOK
0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
0x00a4: 0x00f1, # LATIN SMALL LETTER N WITH TILDE
0x00a5: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE
0x00a6: 0x00aa, # FEMININE ORDINAL INDICATOR
0x00a7: 0x00ba, # MASCULINE ORDINAL INDICATOR
0x00a8: 0x00bf, # INVERTED QUESTION MARK
0x00a9: 0x2310, # REVERSED NOT SIGN
0x00aa: 0x00ac, # NOT SIGN
0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x258c, # LEFT HALF BLOCK
0x00de: 0x2590, # RIGHT HALF BLOCK
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x03b1, # GREEK SMALL LETTER ALPHA
0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S (GERMAN)
0x00e2: 0x0393, # GREEK CAPITAL LETTER GAMMA
0x00e3: 0x03c0, # GREEK SMALL LETTER PI
0x00e4: 0x03a3, # GREEK CAPITAL LETTER SIGMA
0x00e5: 0x03c3, # GREEK SMALL LETTER SIGMA
0x00e6: 0x00b5, # MICRO SIGN
0x00e7: 0x03c4, # GREEK SMALL LETTER TAU
0x00e8: 0x03a6, # GREEK CAPITAL LETTER PHI
0x00e9: 0x0398, # GREEK CAPITAL LETTER THETA
0x00ea: 0x03a9, # GREEK CAPITAL LETTER OMEGA
0x00eb: 0x03b4, # GREEK SMALL LETTER DELTA
0x00ec: 0x221e, # INFINITY
0x00ed: 0x03c6, # GREEK SMALL LETTER PHI
0x00ee: 0x03b5, # GREEK SMALL LETTER EPSILON
0x00ef: 0x2229, # INTERSECTION
0x00f0: 0x2261, # IDENTICAL TO
0x00f1: 0x00b1, # PLUS-MINUS SIGN
0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO
0x00f3: 0x2264, # LESS-THAN OR EQUAL TO
0x00f4: 0x2320, # TOP HALF INTEGRAL
0x00f5: 0x2321, # BOTTOM HALF INTEGRAL
0x00f6: 0x00f7, # DIVISION SIGN
0x00f7: 0x2248, # ALMOST EQUAL TO
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x2219, # BULLET OPERATOR
0x00fa: 0x00b7, # MIDDLE DOT
0x00fb: 0x221a, # SQUARE ROOT
0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N
0x00fd: 0x00b2, # SUPERSCRIPT TWO
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Encoding Map
encoding_map = {}
for k,v in decoding_map.items():
    encoding_map[v] = k

+ 0
- 174
lib/jython/Lib/encodings/cp863.py

@@ -1,174 +0,0 @@
""" Python Character Mapping Codec generated from 'CP863.TXT' with gencodec.py.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_map)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_map)
class StreamWriter(Codec,codecs.StreamWriter):
    pass
class StreamReader(Codec,codecs.StreamReader):
    pass
### encodings module API
def getregentry():
    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x0084: 0x00c2, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
0x0086: 0x00b6, # PILCROW SIGN
0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
0x008b: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS
0x008c: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x008d: 0x2017, # DOUBLE LOW LINE
0x008e: 0x00c0, # LATIN CAPITAL LETTER A WITH GRAVE
0x008f: 0x00a7, # SECTION SIGN
0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x0091: 0x00c8, # LATIN CAPITAL LETTER E WITH GRAVE
0x0092: 0x00ca, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x0094: 0x00cb, # LATIN CAPITAL LETTER E WITH DIAERESIS
0x0095: 0x00cf, # LATIN CAPITAL LETTER I WITH DIAERESIS
0x0096: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x0097: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE
0x0098: 0x00a4, # CURRENCY SIGN
0x0099: 0x00d4, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x009b: 0x00a2, # CENT SIGN
0x009c: 0x00a3, # POUND SIGN
0x009d: 0x00d9, # LATIN CAPITAL LETTER U WITH GRAVE
0x009e: 0x00db, # LATIN CAPITAL LETTER U WITH CIRCUMFLEX
0x009f: 0x0192, # LATIN SMALL LETTER F WITH HOOK
0x00a0: 0x00a6, # BROKEN BAR
0x00a1: 0x00b4, # ACUTE ACCENT
0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
0x00a4: 0x00a8, # DIAERESIS
0x00a5: 0x00b8, # CEDILLA
0x00a6: 0x00b3, # SUPERSCRIPT THREE
0x00a7: 0x00af, # MACRON
0x00a8: 0x00ce, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
0x00a9: 0x2310, # REVERSED NOT SIGN
0x00aa: 0x00ac, # NOT SIGN
0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
0x00ad: 0x00be, # VULGAR FRACTION THREE QUARTERS
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x258c, # LEFT HALF BLOCK
0x00de: 0x2590, # RIGHT HALF BLOCK
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x03b1, # GREEK SMALL LETTER ALPHA
0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
0x00e2: 0x0393, # GREEK CAPITAL LETTER GAMMA
0x00e3: 0x03c0, # GREEK SMALL LETTER PI
0x00e4: 0x03a3, # GREEK CAPITAL LETTER SIGMA
0x00e5: 0x03c3, # GREEK SMALL LETTER SIGMA
0x00e6: 0x00b5, # MICRO SIGN
0x00e7: 0x03c4, # GREEK SMALL LETTER TAU
0x00e8: 0x03a6, # GREEK CAPITAL LETTER PHI
0x00e9: 0x0398, # GREEK CAPITAL LETTER THETA
0x00ea: 0x03a9, # GREEK CAPITAL LETTER OMEGA
0x00eb: 0x03b4, # GREEK SMALL LETTER DELTA
0x00ec: 0x221e, # INFINITY
0x00ed: 0x03c6, # GREEK SMALL LETTER PHI
0x00ee: 0x03b5, # GREEK SMALL LETTER EPSILON
0x00ef: 0x2229, # INTERSECTION
0x00f0: 0x2261, # IDENTICAL TO
0x00f1: 0x00b1, # PLUS-MINUS SIGN
0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO
0x00f3: 0x2264, # LESS-THAN OR EQUAL TO
0x00f4: 0x2320, # TOP HALF INTEGRAL
0x00f5: 0x2321, # BOTTOM HALF INTEGRAL
0x00f6: 0x00f7, # DIVISION SIGN
0x00f7: 0x2248, # ALMOST EQUAL TO
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x2219, # BULLET OPERATOR
0x00fa: 0x00b7, # MIDDLE DOT
0x00fb: 0x221a, # SQUARE ROOT
0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N
0x00fd: 0x00b2, # SUPERSCRIPT TWO
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Encoding Map
encoding_map = {}
for k,v in decoding_map.items():
    encoding_map[v] = k

+ 0
- 172
lib/jython/Lib/encodings/cp864.py

@@ -1,172 +0,0 @@
""" Python Character Mapping Codec generated from 'CP864.TXT' with gencodec.py.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_map)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_map)
class StreamWriter(Codec,codecs.StreamWriter):
    pass
class StreamReader(Codec,codecs.StreamReader):
    pass
### encodings module API
def getregentry():
    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0025: 0x066a, # ARABIC PERCENT SIGN
0x0080: 0x00b0, # DEGREE SIGN
0x0081: 0x00b7, # MIDDLE DOT
0x0082: 0x2219, # BULLET OPERATOR
0x0083: 0x221a, # SQUARE ROOT
0x0084: 0x2592, # MEDIUM SHADE
0x0085: 0x2500, # FORMS LIGHT HORIZONTAL
0x0086: 0x2502, # FORMS LIGHT VERTICAL
0x0087: 0x253c, # FORMS LIGHT VERTICAL AND HORIZONTAL
0x0088: 0x2524, # FORMS LIGHT VERTICAL AND LEFT
0x0089: 0x252c, # FORMS LIGHT DOWN AND HORIZONTAL
0x008a: 0x251c, # FORMS LIGHT VERTICAL AND RIGHT
0x008b: 0x2534, # FORMS LIGHT UP AND HORIZONTAL
0x008c: 0x2510, # FORMS LIGHT DOWN AND LEFT
0x008d: 0x250c, # FORMS LIGHT DOWN AND RIGHT
0x008e: 0x2514, # FORMS LIGHT UP AND RIGHT
0x008f: 0x2518, # FORMS LIGHT UP AND LEFT
0x0090: 0x03b2, # GREEK SMALL BETA
0x0091: 0x221e, # INFINITY
0x0092: 0x03c6, # GREEK SMALL PHI
0x0093: 0x00b1, # PLUS-OR-MINUS SIGN
0x0094: 0x00bd, # FRACTION 1/2
0x0095: 0x00bc, # FRACTION 1/4
0x0096: 0x2248, # ALMOST EQUAL TO
0x0097: 0x00ab, # LEFT POINTING GUILLEMET
0x0098: 0x00bb, # RIGHT POINTING GUILLEMET
0x0099: 0xfef7, # ARABIC LIGATURE LAM WITH ALEF WITH HAMZA ABOVE ISOLATED FORM
0x009a: 0xfef8, # ARABIC LIGATURE LAM WITH ALEF WITH HAMZA ABOVE FINAL FORM
0x009b: None, # UNDEFINED
0x009c: None, # UNDEFINED
0x009d: 0xfefb, # ARABIC LIGATURE LAM WITH ALEF ISOLATED FORM
0x009e: 0xfefc, # ARABIC LIGATURE LAM WITH ALEF FINAL FORM
0x009f: None, # UNDEFINED
0x00a1: 0x00ad, # SOFT HYPHEN
0x00a2: 0xfe82, # ARABIC LETTER ALEF WITH MADDA ABOVE FINAL FORM
0x00a5: 0xfe84, # ARABIC LETTER ALEF WITH HAMZA ABOVE FINAL FORM
0x00a6: None, # UNDEFINED
0x00a7: None, # UNDEFINED
0x00a8: 0xfe8e, # ARABIC LETTER ALEF FINAL FORM
0x00a9: 0xfe8f, # ARABIC LETTER BEH ISOLATED FORM
0x00aa: 0xfe95, # ARABIC LETTER TEH ISOLATED FORM
0x00ab: 0xfe99, # ARABIC LETTER THEH ISOLATED FORM
0x00ac: 0x060c, # ARABIC COMMA
0x00ad: 0xfe9d, # ARABIC LETTER JEEM ISOLATED FORM
0x00ae: 0xfea1, # ARABIC LETTER HAH ISOLATED FORM
0x00af: 0xfea5, # ARABIC LETTER KHAH ISOLATED FORM
0x00b0: 0x0660, # ARABIC-INDIC DIGIT ZERO
0x00b1: 0x0661, # ARABIC-INDIC DIGIT ONE
0x00b2: 0x0662, # ARABIC-INDIC DIGIT TWO
0x00b3: 0x0663, # ARABIC-INDIC DIGIT THREE
0x00b4: 0x0664, # ARABIC-INDIC DIGIT FOUR
0x00b5: 0x0665, # ARABIC-INDIC DIGIT FIVE
0x00b6: 0x0666, # ARABIC-INDIC DIGIT SIX
0x00b7: 0x0667, # ARABIC-INDIC DIGIT SEVEN
0x00b8: 0x0668, # ARABIC-INDIC DIGIT EIGHT
0x00b9: 0x0669, # ARABIC-INDIC DIGIT NINE
0x00ba: 0xfed1, # ARABIC LETTER FEH ISOLATED FORM
0x00bb: 0x061b, # ARABIC SEMICOLON
0x00bc: 0xfeb1, # ARABIC LETTER SEEN ISOLATED FORM
0x00bd: 0xfeb5, # ARABIC LETTER SHEEN ISOLATED FORM
0x00be: 0xfeb9, # ARABIC LETTER SAD ISOLATED FORM
0x00bf: 0x061f, # ARABIC QUESTION MARK
0x00c0: 0x00a2, # CENT SIGN
0x00c1: 0xfe80, # ARABIC LETTER HAMZA ISOLATED FORM
0x00c2: 0xfe81, # ARABIC LETTER ALEF WITH MADDA ABOVE ISOLATED FORM
0x00c3: 0xfe83, # ARABIC LETTER ALEF WITH HAMZA ABOVE ISOLATED FORM
0x00c4: 0xfe85, # ARABIC LETTER WAW WITH HAMZA ABOVE ISOLATED FORM
0x00c5: 0xfeca, # ARABIC LETTER AIN FINAL FORM
0x00c6: 0xfe8b, # ARABIC LETTER YEH WITH HAMZA ABOVE INITIAL FORM
0x00c7: 0xfe8d, # ARABIC LETTER ALEF ISOLATED FORM
0x00c8: 0xfe91, # ARABIC LETTER BEH INITIAL FORM
0x00c9: 0xfe93, # ARABIC LETTER TEH MARBUTA ISOLATED FORM
0x00ca: 0xfe97, # ARABIC LETTER TEH INITIAL FORM
0x00cb: 0xfe9b, # ARABIC LETTER THEH INITIAL FORM
0x00cc: 0xfe9f, # ARABIC LETTER JEEM INITIAL FORM
0x00cd: 0xfea3, # ARABIC LETTER HAH INITIAL FORM
0x00ce: 0xfea7, # ARABIC LETTER KHAH INITIAL FORM
0x00cf: 0xfea9, # ARABIC LETTER DAL ISOLATED FORM
0x00d0: 0xfeab, # ARABIC LETTER THAL ISOLATED FORM
0x00d1: 0xfead, # ARABIC LETTER REH ISOLATED FORM
0x00d2: 0xfeaf, # ARABIC LETTER ZAIN ISOLATED FORM
0x00d3: 0xfeb3, # ARABIC LETTER SEEN INITIAL FORM
0x00d4: 0xfeb7, # ARABIC LETTER SHEEN INITIAL FORM
0x00d5: 0xfebb, # ARABIC LETTER SAD INITIAL FORM
0x00d6: 0xfebf, # ARABIC LETTER DAD INITIAL FORM
0x00d7: 0xfec1, # ARABIC LETTER TAH ISOLATED FORM
0x00d8: 0xfec5, # ARABIC LETTER ZAH ISOLATED FORM
0x00d9: 0xfecb, # ARABIC LETTER AIN INITIAL FORM
0x00da: 0xfecf, # ARABIC LETTER GHAIN INITIAL FORM
0x00db: 0x00a6, # BROKEN VERTICAL BAR
0x00dc: 0x00ac, # NOT SIGN
0x00dd: 0x00f7, # DIVISION SIGN
0x00de: 0x00d7, # MULTIPLICATION SIGN
0x00df: 0xfec9, # ARABIC LETTER AIN ISOLATED FORM
0x00e0: 0x0640, # ARABIC TATWEEL
0x00e1: 0xfed3, # ARABIC LETTER FEH INITIAL FORM
0x00e2: 0xfed7, # ARABIC LETTER QAF INITIAL FORM
0x00e3: 0xfedb, # ARABIC LETTER KAF INITIAL FORM
0x00e4: 0xfedf, # ARABIC LETTER LAM INITIAL FORM
0x00e5: 0xfee3, # ARABIC LETTER MEEM INITIAL FORM
0x00e6: 0xfee7, # ARABIC LETTER NOON INITIAL FORM
0x00e7: 0xfeeb, # ARABIC LETTER HEH INITIAL FORM
0x00e8: 0xfeed, # ARABIC LETTER WAW ISOLATED FORM
0x00e9: 0xfeef, # ARABIC LETTER ALEF MAKSURA ISOLATED FORM
0x00ea: 0xfef3, # ARABIC LETTER YEH INITIAL FORM
0x00eb: 0xfebd, # ARABIC LETTER DAD ISOLATED FORM
0x00ec: 0xfecc, # ARABIC LETTER AIN MEDIAL FORM
0x00ed: 0xfece, # ARABIC LETTER GHAIN FINAL FORM
0x00ee: 0xfecd, # ARABIC LETTER GHAIN ISOLATED FORM
0x00ef: 0xfee1, # ARABIC LETTER MEEM ISOLATED FORM
0x00f0: 0xfe7d, # ARABIC SHADDA MEDIAL FORM
0x00f1: 0x0651, # ARABIC SHADDAH
0x00f2: 0xfee5, # ARABIC LETTER NOON ISOLATED FORM
0x00f3: 0xfee9, # ARABIC LETTER HEH ISOLATED FORM
0x00f4: 0xfeec, # ARABIC LETTER HEH MEDIAL FORM
0x00f5: 0xfef0, # ARABIC LETTER ALEF MAKSURA FINAL FORM
0x00f6: 0xfef2, # ARABIC LETTER YEH FINAL FORM
0x00f7: 0xfed0, # ARABIC LETTER GHAIN MEDIAL FORM
0x00f8: 0xfed5, # ARABIC LETTER QAF ISOLATED FORM
0x00f9: 0xfef5, # ARABIC LIGATURE LAM WITH ALEF WITH MADDA ABOVE ISOLATED FORM
0x00fa: 0xfef6, # ARABIC LIGATURE LAM WITH ALEF WITH MADDA ABOVE FINAL FORM
0x00fb: 0xfedd, # ARABIC LETTER LAM ISOLATED FORM
0x00fc: 0xfed9, # ARABIC LETTER KAF ISOLATED FORM
0x00fd: 0xfef1, # ARABIC LETTER YEH ISOLATED FORM
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: None, # UNDEFINED
})
### Encoding Map
encoding_map = {}
for k,v in decoding_map.items():
    encoding_map[v] = k
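
One detail worth noting about cp864 above: unlike the other tables shown here, it also overrides a byte below 0x80 (0x25 -> ARABIC PERCENT SIGN), so plain ASCII does not pass through unchanged. A quick check with a made-up single-override map (not from the deleted file):

```python
import codecs

decoding_map = codecs.make_identity_dict(range(256))
decoding_map[0x25] = 0x066A  # ARABIC PERCENT SIGN, as in the cp864 table above

text, _ = codecs.charmap_decode(b"100%", "strict", decoding_map)
print(text)  # '100\u066a' -- the percent sign decodes to ARABIC PERCENT SIGN
```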

+ 0
- 174
lib/jython/Lib/encodings/cp865.py

@@ -1,174 +0,0 @@
""" Python Character Mapping Codec generated from 'CP865.TXT' with gencodec.py.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_map)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_map)
class StreamWriter(Codec,codecs.StreamWriter):
    pass
class StreamReader(Codec,codecs.StreamReader):
    pass
### encodings module API
def getregentry():
    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
0x0086: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE
0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
0x008b: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS
0x008c: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x008d: 0x00ec, # LATIN SMALL LETTER I WITH GRAVE
0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x008f: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x0091: 0x00e6, # LATIN SMALL LIGATURE AE
0x0092: 0x00c6, # LATIN CAPITAL LIGATURE AE
0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
0x0095: 0x00f2, # LATIN SMALL LETTER O WITH GRAVE
0x0096: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x0097: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE
0x0098: 0x00ff, # LATIN SMALL LETTER Y WITH DIAERESIS
0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x009b: 0x00f8, # LATIN SMALL LETTER O WITH STROKE
0x009c: 0x00a3, # POUND SIGN
0x009d: 0x00d8, # LATIN CAPITAL LETTER O WITH STROKE
0x009e: 0x20a7, # PESETA SIGN
0x009f: 0x0192, # LATIN SMALL LETTER F WITH HOOK
0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
0x00a4: 0x00f1, # LATIN SMALL LETTER N WITH TILDE
0x00a5: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE
0x00a6: 0x00aa, # FEMININE ORDINAL INDICATOR
0x00a7: 0x00ba, # MASCULINE ORDINAL INDICATOR
0x00a8: 0x00bf, # INVERTED QUESTION MARK
0x00a9: 0x2310, # REVERSED NOT SIGN
0x00aa: 0x00ac, # NOT SIGN
0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00a4, # CURRENCY SIGN
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x258c, # LEFT HALF BLOCK
0x00de: 0x2590, # RIGHT HALF BLOCK
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x03b1, # GREEK SMALL LETTER ALPHA
0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
0x00e2: 0x0393, # GREEK CAPITAL LETTER GAMMA
0x00e3: 0x03c0, # GREEK SMALL LETTER PI
0x00e4: 0x03a3, # GREEK CAPITAL LETTER SIGMA
0x00e5: 0x03c3, # GREEK SMALL LETTER SIGMA
0x00e6: 0x00b5, # MICRO SIGN
0x00e7: 0x03c4, # GREEK SMALL LETTER TAU
0x00e8: 0x03a6, # GREEK CAPITAL LETTER PHI
0x00e9: 0x0398, # GREEK CAPITAL LETTER THETA
0x00ea: 0x03a9, # GREEK CAPITAL LETTER OMEGA
0x00eb: 0x03b4, # GREEK SMALL LETTER DELTA
0x00ec: 0x221e, # INFINITY
0x00ed: 0x03c6, # GREEK SMALL LETTER PHI
0x00ee: 0x03b5, # GREEK SMALL LETTER EPSILON
0x00ef: 0x2229, # INTERSECTION
0x00f0: 0x2261, # IDENTICAL TO
0x00f1: 0x00b1, # PLUS-MINUS SIGN
0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO
0x00f3: 0x2264, # LESS-THAN OR EQUAL TO
0x00f4: 0x2320, # TOP HALF INTEGRAL
0x00f5: 0x2321, # BOTTOM HALF INTEGRAL
0x00f6: 0x00f7, # DIVISION SIGN
0x00f7: 0x2248, # ALMOST EQUAL TO
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x2219, # BULLET OPERATOR
0x00fa: 0x00b7, # MIDDLE DOT
0x00fb: 0x221a, # SQUARE ROOT
0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N
0x00fd: 0x00b2, # SUPERSCRIPT TWO
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Encoding Map
encoding_map = {}
for k,v in decoding_map.items():
    encoding_map[v] = k

+ 0
- 174
lib/jython/Lib/encodings/cp866.py

@@ -1,174 +0,0 @@
""" Python Character Mapping Codec generated from 'CP866.TXT' with gencodec.py.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_map)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_map)
class StreamWriter(Codec,codecs.StreamWriter):
    pass
class StreamReader(Codec,codecs.StreamReader):
    pass
### encodings module API
def getregentry():
    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x0410, # CYRILLIC CAPITAL LETTER A
0x0081: 0x0411, # CYRILLIC CAPITAL LETTER BE
0x0082: 0x0412, # CYRILLIC CAPITAL LETTER VE
0x0083: 0x0413, # CYRILLIC CAPITAL LETTER GHE
0x0084: 0x0414, # CYRILLIC CAPITAL LETTER DE
0x0085: 0x0415, # CYRILLIC CAPITAL LETTER IE
0x0086: 0x0416, # CYRILLIC CAPITAL LETTER ZHE
0x0087: 0x0417, # CYRILLIC CAPITAL LETTER ZE
0x0088: 0x0418, # CYRILLIC CAPITAL LETTER I
0x0089: 0x0419, # CYRILLIC CAPITAL LETTER SHORT I
0x008a: 0x041a, # CYRILLIC CAPITAL LETTER KA
0x008b: 0x041b, # CYRILLIC CAPITAL LETTER EL
0x008c: 0x041c, # CYRILLIC CAPITAL LETTER EM
0x008d: 0x041d, # CYRILLIC CAPITAL LETTER EN
0x008e: 0x041e, # CYRILLIC CAPITAL LETTER O
0x008f: 0x041f, # CYRILLIC CAPITAL LETTER PE
0x0090: 0x0420, # CYRILLIC CAPITAL LETTER ER
0x0091: 0x0421, # CYRILLIC CAPITAL LETTER ES
0x0092: 0x0422, # CYRILLIC CAPITAL LETTER TE
0x0093: 0x0423, # CYRILLIC CAPITAL LETTER U
0x0094: 0x0424, # CYRILLIC CAPITAL LETTER EF
0x0095: 0x0425, # CYRILLIC CAPITAL LETTER HA
0x0096: 0x0426, # CYRILLIC CAPITAL LETTER TSE
0x0097: 0x0427, # CYRILLIC CAPITAL LETTER CHE
0x0098: 0x0428, # CYRILLIC CAPITAL LETTER SHA
0x0099: 0x0429, # CYRILLIC CAPITAL LETTER SHCHA
0x009a: 0x042a, # CYRILLIC CAPITAL LETTER HARD SIGN
0x009b: 0x042b, # CYRILLIC CAPITAL LETTER YERU
0x009c: 0x042c, # CYRILLIC CAPITAL LETTER SOFT SIGN
0x009d: 0x042d, # CYRILLIC CAPITAL LETTER E
0x009e: 0x042e, # CYRILLIC CAPITAL LETTER YU
0x009f: 0x042f, # CYRILLIC CAPITAL LETTER YA
0x00a0: 0x0430, # CYRILLIC SMALL LETTER A
0x00a1: 0x0431, # CYRILLIC SMALL LETTER BE
0x00a2: 0x0432, # CYRILLIC SMALL LETTER VE
0x00a3: 0x0433, # CYRILLIC SMALL LETTER GHE
0x00a4: 0x0434, # CYRILLIC SMALL LETTER DE
0x00a5: 0x0435, # CYRILLIC SMALL LETTER IE
0x00a6: 0x0436, # CYRILLIC SMALL LETTER ZHE
0x00a7: 0x0437, # CYRILLIC SMALL LETTER ZE
0x00a8: 0x0438, # CYRILLIC SMALL LETTER I
0x00a9: 0x0439, # CYRILLIC SMALL LETTER SHORT I
0x00aa: 0x043a, # CYRILLIC SMALL LETTER KA
0x00ab: 0x043b, # CYRILLIC SMALL LETTER EL
0x00ac: 0x043c, # CYRILLIC SMALL LETTER EM
0x00ad: 0x043d, # CYRILLIC SMALL LETTER EN
0x00ae: 0x043e, # CYRILLIC SMALL LETTER O
0x00af: 0x043f, # CYRILLIC SMALL LETTER PE
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x258c, # LEFT HALF BLOCK
0x00de: 0x2590, # RIGHT HALF BLOCK
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x0440, # CYRILLIC SMALL LETTER ER
0x00e1: 0x0441, # CYRILLIC SMALL LETTER ES
0x00e2: 0x0442, # CYRILLIC SMALL LETTER TE
0x00e3: 0x0443, # CYRILLIC SMALL LETTER U
0x00e4: 0x0444, # CYRILLIC SMALL LETTER EF
0x00e5: 0x0445, # CYRILLIC SMALL LETTER HA
0x00e6: 0x0446, # CYRILLIC SMALL LETTER TSE
0x00e7: 0x0447, # CYRILLIC SMALL LETTER CHE
0x00e8: 0x0448, # CYRILLIC SMALL LETTER SHA
0x00e9: 0x0449, # CYRILLIC SMALL LETTER SHCHA
0x00ea: 0x044a, # CYRILLIC SMALL LETTER HARD SIGN
0x00eb: 0x044b, # CYRILLIC SMALL LETTER YERU
0x00ec: 0x044c, # CYRILLIC SMALL LETTER SOFT SIGN
0x00ed: 0x044d, # CYRILLIC SMALL LETTER E
0x00ee: 0x044e, # CYRILLIC SMALL LETTER YU
0x00ef: 0x044f, # CYRILLIC SMALL LETTER YA
0x00f0: 0x0401, # CYRILLIC CAPITAL LETTER IO
0x00f1: 0x0451, # CYRILLIC SMALL LETTER IO
0x00f2: 0x0404, # CYRILLIC CAPITAL LETTER UKRAINIAN IE
0x00f3: 0x0454, # CYRILLIC SMALL LETTER UKRAINIAN IE
0x00f4: 0x0407, # CYRILLIC CAPITAL LETTER YI
0x00f5: 0x0457, # CYRILLIC SMALL LETTER YI
0x00f6: 0x040e, # CYRILLIC CAPITAL LETTER SHORT U
0x00f7: 0x045e, # CYRILLIC SMALL LETTER SHORT U
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x2219, # BULLET OPERATOR
0x00fa: 0x00b7, # MIDDLE DOT
0x00fb: 0x221a, # SQUARE ROOT
0x00fc: 0x2116, # NUMERO SIGN
0x00fd: 0x00a4, # CURRENCY SIGN
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Encoding Map
encoding_map = {}
for k,v in decoding_map.items():
    encoding_map[v] = k

+ 0
- 174
lib/jython/Lib/encodings/cp869.py

@@ -1,174 +0,0 @@
""" Python Character Mapping Codec generated from 'CP869.TXT' with gencodec.py.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_map)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_map)
class StreamWriter(Codec,codecs.StreamWriter):
    pass
class StreamReader(Codec,codecs.StreamReader):
    pass
### encodings module API
def getregentry():
    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: None, # UNDEFINED
0x0081: None, # UNDEFINED
0x0082: None, # UNDEFINED
0x0083: None, # UNDEFINED
0x0084: None, # UNDEFINED
0x0085: None, # UNDEFINED
0x0086: 0x0386, # GREEK CAPITAL LETTER ALPHA WITH TONOS
0x0087: None, # UNDEFINED
0x0088: 0x00b7, # MIDDLE DOT
0x0089: 0x00ac, # NOT SIGN
0x008a: 0x00a6, # BROKEN BAR
0x008b: 0x2018, # LEFT SINGLE QUOTATION MARK
0x008c: 0x2019, # RIGHT SINGLE QUOTATION MARK
0x008d: 0x0388, # GREEK CAPITAL LETTER EPSILON WITH TONOS
0x008e: 0x2015, # HORIZONTAL BAR
0x008f: 0x0389, # GREEK CAPITAL LETTER ETA WITH TONOS
0x0090: 0x038a, # GREEK CAPITAL LETTER IOTA WITH TONOS
0x0091: 0x03aa, # GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
0x0092: 0x038c, # GREEK CAPITAL LETTER OMICRON WITH TONOS
0x0093: None, # UNDEFINED
0x0094: None, # UNDEFINED
0x0095: 0x038e, # GREEK CAPITAL LETTER UPSILON WITH TONOS
0x0096: 0x03ab, # GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
0x0097: 0x00a9, # COPYRIGHT SIGN
0x0098: 0x038f, # GREEK CAPITAL LETTER OMEGA WITH TONOS
0x0099: 0x00b2, # SUPERSCRIPT TWO
0x009a: 0x00b3, # SUPERSCRIPT THREE
0x009b: 0x03ac, # GREEK SMALL LETTER ALPHA WITH TONOS
0x009c: 0x00a3, # POUND SIGN
0x009d: 0x03ad, # GREEK SMALL LETTER EPSILON WITH TONOS
0x009e: 0x03ae, # GREEK SMALL LETTER ETA WITH TONOS
0x009f: 0x03af, # GREEK SMALL LETTER IOTA WITH TONOS
0x00a0: 0x03ca, # GREEK SMALL LETTER IOTA WITH DIALYTIKA
0x00a1: 0x0390, # GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS
0x00a2: 0x03cc, # GREEK SMALL LETTER OMICRON WITH TONOS
0x00a3: 0x03cd, # GREEK SMALL LETTER UPSILON WITH TONOS
0x00a4: 0x0391, # GREEK CAPITAL LETTER ALPHA
0x00a5: 0x0392, # GREEK CAPITAL LETTER BETA
0x00a6: 0x0393, # GREEK CAPITAL LETTER GAMMA
0x00a7: 0x0394, # GREEK CAPITAL LETTER DELTA
0x00a8: 0x0395, # GREEK CAPITAL LETTER EPSILON
0x00a9: 0x0396, # GREEK CAPITAL LETTER ZETA
0x00aa: 0x0397, # GREEK CAPITAL LETTER ETA
0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
0x00ac: 0x0398, # GREEK CAPITAL LETTER THETA
0x00ad: 0x0399, # GREEK CAPITAL LETTER IOTA
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x039a, # GREEK CAPITAL LETTER KAPPA
0x00b6: 0x039b, # GREEK CAPITAL LETTER LAMDA
0x00b7: 0x039c, # GREEK CAPITAL LETTER MU
0x00b8: 0x039d, # GREEK CAPITAL LETTER NU
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x039e, # GREEK CAPITAL LETTER XI
0x00be: 0x039f, # GREEK CAPITAL LETTER OMICRON
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x03a0, # GREEK CAPITAL LETTER PI
0x00c7: 0x03a1, # GREEK CAPITAL LETTER RHO
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x03a3, # GREEK CAPITAL LETTER SIGMA
0x00d0: 0x03a4, # GREEK CAPITAL LETTER TAU
0x00d1: 0x03a5, # GREEK CAPITAL LETTER UPSILON
0x00d2: 0x03a6, # GREEK CAPITAL LETTER PHI
0x00d3: 0x03a7, # GREEK CAPITAL LETTER CHI
0x00d4: 0x03a8, # GREEK CAPITAL LETTER PSI
0x00d5: 0x03a9, # GREEK CAPITAL LETTER OMEGA
0x00d6: 0x03b1, # GREEK SMALL LETTER ALPHA
0x00d7: 0x03b2, # GREEK SMALL LETTER BETA
0x00d8: 0x03b3, # GREEK SMALL LETTER GAMMA
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x03b4, # GREEK SMALL LETTER DELTA
0x00de: 0x03b5, # GREEK SMALL LETTER EPSILON
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x03b6, # GREEK SMALL LETTER ZETA
0x00e1: 0x03b7, # GREEK SMALL LETTER ETA
0x00e2: 0x03b8, # GREEK SMALL LETTER THETA
0x00e3: 0x03b9, # GREEK SMALL LETTER IOTA
0x00e4: 0x03ba, # GREEK SMALL LETTER KAPPA
0x00e5: 0x03bb, # GREEK SMALL LETTER LAMDA
0x00e6: 0x03bc, # GREEK SMALL LETTER MU
0x00e7: 0x03bd, # GREEK SMALL LETTER NU
0x00e8: 0x03be, # GREEK SMALL LETTER XI
0x00e9: 0x03bf, # GREEK SMALL LETTER OMICRON
0x00ea: 0x03c0, # GREEK SMALL LETTER PI
0x00eb: 0x03c1, # GREEK SMALL LETTER RHO
0x00ec: 0x03c3, # GREEK SMALL LETTER SIGMA
0x00ed: 0x03c2, # GREEK SMALL LETTER FINAL SIGMA
0x00ee: 0x03c4, # GREEK SMALL LETTER TAU
0x00ef: 0x0384, # GREEK TONOS
0x00f0: 0x00ad, # SOFT HYPHEN
0x00f1: 0x00b1, # PLUS-MINUS SIGN
0x00f2: 0x03c5, # GREEK SMALL LETTER UPSILON
0x00f3: 0x03c6, # GREEK SMALL LETTER PHI
0x00f4: 0x03c7, # GREEK SMALL LETTER CHI
0x00f5: 0x00a7, # SECTION SIGN
0x00f6: 0x03c8, # GREEK SMALL LETTER PSI
0x00f7: 0x0385, # GREEK DIALYTIKA TONOS
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x00a8, # DIAERESIS
0x00fa: 0x03c9, # GREEK SMALL LETTER OMEGA
0x00fb: 0x03cb, # GREEK SMALL LETTER UPSILON WITH DIALYTIKA
0x00fc: 0x03b0, # GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS
0x00fd: 0x03ce, # GREEK SMALL LETTER OMEGA WITH TONOS
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Encoding Map
encoding_map = {}
for k,v in decoding_map.items():
    encoding_map[v] = k
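The generated codec modules in this deletion all share the same recipe: a decoding_map from byte values to Unicode ordinals (None marking undefined bytes) and an inverted encoding_map, both handed to codecs.charmap_*. A minimal round-trip sketch, assuming the Python 2-era codecs API these Jython library files target; the two-entry map below is hypothetical, not taken from any deleted file:

import codecs

decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
    0x0080: 0x20ac,   # hypothetical: byte 0x80 decodes to the EURO SIGN
    0x00ff: None,     # None marks the byte as undefined
})
encoding_map = {}
for k, v in decoding_map.items():
    encoding_map[v] = k          # invert: Unicode ordinal -> byte value

text, consumed = codecs.charmap_decode('\x80abc', 'strict', decoding_map)
# text == u'\u20acabc', consumed == 4
data, written = codecs.charmap_encode(text, 'strict', encoding_map)
# data == '\x80abc'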

+ 0
- 173
lib/jython/Lib/encodings/cp874.py

@@ -1,173 +0,0 @@
""" Python Character Mapping Codec generated from 'CP874.TXT' with gencodec.py.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_map)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_map)
class StreamWriter(Codec,codecs.StreamWriter):
    pass
class StreamReader(Codec,codecs.StreamReader):
    pass
### encodings module API
def getregentry():
    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x20ac, # EURO SIGN
0x0081: None, # UNDEFINED
0x0082: None, # UNDEFINED
0x0083: None, # UNDEFINED
0x0084: None, # UNDEFINED
0x0085: 0x2026, # HORIZONTAL ELLIPSIS
0x0086: None, # UNDEFINED
0x0087: None, # UNDEFINED
0x0088: None, # UNDEFINED
0x0089: None, # UNDEFINED
0x008a: None, # UNDEFINED
0x008b: None, # UNDEFINED
0x008c: None, # UNDEFINED
0x008d: None, # UNDEFINED
0x008e: None, # UNDEFINED
0x008f: None, # UNDEFINED
0x0090: None, # UNDEFINED
0x0091: 0x2018, # LEFT SINGLE QUOTATION MARK
0x0092: 0x2019, # RIGHT SINGLE QUOTATION MARK
0x0093: 0x201c, # LEFT DOUBLE QUOTATION MARK
0x0094: 0x201d, # RIGHT DOUBLE QUOTATION MARK
0x0095: 0x2022, # BULLET
0x0096: 0x2013, # EN DASH
0x0097: 0x2014, # EM DASH
0x0098: None, # UNDEFINED
0x0099: None, # UNDEFINED
0x009a: None, # UNDEFINED
0x009b: None, # UNDEFINED
0x009c: None, # UNDEFINED
0x009d: None, # UNDEFINED
0x009e: None, # UNDEFINED
0x009f: None, # UNDEFINED
0x00a1: 0x0e01, # THAI CHARACTER KO KAI
0x00a2: 0x0e02, # THAI CHARACTER KHO KHAI
0x00a3: 0x0e03, # THAI CHARACTER KHO KHUAT
0x00a4: 0x0e04, # THAI CHARACTER KHO KHWAI
0x00a5: 0x0e05, # THAI CHARACTER KHO KHON
0x00a6: 0x0e06, # THAI CHARACTER KHO RAKHANG
0x00a7: 0x0e07, # THAI CHARACTER NGO NGU
0x00a8: 0x0e08, # THAI CHARACTER CHO CHAN
0x00a9: 0x0e09, # THAI CHARACTER CHO CHING
0x00aa: 0x0e0a, # THAI CHARACTER CHO CHANG
0x00ab: 0x0e0b, # THAI CHARACTER SO SO
0x00ac: 0x0e0c, # THAI CHARACTER CHO CHOE
0x00ad: 0x0e0d, # THAI CHARACTER YO YING
0x00ae: 0x0e0e, # THAI CHARACTER DO CHADA
0x00af: 0x0e0f, # THAI CHARACTER TO PATAK
0x00b0: 0x0e10, # THAI CHARACTER THO THAN
0x00b1: 0x0e11, # THAI CHARACTER THO NANGMONTHO
0x00b2: 0x0e12, # THAI CHARACTER THO PHUTHAO
0x00b3: 0x0e13, # THAI CHARACTER NO NEN
0x00b4: 0x0e14, # THAI CHARACTER DO DEK
0x00b5: 0x0e15, # THAI CHARACTER TO TAO
0x00b6: 0x0e16, # THAI CHARACTER THO THUNG
0x00b7: 0x0e17, # THAI CHARACTER THO THAHAN
0x00b8: 0x0e18, # THAI CHARACTER THO THONG
0x00b9: 0x0e19, # THAI CHARACTER NO NU
0x00ba: 0x0e1a, # THAI CHARACTER BO BAIMAI
0x00bb: 0x0e1b, # THAI CHARACTER PO PLA
0x00bc: 0x0e1c, # THAI CHARACTER PHO PHUNG
0x00bd: 0x0e1d, # THAI CHARACTER FO FA
0x00be: 0x0e1e, # THAI CHARACTER PHO PHAN
0x00bf: 0x0e1f, # THAI CHARACTER FO FAN
0x00c0: 0x0e20, # THAI CHARACTER PHO SAMPHAO
0x00c1: 0x0e21, # THAI CHARACTER MO MA
0x00c2: 0x0e22, # THAI CHARACTER YO YAK
0x00c3: 0x0e23, # THAI CHARACTER RO RUA
0x00c4: 0x0e24, # THAI CHARACTER RU
0x00c5: 0x0e25, # THAI CHARACTER LO LING
0x00c6: 0x0e26, # THAI CHARACTER LU
0x00c7: 0x0e27, # THAI CHARACTER WO WAEN
0x00c8: 0x0e28, # THAI CHARACTER SO SALA
0x00c9: 0x0e29, # THAI CHARACTER SO RUSI
0x00ca: 0x0e2a, # THAI CHARACTER SO SUA
0x00cb: 0x0e2b, # THAI CHARACTER HO HIP
0x00cc: 0x0e2c, # THAI CHARACTER LO CHULA
0x00cd: 0x0e2d, # THAI CHARACTER O ANG
0x00ce: 0x0e2e, # THAI CHARACTER HO NOKHUK
0x00cf: 0x0e2f, # THAI CHARACTER PAIYANNOI
0x00d0: 0x0e30, # THAI CHARACTER SARA A
0x00d1: 0x0e31, # THAI CHARACTER MAI HAN-AKAT
0x00d2: 0x0e32, # THAI CHARACTER SARA AA
0x00d3: 0x0e33, # THAI CHARACTER SARA AM
0x00d4: 0x0e34, # THAI CHARACTER SARA I
0x00d5: 0x0e35, # THAI CHARACTER SARA II
0x00d6: 0x0e36, # THAI CHARACTER SARA UE
0x00d7: 0x0e37, # THAI CHARACTER SARA UEE
0x00d8: 0x0e38, # THAI CHARACTER SARA U
0x00d9: 0x0e39, # THAI CHARACTER SARA UU
0x00da: 0x0e3a, # THAI CHARACTER PHINTHU
0x00db: None, # UNDEFINED
0x00dc: None, # UNDEFINED
0x00dd: None, # UNDEFINED
0x00de: None, # UNDEFINED
0x00df: 0x0e3f, # THAI CURRENCY SYMBOL BAHT
0x00e0: 0x0e40, # THAI CHARACTER SARA E
0x00e1: 0x0e41, # THAI CHARACTER SARA AE
0x00e2: 0x0e42, # THAI CHARACTER SARA O
0x00e3: 0x0e43, # THAI CHARACTER SARA AI MAIMUAN
0x00e4: 0x0e44, # THAI CHARACTER SARA AI MAIMALAI
0x00e5: 0x0e45, # THAI CHARACTER LAKKHANGYAO
0x00e6: 0x0e46, # THAI CHARACTER MAIYAMOK
0x00e7: 0x0e47, # THAI CHARACTER MAITAIKHU
0x00e8: 0x0e48, # THAI CHARACTER MAI EK
0x00e9: 0x0e49, # THAI CHARACTER MAI THO
0x00ea: 0x0e4a, # THAI CHARACTER MAI TRI
0x00eb: 0x0e4b, # THAI CHARACTER MAI CHATTAWA
0x00ec: 0x0e4c, # THAI CHARACTER THANTHAKHAT
0x00ed: 0x0e4d, # THAI CHARACTER NIKHAHIT
0x00ee: 0x0e4e, # THAI CHARACTER YAMAKKAN
0x00ef: 0x0e4f, # THAI CHARACTER FONGMAN
0x00f0: 0x0e50, # THAI DIGIT ZERO
0x00f1: 0x0e51, # THAI DIGIT ONE
0x00f2: 0x0e52, # THAI DIGIT TWO
0x00f3: 0x0e53, # THAI DIGIT THREE
0x00f4: 0x0e54, # THAI DIGIT FOUR
0x00f5: 0x0e55, # THAI DIGIT FIVE
0x00f6: 0x0e56, # THAI DIGIT SIX
0x00f7: 0x0e57, # THAI DIGIT SEVEN
0x00f8: 0x0e58, # THAI DIGIT EIGHT
0x00f9: 0x0e59, # THAI DIGIT NINE
0x00fa: 0x0e5a, # THAI CHARACTER ANGKHANKHU
0x00fb: 0x0e5b, # THAI CHARACTER KHOMUT
0x00fc: None, # UNDEFINED
0x00fd: None, # UNDEFINED
0x00fe: None, # UNDEFINED
0x00ff: None, # UNDEFINED
})
### Encoding Map
encoding_map = {}
for k,v in decoding_map.items():
    encoding_map[v] = k

+ 0
- 283
lib/jython/Lib/encodings/cp875.py

@@ -1,283 +0,0 @@
""" Python Character Mapping Codec generated from 'CP875.TXT' with gencodec.py.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_map)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_map)
class StreamWriter(Codec,codecs.StreamWriter):
    pass
class StreamReader(Codec,codecs.StreamReader):
    pass
### encodings module API
def getregentry():
    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0004: 0x009c, # CONTROL
0x0005: 0x0009, # HORIZONTAL TABULATION
0x0006: 0x0086, # CONTROL
0x0007: 0x007f, # DELETE
0x0008: 0x0097, # CONTROL
0x0009: 0x008d, # CONTROL
0x000a: 0x008e, # CONTROL
0x0014: 0x009d, # CONTROL
0x0015: 0x0085, # CONTROL
0x0016: 0x0008, # BACKSPACE
0x0017: 0x0087, # CONTROL
0x001a: 0x0092, # CONTROL
0x001b: 0x008f, # CONTROL
0x0020: 0x0080, # CONTROL
0x0021: 0x0081, # CONTROL
0x0022: 0x0082, # CONTROL
0x0023: 0x0083, # CONTROL
0x0024: 0x0084, # CONTROL
0x0025: 0x000a, # LINE FEED
0x0026: 0x0017, # END OF TRANSMISSION BLOCK
0x0027: 0x001b, # ESCAPE
0x0028: 0x0088, # CONTROL
0x0029: 0x0089, # CONTROL
0x002a: 0x008a, # CONTROL
0x002b: 0x008b, # CONTROL
0x002c: 0x008c, # CONTROL
0x002d: 0x0005, # ENQUIRY
0x002e: 0x0006, # ACKNOWLEDGE
0x002f: 0x0007, # BELL
0x0030: 0x0090, # CONTROL
0x0031: 0x0091, # CONTROL
0x0032: 0x0016, # SYNCHRONOUS IDLE
0x0033: 0x0093, # CONTROL
0x0034: 0x0094, # CONTROL
0x0035: 0x0095, # CONTROL
0x0036: 0x0096, # CONTROL
0x0037: 0x0004, # END OF TRANSMISSION
0x0038: 0x0098, # CONTROL
0x0039: 0x0099, # CONTROL
0x003a: 0x009a, # CONTROL
0x003b: 0x009b, # CONTROL
0x003c: 0x0014, # DEVICE CONTROL FOUR
0x003d: 0x0015, # NEGATIVE ACKNOWLEDGE
0x003e: 0x009e, # CONTROL
0x003f: 0x001a, # SUBSTITUTE
0x0040: 0x0020, # SPACE
0x0041: 0x0391, # GREEK CAPITAL LETTER ALPHA
0x0042: 0x0392, # GREEK CAPITAL LETTER BETA
0x0043: 0x0393, # GREEK CAPITAL LETTER GAMMA
0x0044: 0x0394, # GREEK CAPITAL LETTER DELTA
0x0045: 0x0395, # GREEK CAPITAL LETTER EPSILON
0x0046: 0x0396, # GREEK CAPITAL LETTER ZETA
0x0047: 0x0397, # GREEK CAPITAL LETTER ETA
0x0048: 0x0398, # GREEK CAPITAL LETTER THETA
0x0049: 0x0399, # GREEK CAPITAL LETTER IOTA
0x004a: 0x005b, # LEFT SQUARE BRACKET
0x004b: 0x002e, # FULL STOP
0x004c: 0x003c, # LESS-THAN SIGN
0x004d: 0x0028, # LEFT PARENTHESIS
0x004e: 0x002b, # PLUS SIGN
0x004f: 0x0021, # EXCLAMATION MARK
0x0050: 0x0026, # AMPERSAND
0x0051: 0x039a, # GREEK CAPITAL LETTER KAPPA
0x0052: 0x039b, # GREEK CAPITAL LETTER LAMDA
0x0053: 0x039c, # GREEK CAPITAL LETTER MU
0x0054: 0x039d, # GREEK CAPITAL LETTER NU
0x0055: 0x039e, # GREEK CAPITAL LETTER XI
0x0056: 0x039f, # GREEK CAPITAL LETTER OMICRON
0x0057: 0x03a0, # GREEK CAPITAL LETTER PI
0x0058: 0x03a1, # GREEK CAPITAL LETTER RHO
0x0059: 0x03a3, # GREEK CAPITAL LETTER SIGMA
0x005a: 0x005d, # RIGHT SQUARE BRACKET
0x005b: 0x0024, # DOLLAR SIGN
0x005c: 0x002a, # ASTERISK
0x005d: 0x0029, # RIGHT PARENTHESIS
0x005e: 0x003b, # SEMICOLON
0x005f: 0x005e, # CIRCUMFLEX ACCENT
0x0060: 0x002d, # HYPHEN-MINUS
0x0061: 0x002f, # SOLIDUS
0x0062: 0x03a4, # GREEK CAPITAL LETTER TAU
0x0063: 0x03a5, # GREEK CAPITAL LETTER UPSILON
0x0064: 0x03a6, # GREEK CAPITAL LETTER PHI
0x0065: 0x03a7, # GREEK CAPITAL LETTER CHI
0x0066: 0x03a8, # GREEK CAPITAL LETTER PSI
0x0067: 0x03a9, # GREEK CAPITAL LETTER OMEGA
0x0068: 0x03aa, # GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
0x0069: 0x03ab, # GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
0x006a: 0x007c, # VERTICAL LINE
0x006b: 0x002c, # COMMA
0x006c: 0x0025, # PERCENT SIGN
0x006d: 0x005f, # LOW LINE
0x006e: 0x003e, # GREATER-THAN SIGN
0x006f: 0x003f, # QUESTION MARK
0x0070: 0x00a8, # DIAERESIS
0x0071: 0x0386, # GREEK CAPITAL LETTER ALPHA WITH TONOS
0x0072: 0x0388, # GREEK CAPITAL LETTER EPSILON WITH TONOS
0x0073: 0x0389, # GREEK CAPITAL LETTER ETA WITH TONOS
0x0074: 0x00a0, # NO-BREAK SPACE
0x0075: 0x038a, # GREEK CAPITAL LETTER IOTA WITH TONOS
0x0076: 0x038c, # GREEK CAPITAL LETTER OMICRON WITH TONOS
0x0077: 0x038e, # GREEK CAPITAL LETTER UPSILON WITH TONOS
0x0078: 0x038f, # GREEK CAPITAL LETTER OMEGA WITH TONOS
0x0079: 0x0060, # GRAVE ACCENT
0x007a: 0x003a, # COLON
0x007b: 0x0023, # NUMBER SIGN
0x007c: 0x0040, # COMMERCIAL AT
0x007d: 0x0027, # APOSTROPHE
0x007e: 0x003d, # EQUALS SIGN
0x007f: 0x0022, # QUOTATION MARK
0x0080: 0x0385, # GREEK DIALYTIKA TONOS
0x0081: 0x0061, # LATIN SMALL LETTER A
0x0082: 0x0062, # LATIN SMALL LETTER B
0x0083: 0x0063, # LATIN SMALL LETTER C
0x0084: 0x0064, # LATIN SMALL LETTER D
0x0085: 0x0065, # LATIN SMALL LETTER E
0x0086: 0x0066, # LATIN SMALL LETTER F
0x0087: 0x0067, # LATIN SMALL LETTER G
0x0088: 0x0068, # LATIN SMALL LETTER H
0x0089: 0x0069, # LATIN SMALL LETTER I
0x008a: 0x03b1, # GREEK SMALL LETTER ALPHA
0x008b: 0x03b2, # GREEK SMALL LETTER BETA
0x008c: 0x03b3, # GREEK SMALL LETTER GAMMA
0x008d: 0x03b4, # GREEK SMALL LETTER DELTA
0x008e: 0x03b5, # GREEK SMALL LETTER EPSILON
0x008f: 0x03b6, # GREEK SMALL LETTER ZETA
0x0090: 0x00b0, # DEGREE SIGN
0x0091: 0x006a, # LATIN SMALL LETTER J
0x0092: 0x006b, # LATIN SMALL LETTER K
0x0093: 0x006c, # LATIN SMALL LETTER L
0x0094: 0x006d, # LATIN SMALL LETTER M
0x0095: 0x006e, # LATIN SMALL LETTER N
0x0096: 0x006f, # LATIN SMALL LETTER O
0x0097: 0x0070, # LATIN SMALL LETTER P
0x0098: 0x0071, # LATIN SMALL LETTER Q
0x0099: 0x0072, # LATIN SMALL LETTER R
0x009a: 0x03b7, # GREEK SMALL LETTER ETA
0x009b: 0x03b8, # GREEK SMALL LETTER THETA
0x009c: 0x03b9, # GREEK SMALL LETTER IOTA
0x009d: 0x03ba, # GREEK SMALL LETTER KAPPA
0x009e: 0x03bb, # GREEK SMALL LETTER LAMDA
0x009f: 0x03bc, # GREEK SMALL LETTER MU
0x00a0: 0x00b4, # ACUTE ACCENT
0x00a1: 0x007e, # TILDE
0x00a2: 0x0073, # LATIN SMALL LETTER S
0x00a3: 0x0074, # LATIN SMALL LETTER T
0x00a4: 0x0075, # LATIN SMALL LETTER U
0x00a5: 0x0076, # LATIN SMALL LETTER V
0x00a6: 0x0077, # LATIN SMALL LETTER W
0x00a7: 0x0078, # LATIN SMALL LETTER X
0x00a8: 0x0079, # LATIN SMALL LETTER Y
0x00a9: 0x007a, # LATIN SMALL LETTER Z
0x00aa: 0x03bd, # GREEK SMALL LETTER NU
0x00ab: 0x03be, # GREEK SMALL LETTER XI
0x00ac: 0x03bf, # GREEK SMALL LETTER OMICRON
0x00ad: 0x03c0, # GREEK SMALL LETTER PI
0x00ae: 0x03c1, # GREEK SMALL LETTER RHO
0x00af: 0x03c3, # GREEK SMALL LETTER SIGMA
0x00b0: 0x00a3, # POUND SIGN
0x00b1: 0x03ac, # GREEK SMALL LETTER ALPHA WITH TONOS
0x00b2: 0x03ad, # GREEK SMALL LETTER EPSILON WITH TONOS
0x00b3: 0x03ae, # GREEK SMALL LETTER ETA WITH TONOS
0x00b4: 0x03ca, # GREEK SMALL LETTER IOTA WITH DIALYTIKA
0x00b5: 0x03af, # GREEK SMALL LETTER IOTA WITH TONOS
0x00b6: 0x03cc, # GREEK SMALL LETTER OMICRON WITH TONOS
0x00b7: 0x03cd, # GREEK SMALL LETTER UPSILON WITH TONOS
0x00b8: 0x03cb, # GREEK SMALL LETTER UPSILON WITH DIALYTIKA
0x00b9: 0x03ce, # GREEK SMALL LETTER OMEGA WITH TONOS
0x00ba: 0x03c2, # GREEK SMALL LETTER FINAL SIGMA
0x00bb: 0x03c4, # GREEK SMALL LETTER TAU
0x00bc: 0x03c5, # GREEK SMALL LETTER UPSILON
0x00bd: 0x03c6, # GREEK SMALL LETTER PHI
0x00be: 0x03c7, # GREEK SMALL LETTER CHI
0x00bf: 0x03c8, # GREEK SMALL LETTER PSI
0x00c0: 0x007b, # LEFT CURLY BRACKET
0x00c1: 0x0041, # LATIN CAPITAL LETTER A
0x00c2: 0x0042, # LATIN CAPITAL LETTER B
0x00c3: 0x0043, # LATIN CAPITAL LETTER C
0x00c4: 0x0044, # LATIN CAPITAL LETTER D
0x00c5: 0x0045, # LATIN CAPITAL LETTER E
0x00c6: 0x0046, # LATIN CAPITAL LETTER F
0x00c7: 0x0047, # LATIN CAPITAL LETTER G
0x00c8: 0x0048, # LATIN CAPITAL LETTER H
0x00c9: 0x0049, # LATIN CAPITAL LETTER I
0x00ca: 0x00ad, # SOFT HYPHEN
0x00cb: 0x03c9, # GREEK SMALL LETTER OMEGA
0x00cc: 0x0390, # GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS
0x00cd: 0x03b0, # GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS
0x00ce: 0x2018, # LEFT SINGLE QUOTATION MARK
0x00cf: 0x2015, # HORIZONTAL BAR
0x00d0: 0x007d, # RIGHT CURLY BRACKET
0x00d1: 0x004a, # LATIN CAPITAL LETTER J
0x00d2: 0x004b, # LATIN CAPITAL LETTER K
0x00d3: 0x004c, # LATIN CAPITAL LETTER L
0x00d4: 0x004d, # LATIN CAPITAL LETTER M
0x00d5: 0x004e, # LATIN CAPITAL LETTER N
0x00d6: 0x004f, # LATIN CAPITAL LETTER O
0x00d7: 0x0050, # LATIN CAPITAL LETTER P
0x00d8: 0x0051, # LATIN CAPITAL LETTER Q
0x00d9: 0x0052, # LATIN CAPITAL LETTER R
0x00da: 0x00b1, # PLUS-MINUS SIGN
0x00db: 0x00bd, # VULGAR FRACTION ONE HALF
0x00dc: 0x001a, # SUBSTITUTE
0x00dd: 0x0387, # GREEK ANO TELEIA
0x00de: 0x2019, # RIGHT SINGLE QUOTATION MARK
0x00df: 0x00a6, # BROKEN BAR
0x00e0: 0x005c, # REVERSE SOLIDUS
0x00e1: 0x001a, # SUBSTITUTE
0x00e2: 0x0053, # LATIN CAPITAL LETTER S
0x00e3: 0x0054, # LATIN CAPITAL LETTER T
0x00e4: 0x0055, # LATIN CAPITAL LETTER U
0x00e5: 0x0056, # LATIN CAPITAL LETTER V
0x00e6: 0x0057, # LATIN CAPITAL LETTER W
0x00e7: 0x0058, # LATIN CAPITAL LETTER X
0x00e8: 0x0059, # LATIN CAPITAL LETTER Y
0x00e9: 0x005a, # LATIN CAPITAL LETTER Z
0x00ea: 0x00b2, # SUPERSCRIPT TWO
0x00eb: 0x00a7, # SECTION SIGN
0x00ec: 0x001a, # SUBSTITUTE
0x00ed: 0x001a, # SUBSTITUTE
0x00ee: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00ef: 0x00ac, # NOT SIGN
0x00f0: 0x0030, # DIGIT ZERO
0x00f1: 0x0031, # DIGIT ONE
0x00f2: 0x0032, # DIGIT TWO
0x00f3: 0x0033, # DIGIT THREE
0x00f4: 0x0034, # DIGIT FOUR
0x00f5: 0x0035, # DIGIT FIVE
0x00f6: 0x0036, # DIGIT SIX
0x00f7: 0x0037, # DIGIT SEVEN
0x00f8: 0x0038, # DIGIT EIGHT
0x00f9: 0x0039, # DIGIT NINE
0x00fa: 0x00b3, # SUPERSCRIPT THREE
0x00fb: 0x00a9, # COPYRIGHT SIGN
0x00fc: 0x001a, # SUBSTITUTE
0x00fd: 0x001a, # SUBSTITUTE
0x00fe: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00ff: 0x009f, # CONTROL
})
### Encoding Map
encoding_map = {}
for k,v in decoding_map.items():
    encoding_map[v] = k

+ 0
- 46
lib/jython/Lib/encodings/iso8859_1.py

@@ -1,46 +0,0 @@
""" Python Character Mapping Codec generated from '8859-1.TXT' with gencodec.py.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_map)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_map)
class StreamWriter(Codec,codecs.StreamWriter):
    pass
class StreamReader(Codec,codecs.StreamReader):
    pass
### encodings module API
def getregentry():
    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
})
### Encoding Map
encoding_map = {}
for k,v in decoding_map.items():
    encoding_map[v] = k
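iso8859_1 above carries only the identity mapping, yet it still exposes the same four-tuple getregentry() API, because the encodings package resolves a codec name by importing the like-named module and calling that function. A sketch of the lookup path, assuming Python 2-era codecs.lookup() semantics; the snippet is illustrative and not taken from the deleted files:

import codecs

# codecs.lookup() drives encodings.search_function: 'iso8859-2' is normalised
# to 'iso8859_2', encodings.iso8859_2 is imported, and the four-tuple returned
# by its getregentry() is cached and handed back.
encode, decode, StreamReader, StreamWriter = codecs.lookup('iso8859-2')

data, written = encode(u'\u0141\xf3d\u017a')   # 'Lodz' with Polish diacritics -> '\xa3\xf3d\xbc'
text, consumed = decode(data)                  # back to the original Unicode string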

+ 0
- 92
lib/jython/Lib/encodings/iso8859_10.py

@@ -1,92 +0,0 @@
""" Python Character Mapping Codec generated from '8859-10.TXT' with gencodec.py.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_map)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_map)
class StreamWriter(Codec,codecs.StreamWriter):
    pass
class StreamReader(Codec,codecs.StreamReader):
    pass
### encodings module API
def getregentry():
    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x00a1: 0x0104, # LATIN CAPITAL LETTER A WITH OGONEK
0x00a2: 0x0112, # LATIN CAPITAL LETTER E WITH MACRON
0x00a3: 0x0122, # LATIN CAPITAL LETTER G WITH CEDILLA
0x00a4: 0x012a, # LATIN CAPITAL LETTER I WITH MACRON
0x00a5: 0x0128, # LATIN CAPITAL LETTER I WITH TILDE
0x00a6: 0x0136, # LATIN CAPITAL LETTER K WITH CEDILLA
0x00a8: 0x013b, # LATIN CAPITAL LETTER L WITH CEDILLA
0x00a9: 0x0110, # LATIN CAPITAL LETTER D WITH STROKE
0x00aa: 0x0160, # LATIN CAPITAL LETTER S WITH CARON
0x00ab: 0x0166, # LATIN CAPITAL LETTER T WITH STROKE
0x00ac: 0x017d, # LATIN CAPITAL LETTER Z WITH CARON
0x00ae: 0x016a, # LATIN CAPITAL LETTER U WITH MACRON
0x00af: 0x014a, # LATIN CAPITAL LETTER ENG
0x00b1: 0x0105, # LATIN SMALL LETTER A WITH OGONEK
0x00b2: 0x0113, # LATIN SMALL LETTER E WITH MACRON
0x00b3: 0x0123, # LATIN SMALL LETTER G WITH CEDILLA
0x00b4: 0x012b, # LATIN SMALL LETTER I WITH MACRON
0x00b5: 0x0129, # LATIN SMALL LETTER I WITH TILDE
0x00b6: 0x0137, # LATIN SMALL LETTER K WITH CEDILLA
0x00b8: 0x013c, # LATIN SMALL LETTER L WITH CEDILLA
0x00b9: 0x0111, # LATIN SMALL LETTER D WITH STROKE
0x00ba: 0x0161, # LATIN SMALL LETTER S WITH CARON
0x00bb: 0x0167, # LATIN SMALL LETTER T WITH STROKE
0x00bc: 0x017e, # LATIN SMALL LETTER Z WITH CARON
0x00bd: 0x2015, # HORIZONTAL BAR
0x00be: 0x016b, # LATIN SMALL LETTER U WITH MACRON
0x00bf: 0x014b, # LATIN SMALL LETTER ENG
0x00c0: 0x0100, # LATIN CAPITAL LETTER A WITH MACRON
0x00c7: 0x012e, # LATIN CAPITAL LETTER I WITH OGONEK
0x00c8: 0x010c, # LATIN CAPITAL LETTER C WITH CARON
0x00ca: 0x0118, # LATIN CAPITAL LETTER E WITH OGONEK
0x00cc: 0x0116, # LATIN CAPITAL LETTER E WITH DOT ABOVE
0x00d1: 0x0145, # LATIN CAPITAL LETTER N WITH CEDILLA
0x00d2: 0x014c, # LATIN CAPITAL LETTER O WITH MACRON
0x00d7: 0x0168, # LATIN CAPITAL LETTER U WITH TILDE
0x00d9: 0x0172, # LATIN CAPITAL LETTER U WITH OGONEK
0x00e0: 0x0101, # LATIN SMALL LETTER A WITH MACRON
0x00e7: 0x012f, # LATIN SMALL LETTER I WITH OGONEK
0x00e8: 0x010d, # LATIN SMALL LETTER C WITH CARON
0x00ea: 0x0119, # LATIN SMALL LETTER E WITH OGONEK
0x00ec: 0x0117, # LATIN SMALL LETTER E WITH DOT ABOVE
0x00f1: 0x0146, # LATIN SMALL LETTER N WITH CEDILLA
0x00f2: 0x014d, # LATIN SMALL LETTER O WITH MACRON
0x00f7: 0x0169, # LATIN SMALL LETTER U WITH TILDE
0x00f9: 0x0173, # LATIN SMALL LETTER U WITH OGONEK
0x00ff: 0x0138, # LATIN SMALL LETTER KRA
})
### Encoding Map
encoding_map = {}
for k,v in decoding_map.items():
    encoding_map[v] = k

+ 0
- 102
lib/jython/Lib/encodings/iso8859_13.py

@@ -1,102 +0,0 @@
""" Python Character Mapping Codec generated from '8859-13.TXT' with gencodec.py.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_map)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_map)
class StreamWriter(Codec,codecs.StreamWriter):
    pass
class StreamReader(Codec,codecs.StreamReader):
    pass
### encodings module API
def getregentry():
    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x00a1: 0x201d, # RIGHT DOUBLE QUOTATION MARK
0x00a5: 0x201e, # DOUBLE LOW-9 QUOTATION MARK
0x00a8: 0x00d8, # LATIN CAPITAL LETTER O WITH STROKE
0x00aa: 0x0156, # LATIN CAPITAL LETTER R WITH CEDILLA
0x00af: 0x00c6, # LATIN CAPITAL LETTER AE
0x00b4: 0x201c, # LEFT DOUBLE QUOTATION MARK
0x00b8: 0x00f8, # LATIN SMALL LETTER O WITH STROKE
0x00ba: 0x0157, # LATIN SMALL LETTER R WITH CEDILLA
0x00bf: 0x00e6, # LATIN SMALL LETTER AE
0x00c0: 0x0104, # LATIN CAPITAL LETTER A WITH OGONEK
0x00c1: 0x012e, # LATIN CAPITAL LETTER I WITH OGONEK
0x00c2: 0x0100, # LATIN CAPITAL LETTER A WITH MACRON
0x00c3: 0x0106, # LATIN CAPITAL LETTER C WITH ACUTE
0x00c6: 0x0118, # LATIN CAPITAL LETTER E WITH OGONEK
0x00c7: 0x0112, # LATIN CAPITAL LETTER E WITH MACRON
0x00c8: 0x010c, # LATIN CAPITAL LETTER C WITH CARON
0x00ca: 0x0179, # LATIN CAPITAL LETTER Z WITH ACUTE
0x00cb: 0x0116, # LATIN CAPITAL LETTER E WITH DOT ABOVE
0x00cc: 0x0122, # LATIN CAPITAL LETTER G WITH CEDILLA
0x00cd: 0x0136, # LATIN CAPITAL LETTER K WITH CEDILLA
0x00ce: 0x012a, # LATIN CAPITAL LETTER I WITH MACRON
0x00cf: 0x013b, # LATIN CAPITAL LETTER L WITH CEDILLA
0x00d0: 0x0160, # LATIN CAPITAL LETTER S WITH CARON
0x00d1: 0x0143, # LATIN CAPITAL LETTER N WITH ACUTE
0x00d2: 0x0145, # LATIN CAPITAL LETTER N WITH CEDILLA
0x00d4: 0x014c, # LATIN CAPITAL LETTER O WITH MACRON
0x00d8: 0x0172, # LATIN CAPITAL LETTER U WITH OGONEK
0x00d9: 0x0141, # LATIN CAPITAL LETTER L WITH STROKE
0x00da: 0x015a, # LATIN CAPITAL LETTER S WITH ACUTE
0x00db: 0x016a, # LATIN CAPITAL LETTER U WITH MACRON
0x00dd: 0x017b, # LATIN CAPITAL LETTER Z WITH DOT ABOVE
0x00de: 0x017d, # LATIN CAPITAL LETTER Z WITH CARON
0x00e0: 0x0105, # LATIN SMALL LETTER A WITH OGONEK
0x00e1: 0x012f, # LATIN SMALL LETTER I WITH OGONEK
0x00e2: 0x0101, # LATIN SMALL LETTER A WITH MACRON
0x00e3: 0x0107, # LATIN SMALL LETTER C WITH ACUTE
0x00e6: 0x0119, # LATIN SMALL LETTER E WITH OGONEK
0x00e7: 0x0113, # LATIN SMALL LETTER E WITH MACRON
0x00e8: 0x010d, # LATIN SMALL LETTER C WITH CARON
0x00ea: 0x017a, # LATIN SMALL LETTER Z WITH ACUTE
0x00eb: 0x0117, # LATIN SMALL LETTER E WITH DOT ABOVE
0x00ec: 0x0123, # LATIN SMALL LETTER G WITH CEDILLA
0x00ed: 0x0137, # LATIN SMALL LETTER K WITH CEDILLA
0x00ee: 0x012b, # LATIN SMALL LETTER I WITH MACRON
0x00ef: 0x013c, # LATIN SMALL LETTER L WITH CEDILLA
0x00f0: 0x0161, # LATIN SMALL LETTER S WITH CARON
0x00f1: 0x0144, # LATIN SMALL LETTER N WITH ACUTE
0x00f2: 0x0146, # LATIN SMALL LETTER N WITH CEDILLA
0x00f4: 0x014d, # LATIN SMALL LETTER O WITH MACRON
0x00f8: 0x0173, # LATIN SMALL LETTER U WITH OGONEK
0x00f9: 0x0142, # LATIN SMALL LETTER L WITH STROKE
0x00fa: 0x015b, # LATIN SMALL LETTER S WITH ACUTE
0x00fb: 0x016b, # LATIN SMALL LETTER U WITH MACRON
0x00fd: 0x017c, # LATIN SMALL LETTER Z WITH DOT ABOVE
0x00fe: 0x017e, # LATIN SMALL LETTER Z WITH CARON
0x00ff: 0x2019, # RIGHT SINGLE QUOTATION MARK
})
### Encoding Map
encoding_map = {}
for k,v in decoding_map.items():
    encoding_map[v] = k

+ 0
- 77
lib/jython/Lib/encodings/iso8859_14.py

@@ -1,77 +0,0 @@
""" Python Character Mapping Codec generated from '8859-14.TXT' with gencodec.py.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_map)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_map)
class StreamWriter(Codec,codecs.StreamWriter):
    pass
class StreamReader(Codec,codecs.StreamReader):
    pass
### encodings module API
def getregentry():
    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x00a1: 0x1e02, # LATIN CAPITAL LETTER B WITH DOT ABOVE
0x00a2: 0x1e03, # LATIN SMALL LETTER B WITH DOT ABOVE
0x00a4: 0x010a, # LATIN CAPITAL LETTER C WITH DOT ABOVE
0x00a5: 0x010b, # LATIN SMALL LETTER C WITH DOT ABOVE
0x00a6: 0x1e0a, # LATIN CAPITAL LETTER D WITH DOT ABOVE
0x00a8: 0x1e80, # LATIN CAPITAL LETTER W WITH GRAVE
0x00aa: 0x1e82, # LATIN CAPITAL LETTER W WITH ACUTE
0x00ab: 0x1e0b, # LATIN SMALL LETTER D WITH DOT ABOVE
0x00ac: 0x1ef2, # LATIN CAPITAL LETTER Y WITH GRAVE
0x00af: 0x0178, # LATIN CAPITAL LETTER Y WITH DIAERESIS
0x00b0: 0x1e1e, # LATIN CAPITAL LETTER F WITH DOT ABOVE
0x00b1: 0x1e1f, # LATIN SMALL LETTER F WITH DOT ABOVE
0x00b2: 0x0120, # LATIN CAPITAL LETTER G WITH DOT ABOVE
0x00b3: 0x0121, # LATIN SMALL LETTER G WITH DOT ABOVE
0x00b4: 0x1e40, # LATIN CAPITAL LETTER M WITH DOT ABOVE
0x00b5: 0x1e41, # LATIN SMALL LETTER M WITH DOT ABOVE
0x00b7: 0x1e56, # LATIN CAPITAL LETTER P WITH DOT ABOVE
0x00b8: 0x1e81, # LATIN SMALL LETTER W WITH GRAVE
0x00b9: 0x1e57, # LATIN SMALL LETTER P WITH DOT ABOVE
0x00ba: 0x1e83, # LATIN SMALL LETTER W WITH ACUTE
0x00bb: 0x1e60, # LATIN CAPITAL LETTER S WITH DOT ABOVE
0x00bc: 0x1ef3, # LATIN SMALL LETTER Y WITH GRAVE
0x00bd: 0x1e84, # LATIN CAPITAL LETTER W WITH DIAERESIS
0x00be: 0x1e85, # LATIN SMALL LETTER W WITH DIAERESIS
0x00bf: 0x1e61, # LATIN SMALL LETTER S WITH DOT ABOVE
0x00d0: 0x0174, # LATIN CAPITAL LETTER W WITH CIRCUMFLEX
0x00d7: 0x1e6a, # LATIN CAPITAL LETTER T WITH DOT ABOVE
0x00de: 0x0176, # LATIN CAPITAL LETTER Y WITH CIRCUMFLEX
0x00f0: 0x0175, # LATIN SMALL LETTER W WITH CIRCUMFLEX
0x00f7: 0x1e6b, # LATIN SMALL LETTER T WITH DOT ABOVE
0x00fe: 0x0177, # LATIN SMALL LETTER Y WITH CIRCUMFLEX
})
### Encoding Map
encoding_map = {}
for k,v in decoding_map.items():
    encoding_map[v] = k

+ 0
- 54
lib/jython/Lib/encodings/iso8859_15.py

@@ -1,54 +0,0 @@
""" Python Character Mapping Codec generated from '8859-15.TXT' with gencodec.py.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_map)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_map)
class StreamWriter(Codec,codecs.StreamWriter):
    pass
class StreamReader(Codec,codecs.StreamReader):
    pass
### encodings module API
def getregentry():
    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x00a4: 0x20ac, # EURO SIGN
0x00a6: 0x0160, # LATIN CAPITAL LETTER S WITH CARON
0x00a8: 0x0161, # LATIN SMALL LETTER S WITH CARON
0x00b4: 0x017d, # LATIN CAPITAL LETTER Z WITH CARON
0x00b8: 0x017e, # LATIN SMALL LETTER Z WITH CARON
0x00bc: 0x0152, # LATIN CAPITAL LIGATURE OE
0x00bd: 0x0153, # LATIN SMALL LIGATURE OE
0x00be: 0x0178, # LATIN CAPITAL LETTER Y WITH DIAERESIS
})
### Encoding Map
encoding_map = {}
for k,v in decoding_map.items():
    encoding_map[v] = k

+ 0
- 103
lib/jython/Lib/encodings/iso8859_2.py

@@ -1,103 +0,0 @@
""" Python Character Mapping Codec generated from '8859-2.TXT' with gencodec.py.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_map)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_map)
class StreamWriter(Codec,codecs.StreamWriter):
    pass
class StreamReader(Codec,codecs.StreamReader):
    pass
### encodings module API
def getregentry():
    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x00a1: 0x0104, # LATIN CAPITAL LETTER A WITH OGONEK
0x00a2: 0x02d8, # BREVE
0x00a3: 0x0141, # LATIN CAPITAL LETTER L WITH STROKE
0x00a5: 0x013d, # LATIN CAPITAL LETTER L WITH CARON
0x00a6: 0x015a, # LATIN CAPITAL LETTER S WITH ACUTE
0x00a9: 0x0160, # LATIN CAPITAL LETTER S WITH CARON
0x00aa: 0x015e, # LATIN CAPITAL LETTER S WITH CEDILLA
0x00ab: 0x0164, # LATIN CAPITAL LETTER T WITH CARON
0x00ac: 0x0179, # LATIN CAPITAL LETTER Z WITH ACUTE
0x00ae: 0x017d, # LATIN CAPITAL LETTER Z WITH CARON
0x00af: 0x017b, # LATIN CAPITAL LETTER Z WITH DOT ABOVE
0x00b1: 0x0105, # LATIN SMALL LETTER A WITH OGONEK
0x00b2: 0x02db, # OGONEK
0x00b3: 0x0142, # LATIN SMALL LETTER L WITH STROKE
0x00b5: 0x013e, # LATIN SMALL LETTER L WITH CARON
0x00b6: 0x015b, # LATIN SMALL LETTER S WITH ACUTE
0x00b7: 0x02c7, # CARON
0x00b9: 0x0161, # LATIN SMALL LETTER S WITH CARON
0x00ba: 0x015f, # LATIN SMALL LETTER S WITH CEDILLA
0x00bb: 0x0165, # LATIN SMALL LETTER T WITH CARON
0x00bc: 0x017a, # LATIN SMALL LETTER Z WITH ACUTE
0x00bd: 0x02dd, # DOUBLE ACUTE ACCENT
0x00be: 0x017e, # LATIN SMALL LETTER Z WITH CARON
0x00bf: 0x017c, # LATIN SMALL LETTER Z WITH DOT ABOVE
0x00c0: 0x0154, # LATIN CAPITAL LETTER R WITH ACUTE
0x00c3: 0x0102, # LATIN CAPITAL LETTER A WITH BREVE
0x00c5: 0x0139, # LATIN CAPITAL LETTER L WITH ACUTE
0x00c6: 0x0106, # LATIN CAPITAL LETTER C WITH ACUTE
0x00c8: 0x010c, # LATIN CAPITAL LETTER C WITH CARON
0x00ca: 0x0118, # LATIN CAPITAL LETTER E WITH OGONEK
0x00cc: 0x011a, # LATIN CAPITAL LETTER E WITH CARON
0x00cf: 0x010e, # LATIN CAPITAL LETTER D WITH CARON
0x00d0: 0x0110, # LATIN CAPITAL LETTER D WITH STROKE
0x00d1: 0x0143, # LATIN CAPITAL LETTER N WITH ACUTE
0x00d2: 0x0147, # LATIN CAPITAL LETTER N WITH CARON
0x00d5: 0x0150, # LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
0x00d8: 0x0158, # LATIN CAPITAL LETTER R WITH CARON
0x00d9: 0x016e, # LATIN CAPITAL LETTER U WITH RING ABOVE
0x00db: 0x0170, # LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
0x00de: 0x0162, # LATIN CAPITAL LETTER T WITH CEDILLA
0x00e0: 0x0155, # LATIN SMALL LETTER R WITH ACUTE
0x00e3: 0x0103, # LATIN SMALL LETTER A WITH BREVE
0x00e5: 0x013a, # LATIN SMALL LETTER L WITH ACUTE
0x00e6: 0x0107, # LATIN SMALL LETTER C WITH ACUTE
0x00e8: 0x010d, # LATIN SMALL LETTER C WITH CARON
0x00ea: 0x0119, # LATIN SMALL LETTER E WITH OGONEK
0x00ec: 0x011b, # LATIN SMALL LETTER E WITH CARON
0x00ef: 0x010f, # LATIN SMALL LETTER D WITH CARON
0x00f0: 0x0111, # LATIN SMALL LETTER D WITH STROKE
0x00f1: 0x0144, # LATIN SMALL LETTER N WITH ACUTE
0x00f2: 0x0148, # LATIN SMALL LETTER N WITH CARON
0x00f5: 0x0151, # LATIN SMALL LETTER O WITH DOUBLE ACUTE
0x00f8: 0x0159, # LATIN SMALL LETTER R WITH CARON
0x00f9: 0x016f, # LATIN SMALL LETTER U WITH RING ABOVE
0x00fb: 0x0171, # LATIN SMALL LETTER U WITH DOUBLE ACUTE
0x00fe: 0x0163, # LATIN SMALL LETTER T WITH CEDILLA
0x00ff: 0x02d9, # DOT ABOVE
})
### Encoding Map
encoding_map = {}
for k,v in decoding_map.items():
    encoding_map[v] = k

+ 0
- 81
lib/jython/Lib/encodings/iso8859_3.py

@@ -1,81 +0,0 @@
""" Python Character Mapping Codec generated from '8859-3.TXT' with gencodec.py.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_map)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_map)
class StreamWriter(Codec,codecs.StreamWriter):
    pass
class StreamReader(Codec,codecs.StreamReader):
    pass
### encodings module API
def getregentry():
    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x00a1: 0x0126, # LATIN CAPITAL LETTER H WITH STROKE
0x00a2: 0x02d8, # BREVE
0x00a5: None,
0x00a6: 0x0124, # LATIN CAPITAL LETTER H WITH CIRCUMFLEX
0x00a9: 0x0130, # LATIN CAPITAL LETTER I WITH DOT ABOVE
0x00aa: 0x015e, # LATIN CAPITAL LETTER S WITH CEDILLA
0x00ab: 0x011e, # LATIN CAPITAL LETTER G WITH BREVE
0x00ac: 0x0134, # LATIN CAPITAL LETTER J WITH CIRCUMFLEX
0x00ae: None,
0x00af: 0x017b, # LATIN CAPITAL LETTER Z WITH DOT ABOVE
0x00b1: 0x0127, # LATIN SMALL LETTER H WITH STROKE
0x00b6: 0x0125, # LATIN SMALL LETTER H WITH CIRCUMFLEX
0x00b9: 0x0131, # LATIN SMALL LETTER DOTLESS I
0x00ba: 0x015f, # LATIN SMALL LETTER S WITH CEDILLA
0x00bb: 0x011f, # LATIN SMALL LETTER G WITH BREVE
0x00bc: 0x0135, # LATIN SMALL LETTER J WITH CIRCUMFLEX
0x00be: None,
0x00bf: 0x017c, # LATIN SMALL LETTER Z WITH DOT ABOVE
0x00c3: None,
0x00c5: 0x010a, # LATIN CAPITAL LETTER C WITH DOT ABOVE
0x00c6: 0x0108, # LATIN CAPITAL LETTER C WITH CIRCUMFLEX
0x00d0: None,
0x00d5: 0x0120, # LATIN CAPITAL LETTER G WITH DOT ABOVE
0x00d8: 0x011c, # LATIN CAPITAL LETTER G WITH CIRCUMFLEX
0x00dd: 0x016c, # LATIN CAPITAL LETTER U WITH BREVE
0x00de: 0x015c, # LATIN CAPITAL LETTER S WITH CIRCUMFLEX
0x00e3: None,
0x00e5: 0x010b, # LATIN SMALL LETTER C WITH DOT ABOVE
0x00e6: 0x0109, # LATIN SMALL LETTER C WITH CIRCUMFLEX
0x00f0: None,
0x00f5: 0x0121, # LATIN SMALL LETTER G WITH DOT ABOVE
0x00f8: 0x011d, # LATIN SMALL LETTER G WITH CIRCUMFLEX
0x00fd: 0x016d, # LATIN SMALL LETTER U WITH BREVE
0x00fe: 0x015d, # LATIN SMALL LETTER S WITH CIRCUMFLEX
0x00ff: 0x02d9, # DOT ABOVE
})
### Encoding Map
encoding_map = {}
for k,v in decoding_map.items():
    encoding_map[v] = k

+ 0
- 96
lib/jython/Lib/encodings/iso8859_4.py

@@ -1,96 +0,0 @@
""" Python Character Mapping Codec generated from '8859-4.TXT' with gencodec.py.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_map)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_map)
class StreamWriter(Codec,codecs.StreamWriter):
    pass
class StreamReader(Codec,codecs.StreamReader):
    pass
### encodings module API
def getregentry():
    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x00a1: 0x0104, # LATIN CAPITAL LETTER A WITH OGONEK
0x00a2: 0x0138, # LATIN SMALL LETTER KRA
0x00a3: 0x0156, # LATIN CAPITAL LETTER R WITH CEDILLA
0x00a5: 0x0128, # LATIN CAPITAL LETTER I WITH TILDE
0x00a6: 0x013b, # LATIN CAPITAL LETTER L WITH CEDILLA
0x00a9: 0x0160, # LATIN CAPITAL LETTER S WITH CARON
0x00aa: 0x0112, # LATIN CAPITAL LETTER E WITH MACRON
0x00ab: 0x0122, # LATIN CAPITAL LETTER G WITH CEDILLA
0x00ac: 0x0166, # LATIN CAPITAL LETTER T WITH STROKE
0x00ae: 0x017d, # LATIN CAPITAL LETTER Z WITH CARON
0x00b1: 0x0105, # LATIN SMALL LETTER A WITH OGONEK
0x00b2: 0x02db, # OGONEK
0x00b3: 0x0157, # LATIN SMALL LETTER R WITH CEDILLA
0x00b5: 0x0129, # LATIN SMALL LETTER I WITH TILDE
0x00b6: 0x013c, # LATIN SMALL LETTER L WITH CEDILLA
0x00b7: 0x02c7, # CARON
0x00b9: 0x0161, # LATIN SMALL LETTER S WITH CARON
0x00ba: 0x0113, # LATIN SMALL LETTER E WITH MACRON
0x00bb: 0x0123, # LATIN SMALL LETTER G WITH CEDILLA
0x00bc: 0x0167, # LATIN SMALL LETTER T WITH STROKE
0x00bd: 0x014a, # LATIN CAPITAL LETTER ENG
0x00be: 0x017e, # LATIN SMALL LETTER Z WITH CARON
0x00bf: 0x014b, # LATIN SMALL LETTER ENG
0x00c0: 0x0100, # LATIN CAPITAL LETTER A WITH MACRON
0x00c7: 0x012e, # LATIN CAPITAL LETTER I WITH OGONEK
0x00c8: 0x010c, # LATIN CAPITAL LETTER C WITH CARON
0x00ca: 0x0118, # LATIN CAPITAL LETTER E WITH OGONEK
0x00cc: 0x0116, # LATIN CAPITAL LETTER E WITH DOT ABOVE
0x00cf: 0x012a, # LATIN CAPITAL LETTER I WITH MACRON
0x00d0: 0x0110, # LATIN CAPITAL LETTER D WITH STROKE
0x00d1: 0x0145, # LATIN CAPITAL LETTER N WITH CEDILLA
0x00d2: 0x014c, # LATIN CAPITAL LETTER O WITH MACRON
0x00d3: 0x0136, # LATIN CAPITAL LETTER K WITH CEDILLA
0x00d9: 0x0172, # LATIN CAPITAL LETTER U WITH OGONEK
0x00dd: 0x0168, # LATIN CAPITAL LETTER U WITH TILDE
0x00de: 0x016a, # LATIN CAPITAL LETTER U WITH MACRON
0x00e0: 0x0101, # LATIN SMALL LETTER A WITH MACRON
0x00e7: 0x012f, # LATIN SMALL LETTER I WITH OGONEK
0x00e8: 0x010d, # LATIN SMALL LETTER C WITH CARON
0x00ea: 0x0119, # LATIN SMALL LETTER E WITH OGONEK
0x00ec: 0x0117, # LATIN SMALL LETTER E WITH DOT ABOVE
0x00ef: 0x012b, # LATIN SMALL LETTER I WITH MACRON
0x00f0: 0x0111, # LATIN SMALL LETTER D WITH STROKE
0x00f1: 0x0146, # LATIN SMALL LETTER N WITH CEDILLA
0x00f2: 0x014d, # LATIN SMALL LETTER O WITH MACRON
0x00f3: 0x0137, # LATIN SMALL LETTER K WITH CEDILLA
0x00f9: 0x0173, # LATIN SMALL LETTER U WITH OGONEK
0x00fd: 0x0169, # LATIN SMALL LETTER U WITH TILDE
0x00fe: 0x016b, # LATIN SMALL LETTER U WITH MACRON
0x00ff: 0x02d9, # DOT ABOVE
})
### Encoding Map
encoding_map = {}
for k,v in decoding_map.items():
    encoding_map[v] = k

+ 0
- 140
lib/jython/Lib/encodings/iso8859_5.py

@@ -1,140 +0,0 @@
""" Python Character Mapping Codec generated from '8859-5.TXT' with gencodec.py.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_map)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_map)
class StreamWriter(Codec,codecs.StreamWriter):
    pass
class StreamReader(Codec,codecs.StreamReader):
    pass
### encodings module API
def getregentry():
    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x00a1: 0x0401, # CYRILLIC CAPITAL LETTER IO
0x00a2: 0x0402, # CYRILLIC CAPITAL LETTER DJE
0x00a3: 0x0403, # CYRILLIC CAPITAL LETTER GJE
0x00a4: 0x0404, # CYRILLIC CAPITAL LETTER UKRAINIAN IE
0x00a5: 0x0405, # CYRILLIC CAPITAL LETTER DZE
0x00a6: 0x0406, # CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
0x00a7: 0x0407, # CYRILLIC CAPITAL LETTER YI
0x00a8: 0x0408, # CYRILLIC CAPITAL LETTER JE
0x00a9: 0x0409, # CYRILLIC CAPITAL LETTER LJE
0x00aa: 0x040a, # CYRILLIC CAPITAL LETTER NJE
0x00ab: 0x040b, # CYRILLIC CAPITAL LETTER TSHE
0x00ac: 0x040c, # CYRILLIC CAPITAL LETTER KJE
0x00ae: 0x040e, # CYRILLIC CAPITAL LETTER SHORT U
0x00af: 0x040f, # CYRILLIC CAPITAL LETTER DZHE
0x00b0: 0x0410, # CYRILLIC CAPITAL LETTER A
0x00b1: 0x0411, # CYRILLIC CAPITAL LETTER BE
0x00b2: 0x0412, # CYRILLIC CAPITAL LETTER VE
0x00b3: 0x0413, # CYRILLIC CAPITAL LETTER GHE
0x00b4: 0x0414, # CYRILLIC CAPITAL LETTER DE
0x00b5: 0x0415, # CYRILLIC CAPITAL LETTER IE
0x00b6: 0x0416, # CYRILLIC CAPITAL LETTER ZHE
0x00b7: 0x0417, # CYRILLIC CAPITAL LETTER ZE
0x00b8: 0x0418, # CYRILLIC CAPITAL LETTER I
0x00b9: 0x0419, # CYRILLIC CAPITAL LETTER SHORT I
0x00ba: 0x041a, # CYRILLIC CAPITAL LETTER KA
0x00bb: 0x041b, # CYRILLIC CAPITAL LETTER EL
0x00bc: 0x041c, # CYRILLIC CAPITAL LETTER EM
0x00bd: 0x041d, # CYRILLIC CAPITAL LETTER EN
0x00be: 0x041e, # CYRILLIC CAPITAL LETTER O
0x00bf: 0x041f, # CYRILLIC CAPITAL LETTER PE
0x00c0: 0x0420, # CYRILLIC CAPITAL LETTER ER
0x00c1: 0x0421, # CYRILLIC CAPITAL LETTER ES
0x00c2: 0x0422, # CYRILLIC CAPITAL LETTER TE
0x00c3: 0x0423, # CYRILLIC CAPITAL LETTER U
0x00c4: 0x0424, # CYRILLIC CAPITAL LETTER EF
0x00c5: 0x0425, # CYRILLIC CAPITAL LETTER HA
0x00c6: 0x0426, # CYRILLIC CAPITAL LETTER TSE
0x00c7: 0x0427, # CYRILLIC CAPITAL LETTER CHE
0x00c8: 0x0428, # CYRILLIC CAPITAL LETTER SHA
0x00c9: 0x0429, # CYRILLIC CAPITAL LETTER SHCHA
0x00ca: 0x042a, # CYRILLIC CAPITAL LETTER HARD SIGN
0x00cb: 0x042b, # CYRILLIC CAPITAL LETTER YERU
0x00cc: 0x042c, # CYRILLIC CAPITAL LETTER SOFT SIGN
0x00cd: 0x042d, # CYRILLIC CAPITAL LETTER E
0x00ce: 0x042e, # CYRILLIC CAPITAL LETTER YU
0x00cf: 0x042f, # CYRILLIC CAPITAL LETTER YA
0x00d0: 0x0430, # CYRILLIC SMALL LETTER A
0x00d1: 0x0431, # CYRILLIC SMALL LETTER BE
0x00d2: 0x0432, # CYRILLIC SMALL LETTER VE
0x00d3: 0x0433, # CYRILLIC SMALL LETTER GHE
0x00d4: 0x0434, # CYRILLIC SMALL LETTER DE
0x00d5: 0x0435, # CYRILLIC SMALL LETTER IE
0x00d6: 0x0436, # CYRILLIC SMALL LETTER ZHE
0x00d7: 0x0437, # CYRILLIC SMALL LETTER ZE
0x00d8: 0x0438, # CYRILLIC SMALL LETTER I
0x00d9: 0x0439, # CYRILLIC SMALL LETTER SHORT I
0x00da: 0x043a, # CYRILLIC SMALL LETTER KA
0x00db: 0x043b, # CYRILLIC SMALL LETTER EL
0x00dc: 0x043c, # CYRILLIC SMALL LETTER EM
0x00dd: 0x043d, # CYRILLIC SMALL LETTER EN
0x00de: 0x043e, # CYRILLIC SMALL LETTER O
0x00df: 0x043f, # CYRILLIC SMALL LETTER PE
0x00e0: 0x0440, # CYRILLIC SMALL LETTER ER
0x00e1: 0x0441, # CYRILLIC SMALL LETTER ES
0x00e2: 0x0442, # CYRILLIC SMALL LETTER TE
0x00e3: 0x0443, # CYRILLIC SMALL LETTER U
0x00e4: 0x0444, # CYRILLIC SMALL LETTER EF
0x00e5: 0x0445, # CYRILLIC SMALL LETTER HA
0x00e6: 0x0446, # CYRILLIC SMALL LETTER TSE
0x00e7: 0x0447, # CYRILLIC SMALL LETTER CHE
0x00e8: 0x0448, # CYRILLIC SMALL LETTER SHA
0x00e9: 0x0449, # CYRILLIC SMALL LETTER SHCHA
0x00ea: 0x044a, # CYRILLIC SMALL LETTER HARD SIGN
0x00eb: 0x044b, # CYRILLIC SMALL LETTER YERU
0x00ec: 0x044c, # CYRILLIC SMALL LETTER SOFT SIGN
0x00ed: 0x044d, # CYRILLIC SMALL LETTER E
0x00ee: 0x044e, # CYRILLIC SMALL LETTER YU
0x00ef: 0x044f, # CYRILLIC SMALL LETTER YA
0x00f0: 0x2116, # NUMERO SIGN
0x00f1: 0x0451, # CYRILLIC SMALL LETTER IO
0x00f2: 0x0452, # CYRILLIC SMALL LETTER DJE
0x00f3: 0x0453, # CYRILLIC SMALL LETTER GJE
0x00f4: 0x0454, # CYRILLIC SMALL LETTER UKRAINIAN IE
0x00f5: 0x0455, # CYRILLIC SMALL LETTER DZE
0x00f6: 0x0456, # CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
0x00f7: 0x0457, # CYRILLIC SMALL LETTER YI
0x00f8: 0x0458, # CYRILLIC SMALL LETTER JE
0x00f9: 0x0459, # CYRILLIC SMALL LETTER LJE
0x00fa: 0x045a, # CYRILLIC SMALL LETTER NJE
0x00fb: 0x045b, # CYRILLIC SMALL LETTER TSHE
0x00fc: 0x045c, # CYRILLIC SMALL LETTER KJE
0x00fd: 0x00a7, # SECTION SIGN
0x00fe: 0x045e, # CYRILLIC SMALL LETTER SHORT U
0x00ff: 0x045f, # CYRILLIC SMALL LETTER DZHE
})
### Encoding Map
encoding_map = {}
for k,v in decoding_map.items():
    encoding_map[v] = k

+ 0
- 139
lib/jython/Lib/encodings/iso8859_6.py

@@ -1,139 +0,0 @@
""" Python Character Mapping Codec generated from '8859-6.TXT' with gencodec.py.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_map)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_map)
class StreamWriter(Codec,codecs.StreamWriter):
    pass
class StreamReader(Codec,codecs.StreamReader):
    pass
### encodings module API
def getregentry():
    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x00a1: None,
0x00a2: None,
0x00a3: None,
0x00a5: None,
0x00a6: None,
0x00a7: None,
0x00a8: None,
0x00a9: None,
0x00aa: None,
0x00ab: None,
0x00ac: 0x060c, # ARABIC COMMA
0x00ae: None,
0x00af: None,
0x00b0: None,
0x00b1: None,
0x00b2: None,
0x00b3: None,
0x00b4: None,
0x00b5: None,
0x00b6: None,
0x00b7: None,
0x00b8: None,
0x00b9: None,
0x00ba: None,
0x00bb: 0x061b, # ARABIC SEMICOLON
0x00bc: None,
0x00bd: None,
0x00be: None,
0x00bf: 0x061f, # ARABIC QUESTION MARK
0x00c0: None,
0x00c1: 0x0621, # ARABIC LETTER HAMZA
0x00c2: 0x0622, # ARABIC LETTER ALEF WITH MADDA ABOVE
0x00c3: 0x0623, # ARABIC LETTER ALEF WITH HAMZA ABOVE
0x00c4: 0x0624, # ARABIC LETTER WAW WITH HAMZA ABOVE
0x00c5: 0x0625, # ARABIC LETTER ALEF WITH HAMZA BELOW
0x00c6: 0x0626, # ARABIC LETTER YEH WITH HAMZA ABOVE
0x00c7: 0x0627, # ARABIC LETTER ALEF
0x00c8: 0x0628, # ARABIC LETTER BEH
0x00c9: 0x0629, # ARABIC LETTER TEH MARBUTA
0x00ca: 0x062a, # ARABIC LETTER TEH
0x00cb: 0x062b, # ARABIC LETTER THEH
0x00cc: 0x062c, # ARABIC LETTER JEEM
0x00cd: 0x062d, # ARABIC LETTER HAH
0x00ce: 0x062e, # ARABIC LETTER KHAH
0x00cf: 0x062f, # ARABIC LETTER DAL
0x00d0: 0x0630, # ARABIC LETTER THAL
0x00d1: 0x0631, # ARABIC LETTER REH
0x00d2: 0x0632, # ARABIC LETTER ZAIN
0x00d3: 0x0633, # ARABIC LETTER SEEN
0x00d4: 0x0634, # ARABIC LETTER SHEEN
0x00d5: 0x0635, # ARABIC LETTER SAD
0x00d6: 0x0636, # ARABIC LETTER DAD
0x00d7: 0x0637, # ARABIC LETTER TAH
0x00d8: 0x0638, # ARABIC LETTER ZAH
0x00d9: 0x0639, # ARABIC LETTER AIN
0x00da: 0x063a, # ARABIC LETTER GHAIN
0x00db: None,
0x00dc: None,
0x00dd: None,
0x00de: None,
0x00df: None,
0x00e0: 0x0640, # ARABIC TATWEEL
0x00e1: 0x0641, # ARABIC LETTER FEH
0x00e2: 0x0642, # ARABIC LETTER QAF
0x00e3: 0x0643, # ARABIC LETTER KAF
0x00e4: 0x0644, # ARABIC LETTER LAM
0x00e5: 0x0645, # ARABIC LETTER MEEM
0x00e6: 0x0646, # ARABIC LETTER NOON
0x00e7: 0x0647, # ARABIC LETTER HEH
0x00e8: 0x0648, # ARABIC LETTER WAW
0x00e9: 0x0649, # ARABIC LETTER ALEF MAKSURA
0x00ea: 0x064a, # ARABIC LETTER YEH
0x00eb: 0x064b, # ARABIC FATHATAN
0x00ec: 0x064c, # ARABIC DAMMATAN
0x00ed: 0x064d, # ARABIC KASRATAN
0x00ee: 0x064e, # ARABIC FATHA
0x00ef: 0x064f, # ARABIC DAMMA
0x00f0: 0x0650, # ARABIC KASRA
0x00f1: 0x0651, # ARABIC SHADDA
0x00f2: 0x0652, # ARABIC SUKUN
0x00f3: None,
0x00f4: None,
0x00f5: None,
0x00f6: None,
0x00f7: None,
0x00f8: None,
0x00f9: None,
0x00fa: None,
0x00fb: None,
0x00fc: None,
0x00fd: None,
0x00fe: None,
0x00ff: None,
})
### Encoding Map
encoding_map = {}
for k,v in decoding_map.items():
    encoding_map[v] = k

+ 0
- 126
lib/jython/Lib/encodings/iso8859_7.py

@@ -1,126 +0,0 @@
""" Python Character Mapping Codec generated from '8859-7.TXT' with gencodec.py.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_map)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_map)
class StreamWriter(Codec,codecs.StreamWriter):
    pass
class StreamReader(Codec,codecs.StreamReader):
    pass
### encodings module API
def getregentry():
    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x00a1: 0x2018, # LEFT SINGLE QUOTATION MARK
0x00a2: 0x2019, # RIGHT SINGLE QUOTATION MARK
0x00a4: None,
0x00a5: None,
0x00aa: None,
0x00ae: None,
0x00af: 0x2015, # HORIZONTAL BAR
0x00b4: 0x0384, # GREEK TONOS
0x00b5: 0x0385, # GREEK DIALYTIKA TONOS
0x00b6: 0x0386, # GREEK CAPITAL LETTER ALPHA WITH TONOS
0x00b8: 0x0388, # GREEK CAPITAL LETTER EPSILON WITH TONOS
0x00b9: 0x0389, # GREEK CAPITAL LETTER ETA WITH TONOS
0x00ba: 0x038a, # GREEK CAPITAL LETTER IOTA WITH TONOS
0x00bc: 0x038c, # GREEK CAPITAL LETTER OMICRON WITH TONOS
0x00be: 0x038e, # GREEK CAPITAL LETTER UPSILON WITH TONOS
0x00bf: 0x038f, # GREEK CAPITAL LETTER OMEGA WITH TONOS
0x00c0: 0x0390, # GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS
0x00c1: 0x0391, # GREEK CAPITAL LETTER ALPHA
0x00c2: 0x0392, # GREEK CAPITAL LETTER BETA
0x00c3: 0x0393, # GREEK CAPITAL LETTER GAMMA
0x00c4: 0x0394, # GREEK CAPITAL LETTER DELTA
0x00c5: 0x0395, # GREEK CAPITAL LETTER EPSILON
0x00c6: 0x0396, # GREEK CAPITAL LETTER ZETA
0x00c7: 0x0397, # GREEK CAPITAL LETTER ETA
0x00c8: 0x0398, # GREEK CAPITAL LETTER THETA
0x00c9: 0x0399, # GREEK CAPITAL LETTER IOTA
0x00ca: 0x039a, # GREEK CAPITAL LETTER KAPPA
0x00cb: 0x039b, # GREEK CAPITAL LETTER LAMDA
0x00cc: 0x039c, # GREEK CAPITAL LETTER MU
0x00cd: 0x039d, # GREEK CAPITAL LETTER NU
0x00ce: 0x039e, # GREEK CAPITAL LETTER XI
0x00cf: 0x039f, # GREEK CAPITAL LETTER OMICRON
0x00d0: 0x03a0, # GREEK CAPITAL LETTER PI
0x00d1: 0x03a1, # GREEK CAPITAL LETTER RHO
0x00d2: None,
0x00d3: 0x03a3, # GREEK CAPITAL LETTER SIGMA
0x00d4: 0x03a4, # GREEK CAPITAL LETTER TAU
0x00d5: 0x03a5, # GREEK CAPITAL LETTER UPSILON
0x00d6: 0x03a6, # GREEK CAPITAL LETTER PHI
0x00d7: 0x03a7, # GREEK CAPITAL LETTER CHI
0x00d8: 0x03a8, # GREEK CAPITAL LETTER PSI
0x00d9: 0x03a9, # GREEK CAPITAL LETTER OMEGA
0x00da: 0x03aa, # GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
0x00db: 0x03ab, # GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
0x00dc: 0x03ac, # GREEK SMALL LETTER ALPHA WITH TONOS
0x00dd: 0x03ad, # GREEK SMALL LETTER EPSILON WITH TONOS
0x00de: 0x03ae, # GREEK SMALL LETTER ETA WITH TONOS
0x00df: 0x03af, # GREEK SMALL LETTER IOTA WITH TONOS
0x00e0: 0x03b0, # GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS
0x00e1: 0x03b1, # GREEK SMALL LETTER ALPHA
0x00e2: 0x03b2, # GREEK SMALL LETTER BETA
0x00e3: 0x03b3, # GREEK SMALL LETTER GAMMA
0x00e4: 0x03b4, # GREEK SMALL LETTER DELTA
0x00e5: 0x03b5, # GREEK SMALL LETTER EPSILON
0x00e6: 0x03b6, # GREEK SMALL LETTER ZETA
0x00e7: 0x03b7, # GREEK SMALL LETTER ETA
0x00e8: 0x03b8, # GREEK SMALL LETTER THETA
0x00e9: 0x03b9, # GREEK SMALL LETTER IOTA
0x00ea: 0x03ba, # GREEK SMALL LETTER KAPPA
0x00eb: 0x03bb, # GREEK SMALL LETTER LAMDA
0x00ec: 0x03bc, # GREEK SMALL LETTER MU
0x00ed: 0x03bd, # GREEK SMALL LETTER NU
0x00ee: 0x03be, # GREEK SMALL LETTER XI
0x00ef: 0x03bf, # GREEK SMALL LETTER OMICRON
0x00f0: 0x03c0, # GREEK SMALL LETTER PI
0x00f1: 0x03c1, # GREEK SMALL LETTER RHO
0x00f2: 0x03c2, # GREEK SMALL LETTER FINAL SIGMA
0x00f3: 0x03c3, # GREEK SMALL LETTER SIGMA
0x00f4: 0x03c4, # GREEK SMALL LETTER TAU
0x00f5: 0x03c5, # GREEK SMALL LETTER UPSILON
0x00f6: 0x03c6, # GREEK SMALL LETTER PHI
0x00f7: 0x03c7, # GREEK SMALL LETTER CHI
0x00f8: 0x03c8, # GREEK SMALL LETTER PSI
0x00f9: 0x03c9, # GREEK SMALL LETTER OMEGA
0x00fa: 0x03ca, # GREEK SMALL LETTER IOTA WITH DIALYTIKA
0x00fb: 0x03cb, # GREEK SMALL LETTER UPSILON WITH DIALYTIKA
0x00fc: 0x03cc, # GREEK SMALL LETTER OMICRON WITH TONOS
0x00fd: 0x03cd, # GREEK SMALL LETTER UPSILON WITH TONOS
0x00fe: 0x03ce, # GREEK SMALL LETTER OMEGA WITH TONOS
0x00ff: None,
})
### Encoding Map
encoding_map = {}
for k,v in decoding_map.items():
    encoding_map[v] = k

+ 0
- 114
lib/jython/Lib/encodings/iso8859_8.py

@@ -1,114 +0,0 @@
""" Python Character Mapping Codec generated from '8859-8.TXT' with gencodec.py.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_map)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_map)
class StreamWriter(Codec,codecs.StreamWriter):
    pass
class StreamReader(Codec,codecs.StreamReader):
    pass
### encodings module API
def getregentry():
    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x00a1: None,
0x00aa: 0x00d7, # MULTIPLICATION SIGN
0x00ba: 0x00f7, # DIVISION SIGN
0x00bf: None,
0x00c0: None,
0x00c1: None,
0x00c2: None,
0x00c3: None,
0x00c4: None,
0x00c5: None,
0x00c6: None,
0x00c7: None,
0x00c8: None,
0x00c9: None,
0x00ca: None,
0x00cb: None,
0x00cc: None,
0x00cd: None,
0x00ce: None,
0x00cf: None,
0x00d0: None,
0x00d1: None,
0x00d2: None,
0x00d3: None,
0x00d4: None,
0x00d5: None,
0x00d6: None,
0x00d7: None,
0x00d8: None,
0x00d9: None,
0x00da: None,
0x00db: None,
0x00dc: None,
0x00dd: None,
0x00de: None,
0x00df: 0x2017, # DOUBLE LOW LINE
0x00e0: 0x05d0, # HEBREW LETTER ALEF
0x00e1: 0x05d1, # HEBREW LETTER BET
0x00e2: 0x05d2, # HEBREW LETTER GIMEL
0x00e3: 0x05d3, # HEBREW LETTER DALET
0x00e4: 0x05d4, # HEBREW LETTER HE
0x00e5: 0x05d5, # HEBREW LETTER VAV
0x00e6: 0x05d6, # HEBREW LETTER ZAYIN
0x00e7: 0x05d7, # HEBREW LETTER HET
0x00e8: 0x05d8, # HEBREW LETTER TET
0x00e9: 0x05d9, # HEBREW LETTER YOD
0x00ea: 0x05da, # HEBREW LETTER FINAL KAF
0x00eb: 0x05db, # HEBREW LETTER KAF
0x00ec: 0x05dc, # HEBREW LETTER LAMED
0x00ed: 0x05dd, # HEBREW LETTER FINAL MEM
0x00ee: 0x05de, # HEBREW LETTER MEM
0x00ef: 0x05df, # HEBREW LETTER FINAL NUN
0x00f0: 0x05e0, # HEBREW LETTER NUN
0x00f1: 0x05e1, # HEBREW LETTER SAMEKH
0x00f2: 0x05e2, # HEBREW LETTER AYIN
0x00f3: 0x05e3, # HEBREW LETTER FINAL PE
0x00f4: 0x05e4, # HEBREW LETTER PE
0x00f5: 0x05e5, # HEBREW LETTER FINAL TSADI
0x00f6: 0x05e6, # HEBREW LETTER TSADI
0x00f7: 0x05e7, # HEBREW LETTER QOF
0x00f8: 0x05e8, # HEBREW LETTER RESH
0x00f9: 0x05e9, # HEBREW LETTER SHIN
0x00fa: 0x05ea, # HEBREW LETTER TAV
0x00fb: None,
0x00fc: None,
0x00fd: 0x200e, # LEFT-TO-RIGHT MARK
0x00fe: 0x200f, # RIGHT-TO-LEFT MARK
0x00ff: None,
})
### Encoding Map
encoding_map = {}
for k,v in decoding_map.items():
    encoding_map[v] = k

+ 0
- 52
lib/jython/Lib/encodings/iso8859_9.py

@@ -1,52 +0,0 @@
""" Python Character Mapping Codec generated from '8859-9.TXT' with gencodec.py.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_map)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_map)
class StreamWriter(Codec,codecs.StreamWriter):
    pass
class StreamReader(Codec,codecs.StreamReader):
    pass
### encodings module API
def getregentry():
    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x00d0: 0x011e, # LATIN CAPITAL LETTER G WITH BREVE
0x00dd: 0x0130, # LATIN CAPITAL LETTER I WITH DOT ABOVE
0x00de: 0x015e, # LATIN CAPITAL LETTER S WITH CEDILLA
0x00f0: 0x011f, # LATIN SMALL LETTER G WITH BREVE
0x00fd: 0x0131, # LATIN SMALL LETTER DOTLESS I
0x00fe: 0x015f, # LATIN SMALL LETTER S WITH CEDILLA
})
### Encoding Map
encoding_map = {}
for k,v in decoding_map.items():
    encoding_map[v] = k

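The "### encodings module API" block that each of these files ends with is the hook the encodings package used: it imports the module named after the codec and calls getregentry() to obtain the encode/decode entry points. Below is a hypothetical Python 3 sketch of the same idea, registered by hand through codecs.register(); the search_function and the "demo_charmap" name are made up for illustration, and the single override comes from the iso-8859-9 table above.

import codecs

decoding_map = {i: i for i in range(256)}
decoding_map.update({0x00d0: 0x011e})    # LATIN CAPITAL LETTER G WITH BREVE, from the table above
encoding_map = {v: k for k, v in decoding_map.items()}

def search_function(name):
    # The stdlib's encodings package does this job by importing the module
    # named after the codec and calling its getregentry().
    if name != "demo_charmap":
        return None
    return codecs.CodecInfo(
        name="demo_charmap",
        encode=lambda text, errors="strict": codecs.charmap_encode(text, errors, encoding_map),
        decode=lambda data, errors="strict": codecs.charmap_decode(data, errors, decoding_map),
    )

codecs.register(search_function)
print(b"\xd0".decode("demo_charmap"))    # -> 'Ğ'
print("Ğ".encode("demo_charmap"))        # -> b'\xd0'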
+ 0
- 174
lib/jython/Lib/encodings/koi8_r.py

@@ -1,174 +0,0 @@
""" Python Character Mapping Codec generated from 'KOI8-R.TXT' with gencodec.py.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_map)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_map)
class StreamWriter(Codec,codecs.StreamWriter):
    pass
class StreamReader(Codec,codecs.StreamReader):
    pass
### encodings module API
def getregentry():
    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x0081: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x0082: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x0083: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x0084: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x0085: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x0086: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x0087: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x0088: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x0089: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x008a: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x008b: 0x2580, # UPPER HALF BLOCK
0x008c: 0x2584, # LOWER HALF BLOCK
0x008d: 0x2588, # FULL BLOCK
0x008e: 0x258c, # LEFT HALF BLOCK
0x008f: 0x2590, # RIGHT HALF BLOCK
0x0090: 0x2591, # LIGHT SHADE
0x0091: 0x2592, # MEDIUM SHADE
0x0092: 0x2593, # DARK SHADE
0x0093: 0x2320, # TOP HALF INTEGRAL
0x0094: 0x25a0, # BLACK SQUARE
0x0095: 0x2219, # BULLET OPERATOR
0x0096: 0x221a, # SQUARE ROOT
0x0097: 0x2248, # ALMOST EQUAL TO
0x0098: 0x2264, # LESS-THAN OR EQUAL TO
0x0099: 0x2265, # GREATER-THAN OR EQUAL TO
0x009a: 0x00a0, # NO-BREAK SPACE
0x009b: 0x2321, # BOTTOM HALF INTEGRAL
0x009c: 0x00b0, # DEGREE SIGN
0x009d: 0x00b2, # SUPERSCRIPT TWO
0x009e: 0x00b7, # MIDDLE DOT
0x009f: 0x00f7, # DIVISION SIGN
0x00a0: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00a1: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00a2: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x00a3: 0x0451, # CYRILLIC SMALL LETTER IO
0x00a4: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x00a5: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00a6: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x00a7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x00a8: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00a9: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x00aa: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x00ab: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00ac: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x00ad: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x00ae: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00af: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x00b0: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x00b1: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00b2: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x00b3: 0x0401, # CYRILLIC CAPITAL LETTER IO
0x00b4: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x00b5: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00b6: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x00b7: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x00b8: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00b9: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x00ba: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x00bb: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00bc: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x00bd: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x00be: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00bf: 0x00a9, # COPYRIGHT SIGN
0x00c0: 0x044e, # CYRILLIC SMALL LETTER YU
0x00c1: 0x0430, # CYRILLIC SMALL LETTER A
0x00c2: 0x0431, # CYRILLIC SMALL LETTER BE
0x00c3: 0x0446, # CYRILLIC SMALL LETTER TSE
0x00c4: 0x0434, # CYRILLIC SMALL LETTER DE
0x00c5: 0x0435, # CYRILLIC SMALL LETTER IE
0x00c6: 0x0444, # CYRILLIC SMALL LETTER EF
0x00c7: 0x0433, # CYRILLIC SMALL LETTER GHE
0x00c8: 0x0445, # CYRILLIC SMALL LETTER HA
0x00c9: 0x0438, # CYRILLIC SMALL LETTER I
0x00ca: 0x0439, # CYRILLIC SMALL LETTER SHORT I
0x00cb: 0x043a, # CYRILLIC SMALL LETTER KA
0x00cc: 0x043b, # CYRILLIC SMALL LETTER EL
0x00cd: 0x043c, # CYRILLIC SMALL LETTER EM
0x00ce: 0x043d, # CYRILLIC SMALL LETTER EN
0x00cf: 0x043e, # CYRILLIC SMALL LETTER O
0x00d0: 0x043f, # CYRILLIC SMALL LETTER PE
0x00d1: 0x044f, # CYRILLIC SMALL LETTER YA
0x00d2: 0x0440, # CYRILLIC SMALL LETTER ER
0x00d3: 0x0441, # CYRILLIC SMALL LETTER ES
0x00d4: 0x0442, # CYRILLIC SMALL LETTER TE
0x00d5: 0x0443, # CYRILLIC SMALL LETTER U
0x00d6: 0x0436, # CYRILLIC SMALL LETTER ZHE
0x00d7: 0x0432, # CYRILLIC SMALL LETTER VE
0x00d8: 0x044c, # CYRILLIC SMALL LETTER SOFT SIGN
0x00d9: 0x044b, # CYRILLIC SMALL LETTER YERU
0x00da: 0x0437, # CYRILLIC SMALL LETTER ZE
0x00db: 0x0448, # CYRILLIC SMALL LETTER SHA
0x00dc: 0x044d, # CYRILLIC SMALL LETTER E
0x00dd: 0x0449, # CYRILLIC SMALL LETTER SHCHA
0x00de: 0x0447, # CYRILLIC SMALL LETTER CHE
0x00df: 0x044a, # CYRILLIC SMALL LETTER HARD SIGN
0x00e0: 0x042e, # CYRILLIC CAPITAL LETTER YU
0x00e1: 0x0410, # CYRILLIC CAPITAL LETTER A
0x00e2: 0x0411, # CYRILLIC CAPITAL LETTER BE
0x00e3: 0x0426, # CYRILLIC CAPITAL LETTER TSE
0x00e4: 0x0414, # CYRILLIC CAPITAL LETTER DE
0x00e5: 0x0415, # CYRILLIC CAPITAL LETTER IE
0x00e6: 0x0424, # CYRILLIC CAPITAL LETTER EF
0x00e7: 0x0413, # CYRILLIC CAPITAL LETTER GHE
0x00e8: 0x0425, # CYRILLIC CAPITAL LETTER HA
0x00e9: 0x0418, # CYRILLIC CAPITAL LETTER I
0x00ea: 0x0419, # CYRILLIC CAPITAL LETTER SHORT I
0x00eb: 0x041a, # CYRILLIC CAPITAL LETTER KA
0x00ec: 0x041b, # CYRILLIC CAPITAL LETTER EL
0x00ed: 0x041c, # CYRILLIC CAPITAL LETTER EM
0x00ee: 0x041d, # CYRILLIC CAPITAL LETTER EN
0x00ef: 0x041e, # CYRILLIC CAPITAL LETTER O
0x00f0: 0x041f, # CYRILLIC CAPITAL LETTER PE
0x00f1: 0x042f, # CYRILLIC CAPITAL LETTER YA
0x00f2: 0x0420, # CYRILLIC CAPITAL LETTER ER
0x00f3: 0x0421, # CYRILLIC CAPITAL LETTER ES
0x00f4: 0x0422, # CYRILLIC CAPITAL LETTER TE
0x00f5: 0x0423, # CYRILLIC CAPITAL LETTER U
0x00f6: 0x0416, # CYRILLIC CAPITAL LETTER ZHE
0x00f7: 0x0412, # CYRILLIC CAPITAL LETTER VE
0x00f8: 0x042c, # CYRILLIC CAPITAL LETTER SOFT SIGN
0x00f9: 0x042b, # CYRILLIC CAPITAL LETTER YERU
0x00fa: 0x0417, # CYRILLIC CAPITAL LETTER ZE
0x00fb: 0x0428, # CYRILLIC CAPITAL LETTER SHA
0x00fc: 0x042d, # CYRILLIC CAPITAL LETTER E
0x00fd: 0x0429, # CYRILLIC CAPITAL LETTER SHCHA
0x00fe: 0x0427, # CYRILLIC CAPITAL LETTER CHE
0x00ff: 0x042a, # CYRILLIC CAPITAL LETTER HARD SIGN
})
### Encoding Map
encoding_map = {}
for k,v in decoding_map.items():
    encoding_map[v] = k

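The StreamWriter/StreamReader subclasses that every one of these modules defines are thin stream wrappers that encode or decode on the fly. Python 3 still ships its own koi8_r codec, so the same behaviour can be shown against it; a small, hypothetical sketch (the variable names are illustrative):

import codecs
import io

raw = io.BytesIO()
writer = codecs.getwriter("koi8_r")(raw)   # a StreamWriter bound to the byte stream
writer.write("юа")                         # CYRILLIC SMALL LETTER YU, CYRILLIC SMALL LETTER A
print(raw.getvalue())                      # -> b'\xc0\xc1', matching the table above

raw.seek(0)
reader = codecs.getreader("koi8_r")(raw)   # a StreamReader bound to the same stream
print(reader.read())                       # -> 'юа'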
+ 0
- 35
lib/jython/Lib/encodings/latin_1.py

@@ -1,35 +0,0 @@
""" Python 'latin-1' Codec
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
import codecs
### Codec APIs
class Codec(codecs.Codec):
    # Note: Binding these as C functions will result in the class not
    # converting them to methods. This is intended.
    encode = codecs.latin_1_encode
    decode = codecs.latin_1_decode
class StreamWriter(Codec,codecs.StreamWriter):
    pass
class StreamReader(Codec,codecs.StreamReader):
    pass
class StreamConverter(StreamWriter,StreamReader):
    encode = codecs.latin_1_decode
    decode = codecs.latin_1_encode
### encodings module API
def getregentry():
    return (Codec.encode,Codec.decode,StreamReader,StreamWriter)

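The "Binding these as C functions" note above points at a CPython detail: codecs.latin_1_encode and codecs.latin_1_decode are built-in functions, so assigning them as class attributes does not turn them into bound methods, and they keep their plain (input, errors) -> (output, length) signature. A hypothetical Python 3 sketch of what that buys:

import codecs

print(codecs.latin_1_encode("Grüße"))         # -> (b'Gr\xfc\xdfe', 5)
print(codecs.latin_1_decode(b"Gr\xfc\xdfe"))  # -> ('Grüße', 5)

class Codec(codecs.Codec):
    # Same trick as the deleted module: the built-ins stay plain functions,
    # so no self argument is inserted when they are called.
    encode = codecs.latin_1_encode
    decode = codecs.latin_1_decode

print(Codec.encode("abc"))                    # -> (b'abc', 3), no instance needed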
+ 0
- 169
lib/jython/Lib/encodings/mac_cyrillic.py

@@ -1,169 +0,0 @@
""" Python Character Mapping Codec generated from 'CYRILLIC.TXT' with gencodec.py.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_map)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_map)
class StreamWriter(Codec,codecs.StreamWriter):
    pass
class StreamReader(Codec,codecs.StreamReader):
    pass
### encodings module API
def getregentry():
    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x0410, # CYRILLIC CAPITAL LETTER A
0x0081: 0x0411, # CYRILLIC CAPITAL LETTER BE
0x0082: 0x0412, # CYRILLIC CAPITAL LETTER VE
0x0083: 0x0413, # CYRILLIC CAPITAL LETTER GHE
0x0084: 0x0414, # CYRILLIC CAPITAL LETTER DE
0x0085: 0x0415, # CYRILLIC CAPITAL LETTER IE
0x0086: 0x0416, # CYRILLIC CAPITAL LETTER ZHE
0x0087: 0x0417, # CYRILLIC CAPITAL LETTER ZE
0x0088: 0x0418, # CYRILLIC CAPITAL LETTER I
0x0089: 0x0419, # CYRILLIC CAPITAL LETTER SHORT I
0x008a: 0x041a, # CYRILLIC CAPITAL LETTER KA
0x008b: 0x041b, # CYRILLIC CAPITAL LETTER EL
0x008c: 0x041c, # CYRILLIC CAPITAL LETTER EM
0x008d: 0x041d, # CYRILLIC CAPITAL LETTER EN
0x008e: 0x041e, # CYRILLIC CAPITAL LETTER O
0x008f: 0x041f, # CYRILLIC CAPITAL LETTER PE
0x0090: 0x0420, # CYRILLIC CAPITAL LETTER ER
0x0091: 0x0421, # CYRILLIC CAPITAL LETTER ES
0x0092: 0x0422, # CYRILLIC CAPITAL LETTER TE
0x0093: 0x0423, # CYRILLIC CAPITAL LETTER U
0x0094: 0x0424, # CYRILLIC CAPITAL LETTER EF
0x0095: 0x0425, # CYRILLIC CAPITAL LETTER HA
0x0096: 0x0426, # CYRILLIC CAPITAL LETTER TSE
0x0097: 0x0427, # CYRILLIC CAPITAL LETTER CHE
0x0098: 0x0428, # CYRILLIC CAPITAL LETTER SHA
0x0099: 0x0429, # CYRILLIC CAPITAL LETTER SHCHA
0x009a: 0x042a, # CYRILLIC CAPITAL LETTER HARD SIGN
0x009b: 0x042b, # CYRILLIC CAPITAL LETTER YERU
0x009c: 0x042c, # CYRILLIC CAPITAL LETTER SOFT SIGN
0x009d: 0x042d, # CYRILLIC CAPITAL LETTER E
0x009e: 0x042e, # CYRILLIC CAPITAL LETTER YU
0x009f: 0x042f, # CYRILLIC CAPITAL LETTER YA
0x00a0: 0x2020, # DAGGER
0x00a1: 0x00b0, # DEGREE SIGN
0x00a4: 0x00a7, # SECTION SIGN
0x00a5: 0x2022, # BULLET
0x00a6: 0x00b6, # PILCROW SIGN
0x00a7: 0x0406, # CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
0x00a8: 0x00ae, # REGISTERED SIGN
0x00aa: 0x2122, # TRADE MARK SIGN
0x00ab: 0x0402, # CYRILLIC CAPITAL LETTER DJE
0x00ac: 0x0452, # CYRILLIC SMALL LETTER DJE
0x00ad: 0x2260, # NOT EQUAL TO
0x00ae: 0x0403, # CYRILLIC CAPITAL LETTER GJE
0x00af: 0x0453, # CYRILLIC SMALL LETTER GJE
0x00b0: 0x221e, # INFINITY
0x00b2: 0x2264, # LESS-THAN OR EQUAL TO
0x00b3: 0x2265, # GREATER-THAN OR EQUAL TO
0x00b4: 0x0456, # CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
0x00b6: 0x2202, # PARTIAL DIFFERENTIAL
0x00b7: 0x0408, # CYRILLIC CAPITAL LETTER JE
0x00b8: 0x0404, # CYRILLIC CAPITAL LETTER UKRAINIAN IE
0x00b9: 0x0454, # CYRILLIC SMALL LETTER UKRAINIAN IE
0x00ba: 0x0407, # CYRILLIC CAPITAL LETTER YI
0x00bb: 0x0457, # CYRILLIC SMALL LETTER YI
0x00bc: 0x0409, # CYRILLIC CAPITAL LETTER LJE
0x00bd: 0x0459, # CYRILLIC SMALL LETTER LJE
0x00be: 0x040a, # CYRILLIC CAPITAL LETTER NJE
0x00bf: 0x045a, # CYRILLIC SMALL LETTER NJE
0x00c0: 0x0458, # CYRILLIC SMALL LETTER JE
0x00c1: 0x0405, # CYRILLIC CAPITAL LETTER DZE
0x00c2: 0x00ac, # NOT SIGN
0x00c3: 0x221a, # SQUARE ROOT
0x00c4: 0x0192, # LATIN SMALL LETTER F WITH HOOK
0x00c5: 0x2248, # ALMOST EQUAL TO
0x00c6: 0x2206, # INCREMENT
0x00c7: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00c8: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00c9: 0x2026, # HORIZONTAL ELLIPSIS
0x00ca: 0x00a0, # NO-BREAK SPACE
0x00cb: 0x040b, # CYRILLIC CAPITAL LETTER TSHE
0x00cc: 0x045b, # CYRILLIC SMALL LETTER TSHE
0x00cd: 0x040c, # CYRILLIC CAPITAL LETTER KJE
0x00ce: 0x045c, # CYRILLIC SMALL LETTER KJE
0x00cf: 0x0455, # CYRILLIC SMALL LETTER DZE
0x00d0: 0x2013, # EN DASH
0x00d1: 0x2014, # EM DASH
0x00d2: 0x201c, # LEFT DOUBLE QUOTATION MARK
0x00d3: 0x201d, # RIGHT DOUBLE QUOTATION MARK
0x00d4: 0x2018, # LEFT SINGLE QUOTATION MARK
0x00d5: 0x2019, # RIGHT SINGLE QUOTATION MARK
0x00d6: 0x00f7, # DIVISION SIGN
0x00d7: 0x201e, # DOUBLE LOW-9 QUOTATION MARK
0x00d8: 0x040e, # CYRILLIC CAPITAL LETTER SHORT U
0x00d9: 0x045e, # CYRILLIC SMALL LETTER SHORT U
0x00da: 0x040f, # CYRILLIC CAPITAL LETTER DZHE
0x00db: 0x045f, # CYRILLIC SMALL LETTER DZHE
0x00dc: 0x2116, # NUMERO SIGN
0x00dd: 0x0401, # CYRILLIC CAPITAL LETTER IO
0x00de: 0x0451, # CYRILLIC SMALL LETTER IO
0x00df: 0x044f, # CYRILLIC SMALL LETTER YA
0x00e0: 0x0430, # CYRILLIC SMALL LETTER A
0x00e1: 0x0431, # CYRILLIC SMALL LETTER BE
0x00e2: 0x0432, # CYRILLIC SMALL LETTER VE
0x00e3: 0x0433, # CYRILLIC SMALL LETTER GHE
0x00e4: 0x0434, # CYRILLIC SMALL LETTER DE
0x00e5: 0x0435, # CYRILLIC SMALL LETTER IE
0x00e6: 0x0436, # CYRILLIC SMALL LETTER ZHE
0x00e7: 0x0437, # CYRILLIC SMALL LETTER ZE
0x00e8: 0x0438, # CYRILLIC SMALL LETTER I
0x00e9: 0x0439, # CYRILLIC SMALL LETTER SHORT I
0x00ea: 0x043a, # CYRILLIC SMALL LETTER KA
0x00eb: 0x043b, # CYRILLIC SMALL LETTER EL
0x00ec: 0x043c, # CYRILLIC SMALL LETTER EM
0x00ed: 0x043d, # CYRILLIC SMALL LETTER EN
0x00ee: 0x043e, # CYRILLIC SMALL LETTER O
0x00ef: 0x043f, # CYRILLIC SMALL LETTER PE
0x00f0: 0x0440, # CYRILLIC SMALL LETTER ER
0x00f1: 0x0441, # CYRILLIC SMALL LETTER ES
0x00f2: 0x0442, # CYRILLIC SMALL LETTER TE
0x00f3: 0x0443, # CYRILLIC SMALL LETTER U
0x00f4: 0x0444, # CYRILLIC SMALL LETTER EF
0x00f5: 0x0445, # CYRILLIC SMALL LETTER HA
0x00f6: 0x0446, # CYRILLIC SMALL LETTER TSE
0x00f7: 0x0447, # CYRILLIC SMALL LETTER CHE
0x00f8: 0x0448, # CYRILLIC SMALL LETTER SHA
0x00f9: 0x0449, # CYRILLIC SMALL LETTER SHCHA
0x00fa: 0x044a, # CYRILLIC SMALL LETTER HARD SIGN
0x00fb: 0x044b, # CYRILLIC SMALL LETTER YERU
0x00fc: 0x044c, # CYRILLIC SMALL LETTER SOFT SIGN
0x00fd: 0x044d, # CYRILLIC SMALL LETTER E
0x00fe: 0x044e, # CYRILLIC SMALL LETTER YU
0x00ff: 0x00a4, # CURRENCY SIGN
})
### Encoding Map
encoding_map = {}
for k,v in decoding_map.items():
    encoding_map[v] = k

+ 0
- 172
lib/jython/Lib/encodings/mac_greek.py

@@ -1,172 +0,0 @@
""" Python Character Mapping Codec generated from 'GREEK.TXT' with gencodec.py.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_map)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_map)
class StreamWriter(Codec,codecs.StreamWriter):
    pass
class StreamReader(Codec,codecs.StreamReader):
    pass
### encodings module API
def getregentry():
    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x0081: 0x00b9, # SUPERSCRIPT ONE
0x0082: 0x00b2, # SUPERSCRIPT TWO
0x0083: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x0084: 0x00b3, # SUPERSCRIPT THREE
0x0085: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x0086: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x0087: 0x0385, # GREEK DIALYTIKA TONOS
0x0088: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
0x0089: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x008a: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
0x008b: 0x0384, # GREEK TONOS
0x008c: 0x00a8, # DIAERESIS
0x008d: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
0x008e: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x008f: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
0x0090: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x0091: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
0x0092: 0x00a3, # POUND SIGN
0x0093: 0x2122, # TRADE MARK SIGN
0x0094: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x0095: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS
0x0096: 0x2022, # BULLET
0x0097: 0x00bd, # VULGAR FRACTION ONE HALF
0x0098: 0x2030, # PER MILLE SIGN
0x0099: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x009a: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
0x009b: 0x00a6, # BROKEN BAR
0x009c: 0x00ad, # SOFT HYPHEN
0x009d: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE
0x009e: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x009f: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x00a0: 0x2020, # DAGGER
0x00a1: 0x0393, # GREEK CAPITAL LETTER GAMMA
0x00a2: 0x0394, # GREEK CAPITAL LETTER DELTA
0x00a3: 0x0398, # GREEK CAPITAL LETTER THETA
0x00a4: 0x039b, # GREEK CAPITAL LETTER LAMBDA
0x00a5: 0x039e, # GREEK CAPITAL LETTER XI
0x00a6: 0x03a0, # GREEK CAPITAL LETTER PI
0x00a7: 0x00df, # LATIN SMALL LETTER SHARP S
0x00a8: 0x00ae, # REGISTERED SIGN
0x00aa: 0x03a3, # GREEK CAPITAL LETTER SIGMA
0x00ab: 0x03aa, # GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
0x00ac: 0x00a7, # SECTION SIGN
0x00ad: 0x2260, # NOT EQUAL TO
0x00ae: 0x00b0, # DEGREE SIGN
0x00af: 0x0387, # GREEK ANO TELEIA
0x00b0: 0x0391, # GREEK CAPITAL LETTER ALPHA
0x00b2: 0x2264, # LESS-THAN OR EQUAL TO
0x00b3: 0x2265, # GREATER-THAN OR EQUAL TO
0x00b4: 0x00a5, # YEN SIGN
0x00b5: 0x0392, # GREEK CAPITAL LETTER BETA
0x00b6: 0x0395, # GREEK CAPITAL LETTER EPSILON
0x00b7: 0x0396, # GREEK CAPITAL LETTER ZETA
0x00b8: 0x0397, # GREEK CAPITAL LETTER ETA
0x00b9: 0x0399, # GREEK CAPITAL LETTER IOTA
0x00ba: 0x039a, # GREEK CAPITAL LETTER KAPPA
0x00bb: 0x039c, # GREEK CAPITAL LETTER MU
0x00bc: 0x03a6, # GREEK CAPITAL LETTER PHI
0x00bd: 0x03ab, # GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
0x00be: 0x03a8, # GREEK CAPITAL LETTER PSI
0x00bf: 0x03a9, # GREEK CAPITAL LETTER OMEGA
0x00c0: 0x03ac, # GREEK SMALL LETTER ALPHA WITH TONOS
0x00c1: 0x039d, # GREEK CAPITAL LETTER NU
0x00c2: 0x00ac, # NOT SIGN
0x00c3: 0x039f, # GREEK CAPITAL LETTER OMICRON
0x00c4: 0x03a1, # GREEK CAPITAL LETTER RHO
0x00c5: 0x2248, # ALMOST EQUAL TO
0x00c6: 0x03a4, # GREEK CAPITAL LETTER TAU
0x00c7: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00c8: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00c9: 0x2026, # HORIZONTAL ELLIPSIS
0x00ca: 0x00a0, # NO-BREAK SPACE
0x00cb: 0x03a5, # GREEK CAPITAL LETTER UPSILON
0x00cc: 0x03a7, # GREEK CAPITAL LETTER CHI
0x00cd: 0x0386, # GREEK CAPITAL LETTER ALPHA WITH TONOS
0x00ce: 0x0388, # GREEK CAPITAL LETTER EPSILON WITH TONOS
0x00cf: 0x0153, # LATIN SMALL LIGATURE OE
0x00d0: 0x2013, # EN DASH
0x00d1: 0x2015, # HORIZONTAL BAR
0x00d2: 0x201c, # LEFT DOUBLE QUOTATION MARK
0x00d3: 0x201d, # RIGHT DOUBLE QUOTATION MARK
0x00d4: 0x2018, # LEFT SINGLE QUOTATION MARK
0x00d5: 0x2019, # RIGHT SINGLE QUOTATION MARK
0x00d6: 0x00f7, # DIVISION SIGN
0x00d7: 0x0389, # GREEK CAPITAL LETTER ETA WITH TONOS
0x00d8: 0x038a, # GREEK CAPITAL LETTER IOTA WITH TONOS
0x00d9: 0x038c, # GREEK CAPITAL LETTER OMICRON WITH TONOS
0x00da: 0x038e, # GREEK CAPITAL LETTER UPSILON WITH TONOS
0x00db: 0x03ad, # GREEK SMALL LETTER EPSILON WITH TONOS
0x00dc: 0x03ae, # GREEK SMALL LETTER ETA WITH TONOS
0x00dd: 0x03af, # GREEK SMALL LETTER IOTA WITH TONOS
0x00de: 0x03cc, # GREEK SMALL LETTER OMICRON WITH TONOS
0x00df: 0x038f, # GREEK CAPITAL LETTER OMEGA WITH TONOS
0x00e0: 0x03cd, # GREEK SMALL LETTER UPSILON WITH TONOS
0x00e1: 0x03b1, # GREEK SMALL LETTER ALPHA
0x00e2: 0x03b2, # GREEK SMALL LETTER BETA
0x00e3: 0x03c8, # GREEK SMALL LETTER PSI
0x00e4: 0x03b4, # GREEK SMALL LETTER DELTA
0x00e5: 0x03b5, # GREEK SMALL LETTER EPSILON
0x00e6: 0x03c6, # GREEK SMALL LETTER PHI
0x00e7: 0x03b3, # GREEK SMALL LETTER GAMMA
0x00e8: 0x03b7, # GREEK SMALL LETTER ETA
0x00e9: 0x03b9, # GREEK SMALL LETTER IOTA
0x00ea: 0x03be, # GREEK SMALL LETTER XI
0x00eb: 0x03ba, # GREEK SMALL LETTER KAPPA
0x00ec: 0x03bb, # GREEK SMALL LETTER LAMBDA
0x00ed: 0x03bc, # GREEK SMALL LETTER MU
0x00ee: 0x03bd, # GREEK SMALL LETTER NU
0x00ef: 0x03bf, # GREEK SMALL LETTER OMICRON
0x00f0: 0x03c0, # GREEK SMALL LETTER PI
0x00f1: 0x03ce, # GREEK SMALL LETTER OMEGA WITH TONOS
0x00f2: 0x03c1, # GREEK SMALL LETTER RHO
0x00f3: 0x03c3, # GREEK SMALL LETTER SIGMA
0x00f4: 0x03c4, # GREEK SMALL LETTER TAU
0x00f5: 0x03b8, # GREEK SMALL LETTER THETA
0x00f6: 0x03c9, # GREEK SMALL LETTER OMEGA
0x00f7: 0x03c2, # GREEK SMALL LETTER FINAL SIGMA
0x00f8: 0x03c7, # GREEK SMALL LETTER CHI
0x00f9: 0x03c5, # GREEK SMALL LETTER UPSILON
0x00fa: 0x03b6, # GREEK SMALL LETTER ZETA
0x00fb: 0x03ca, # GREEK SMALL LETTER IOTA WITH DIALYTIKA
0x00fc: 0x03cb, # GREEK SMALL LETTER UPSILON WITH DIALYTIKA
0x00fd: 0x0390, # GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS
0x00fe: 0x03b0, # GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS
0x00ff: None, # UNDEFINED
})
### Encoding Map
encoding_map = {}
for k,v in decoding_map.items():
    encoding_map[v] = k

+ 0
- 168
lib/jython/Lib/encodings/mac_iceland.py

@@ -1,168 +0,0 @@
""" Python Character Mapping Codec generated from 'ICELAND.TXT' with gencodec.py.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_map)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_map)
class StreamWriter(Codec,codecs.StreamWriter):
    pass
class StreamReader(Codec,codecs.StreamReader):
    pass
### encodings module API
def getregentry():
    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x0081: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x0082: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
0x0083: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x0084: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE
0x0085: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x0086: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x0087: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
0x0088: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
0x0089: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x008a: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
0x008b: 0x00e3, # LATIN SMALL LETTER A WITH TILDE
0x008c: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE
0x008d: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
0x008e: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x008f: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
0x0090: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x0091: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
0x0092: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
0x0093: 0x00ec, # LATIN SMALL LETTER I WITH GRAVE
0x0094: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x0095: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS
0x0096: 0x00f1, # LATIN SMALL LETTER N WITH TILDE
0x0097: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x0098: 0x00f2, # LATIN SMALL LETTER O WITH GRAVE
0x0099: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x009a: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
0x009b: 0x00f5, # LATIN SMALL LETTER O WITH TILDE
0x009c: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
0x009d: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE
0x009e: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x009f: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x00a0: 0x00dd, # LATIN CAPITAL LETTER Y WITH ACUTE
0x00a1: 0x00b0, # DEGREE SIGN
0x00a4: 0x00a7, # SECTION SIGN
0x00a5: 0x2022, # BULLET
0x00a6: 0x00b6, # PILCROW SIGN
0x00a7: 0x00df, # LATIN SMALL LETTER SHARP S
0x00a8: 0x00ae, # REGISTERED SIGN
0x00aa: 0x2122, # TRADE MARK SIGN
0x00ab: 0x00b4, # ACUTE ACCENT
0x00ac: 0x00a8, # DIAERESIS
0x00ad: 0x2260, # NOT EQUAL TO
0x00ae: 0x00c6, # LATIN CAPITAL LIGATURE AE
0x00af: 0x00d8, # LATIN CAPITAL LETTER O WITH STROKE
0x00b0: 0x221e, # INFINITY
0x00b2: 0x2264, # LESS-THAN OR EQUAL TO
0x00b3: 0x2265, # GREATER-THAN OR EQUAL TO
0x00b4: 0x00a5, # YEN SIGN
0x00b6: 0x2202, # PARTIAL DIFFERENTIAL
0x00b7: 0x2211, # N-ARY SUMMATION
0x00b8: 0x220f, # N-ARY PRODUCT
0x00b9: 0x03c0, # GREEK SMALL LETTER PI
0x00ba: 0x222b, # INTEGRAL
0x00bb: 0x00aa, # FEMININE ORDINAL INDICATOR
0x00bc: 0x00ba, # MASCULINE ORDINAL INDICATOR
0x00bd: 0x2126, # OHM SIGN
0x00be: 0x00e6, # LATIN SMALL LIGATURE AE
0x00bf: 0x00f8, # LATIN SMALL LETTER O WITH STROKE
0x00c0: 0x00bf, # INVERTED QUESTION MARK
0x00c1: 0x00a1, # INVERTED EXCLAMATION MARK
0x00c2: 0x00ac, # NOT SIGN
0x00c3: 0x221a, # SQUARE ROOT
0x00c4: 0x0192, # LATIN SMALL LETTER F WITH HOOK
0x00c5: 0x2248, # ALMOST EQUAL TO
0x00c6: 0x2206, # INCREMENT
0x00c7: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00c8: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00c9: 0x2026, # HORIZONTAL ELLIPSIS
0x00ca: 0x00a0, # NO-BREAK SPACE
0x00cb: 0x00c0, # LATIN CAPITAL LETTER A WITH GRAVE
0x00cc: 0x00c3, # LATIN CAPITAL LETTER A WITH TILDE
0x00cd: 0x00d5, # LATIN CAPITAL LETTER O WITH TILDE
0x00ce: 0x0152, # LATIN CAPITAL LIGATURE OE
0x00cf: 0x0153, # LATIN SMALL LIGATURE OE
0x00d0: 0x2013, # EN DASH
0x00d1: 0x2014, # EM DASH
0x00d2: 0x201c, # LEFT DOUBLE QUOTATION MARK
0x00d3: 0x201d, # RIGHT DOUBLE QUOTATION MARK
0x00d4: 0x2018, # LEFT SINGLE QUOTATION MARK
0x00d5: 0x2019, # RIGHT SINGLE QUOTATION MARK
0x00d6: 0x00f7, # DIVISION SIGN
0x00d7: 0x25ca, # LOZENGE
0x00d8: 0x00ff, # LATIN SMALL LETTER Y WITH DIAERESIS
0x00d9: 0x0178, # LATIN CAPITAL LETTER Y WITH DIAERESIS
0x00da: 0x2044, # FRACTION SLASH
0x00db: 0x00a4, # CURRENCY SIGN
0x00dc: 0x00d0, # LATIN CAPITAL LETTER ETH
0x00dd: 0x00f0, # LATIN SMALL LETTER ETH
0x00df: 0x00fe, # LATIN SMALL LETTER THORN
0x00e0: 0x00fd, # LATIN SMALL LETTER Y WITH ACUTE
0x00e1: 0x00b7, # MIDDLE DOT
0x00e2: 0x201a, # SINGLE LOW-9 QUOTATION MARK
0x00e3: 0x201e, # DOUBLE LOW-9 QUOTATION MARK
0x00e4: 0x2030, # PER MILLE SIGN
0x00e5: 0x00c2, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
0x00e6: 0x00ca, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
0x00e7: 0x00c1, # LATIN CAPITAL LETTER A WITH ACUTE
0x00e8: 0x00cb, # LATIN CAPITAL LETTER E WITH DIAERESIS
0x00e9: 0x00c8, # LATIN CAPITAL LETTER E WITH GRAVE
0x00ea: 0x00cd, # LATIN CAPITAL LETTER I WITH ACUTE
0x00eb: 0x00ce, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
0x00ec: 0x00cf, # LATIN CAPITAL LETTER I WITH DIAERESIS
0x00ed: 0x00cc, # LATIN CAPITAL LETTER I WITH GRAVE
0x00ee: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE
0x00ef: 0x00d4, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
0x00f0: None, # UNDEFINED
0x00f1: 0x00d2, # LATIN CAPITAL LETTER O WITH GRAVE
0x00f2: 0x00da, # LATIN CAPITAL LETTER U WITH ACUTE
0x00f3: 0x00db, # LATIN CAPITAL LETTER U WITH CIRCUMFLEX
0x00f4: 0x00d9, # LATIN CAPITAL LETTER U WITH GRAVE
0x00f5: 0x0131, # LATIN SMALL LETTER DOTLESS I
0x00f6: 0x02c6, # MODIFIER LETTER CIRCUMFLEX ACCENT
0x00f7: 0x02dc, # SMALL TILDE
0x00f8: 0x00af, # MACRON
0x00f9: 0x02d8, # BREVE
0x00fa: 0x02d9, # DOT ABOVE
0x00fb: 0x02da, # RING ABOVE
0x00fc: 0x00b8, # CEDILLA
0x00fd: 0x02dd, # DOUBLE ACUTE ACCENT
0x00fe: 0x02db, # OGONEK
0x00ff: 0x02c7, # CARON
})
### Encoding Map
encoding_map = {}
for k,v in decoding_map.items():
    encoding_map[v] = k

+ 0
- 172
lib/jython/Lib/encodings/mac_latin2.py

@@ -1,172 +0,0 @@
""" Python Character Mapping Codec generated from 'LATIN2.TXT' with gencodec.py.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_map)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_map)
class StreamWriter(Codec,codecs.StreamWriter):
    pass
class StreamReader(Codec,codecs.StreamReader):
    pass
### encodings module API
def getregentry():
    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x0081: 0x0100, # LATIN CAPITAL LETTER A WITH MACRON
0x0082: 0x0101, # LATIN SMALL LETTER A WITH MACRON
0x0083: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x0084: 0x0104, # LATIN CAPITAL LETTER A WITH OGONEK
0x0085: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x0086: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x0087: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
0x0088: 0x0105, # LATIN SMALL LETTER A WITH OGONEK
0x0089: 0x010c, # LATIN CAPITAL LETTER C WITH CARON
0x008a: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
0x008b: 0x010d, # LATIN SMALL LETTER C WITH CARON
0x008c: 0x0106, # LATIN CAPITAL LETTER C WITH ACUTE
0x008d: 0x0107, # LATIN SMALL LETTER C WITH ACUTE
0x008e: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x008f: 0x0179, # LATIN CAPITAL LETTER Z WITH ACUTE
0x0090: 0x017a, # LATIN SMALL LETTER Z WITH ACUTE
0x0091: 0x010e, # LATIN CAPITAL LETTER D WITH CARON
0x0092: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
0x0093: 0x010f, # LATIN SMALL LETTER D WITH CARON
0x0094: 0x0112, # LATIN CAPITAL LETTER E WITH MACRON
0x0095: 0x0113, # LATIN SMALL LETTER E WITH MACRON
0x0096: 0x0116, # LATIN CAPITAL LETTER E WITH DOT ABOVE
0x0097: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x0098: 0x0117, # LATIN SMALL LETTER E WITH DOT ABOVE
0x0099: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x009a: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
0x009b: 0x00f5, # LATIN SMALL LETTER O WITH TILDE
0x009c: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
0x009d: 0x011a, # LATIN CAPITAL LETTER E WITH CARON
0x009e: 0x011b, # LATIN SMALL LETTER E WITH CARON
0x009f: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x00a0: 0x2020, # DAGGER
0x00a1: 0x00b0, # DEGREE SIGN
0x00a2: 0x0118, # LATIN CAPITAL LETTER E WITH OGONEK
0x00a4: 0x00a7, # SECTION SIGN
0x00a5: 0x2022, # BULLET
0x00a6: 0x00b6, # PILCROW SIGN
0x00a7: 0x00df, # LATIN SMALL LETTER SHARP S
0x00a8: 0x00ae, # REGISTERED SIGN
0x00aa: 0x2122, # TRADE MARK SIGN
0x00ab: 0x0119, # LATIN SMALL LETTER E WITH OGONEK
0x00ac: 0x00a8, # DIAERESIS
0x00ad: 0x2260, # NOT EQUAL TO
0x00ae: 0x0123, # LATIN SMALL LETTER G WITH CEDILLA
0x00af: 0x012e, # LATIN CAPITAL LETTER I WITH OGONEK
0x00b0: 0x012f, # LATIN SMALL LETTER I WITH OGONEK
0x00b1: 0x012a, # LATIN CAPITAL LETTER I WITH MACRON
0x00b2: 0x2264, # LESS-THAN OR EQUAL TO
0x00b3: 0x2265, # GREATER-THAN OR EQUAL TO
0x00b4: 0x012b, # LATIN SMALL LETTER I WITH MACRON
0x00b5: 0x0136, # LATIN CAPITAL LETTER K WITH CEDILLA
0x00b6: 0x2202, # PARTIAL DIFFERENTIAL
0x00b7: 0x2211, # N-ARY SUMMATION
0x00b8: 0x0142, # LATIN SMALL LETTER L WITH STROKE
0x00b9: 0x013b, # LATIN CAPITAL LETTER L WITH CEDILLA
0x00ba: 0x013c, # LATIN SMALL LETTER L WITH CEDILLA
0x00bb: 0x013d, # LATIN CAPITAL LETTER L WITH CARON
0x00bc: 0x013e, # LATIN SMALL LETTER L WITH CARON
0x00bd: 0x0139, # LATIN CAPITAL LETTER L WITH ACUTE
0x00be: 0x013a, # LATIN SMALL LETTER L WITH ACUTE
0x00bf: 0x0145, # LATIN CAPITAL LETTER N WITH CEDILLA
0x00c0: 0x0146, # LATIN SMALL LETTER N WITH CEDILLA
0x00c1: 0x0143, # LATIN CAPITAL LETTER N WITH ACUTE
0x00c2: 0x00ac, # NOT SIGN
0x00c3: 0x221a, # SQUARE ROOT
0x00c4: 0x0144, # LATIN SMALL LETTER N WITH ACUTE
0x00c5: 0x0147, # LATIN CAPITAL LETTER N WITH CARON
0x00c6: 0x2206, # INCREMENT
0x00c7: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00c8: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00c9: 0x2026, # HORIZONTAL ELLIPSIS
0x00ca: 0x00a0, # NO-BREAK SPACE
0x00cb: 0x0148, # LATIN SMALL LETTER N WITH CARON
0x00cc: 0x0150, # LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
0x00cd: 0x00d5, # LATIN CAPITAL LETTER O WITH TILDE
0x00ce: 0x0151, # LATIN SMALL LETTER O WITH DOUBLE ACUTE
0x00cf: 0x014c, # LATIN CAPITAL LETTER O WITH MACRON
0x00d0: 0x2013, # EN DASH
0x00d1: 0x2014, # EM DASH
0x00d2: 0x201c, # LEFT DOUBLE QUOTATION MARK
0x00d3: 0x201d, # RIGHT DOUBLE QUOTATION MARK
0x00d4: 0x2018, # LEFT SINGLE QUOTATION MARK
0x00d5: 0x2019, # RIGHT SINGLE QUOTATION MARK
0x00d6: 0x00f7, # DIVISION SIGN
0x00d7: 0x25ca, # LOZENGE
0x00d8: 0x014d, # LATIN SMALL LETTER O WITH MACRON
0x00d9: 0x0154, # LATIN CAPITAL LETTER R WITH ACUTE
0x00da: 0x0155, # LATIN SMALL LETTER R WITH ACUTE
0x00db: 0x0158, # LATIN CAPITAL LETTER R WITH CARON
0x00dc: 0x2039, # SINGLE LEFT-POINTING ANGLE QUOTATION MARK
0x00dd: 0x203a, # SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
0x00de: 0x0159, # LATIN SMALL LETTER R WITH CARON
0x00df: 0x0156, # LATIN CAPITAL LETTER R WITH CEDILLA
0x00e0: 0x0157, # LATIN SMALL LETTER R WITH CEDILLA
0x00e1: 0x0160, # LATIN CAPITAL LETTER S WITH CARON
0x00e2: 0x201a, # SINGLE LOW-9 QUOTATION MARK
0x00e3: 0x201e, # DOUBLE LOW-9 QUOTATION MARK
0x00e4: 0x0161, # LATIN SMALL LETTER S WITH CARON
0x00e5: 0x015a, # LATIN CAPITAL LETTER S WITH ACUTE
0x00e6: 0x015b, # LATIN SMALL LETTER S WITH ACUTE
0x00e7: 0x00c1, # LATIN CAPITAL LETTER A WITH ACUTE
0x00e8: 0x0164, # LATIN CAPITAL LETTER T WITH CARON
0x00e9: 0x0165, # LATIN SMALL LETTER T WITH CARON
0x00ea: 0x00cd, # LATIN CAPITAL LETTER I WITH ACUTE
0x00eb: 0x017d, # LATIN CAPITAL LETTER Z WITH CARON
0x00ec: 0x017e, # LATIN SMALL LETTER Z WITH CARON
0x00ed: 0x016a, # LATIN CAPITAL LETTER U WITH MACRON
0x00ee: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE
0x00ef: 0x00d4, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
0x00f0: 0x016b, # LATIN SMALL LETTER U WITH MACRON
0x00f1: 0x016e, # LATIN CAPITAL LETTER U WITH RING ABOVE
0x00f2: 0x00da, # LATIN CAPITAL LETTER U WITH ACUTE
0x00f3: 0x016f, # LATIN SMALL LETTER U WITH RING ABOVE
0x00f4: 0x0170, # LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
0x00f5: 0x0171, # LATIN SMALL LETTER U WITH DOUBLE ACUTE
0x00f6: 0x0172, # LATIN CAPITAL LETTER U WITH OGONEK
0x00f7: 0x0173, # LATIN SMALL LETTER U WITH OGONEK
0x00f8: 0x00dd, # LATIN CAPITAL LETTER Y WITH ACUTE
0x00f9: 0x00fd, # LATIN SMALL LETTER Y WITH ACUTE
0x00fa: 0x0137, # LATIN SMALL LETTER K WITH CEDILLA
0x00fb: 0x017b, # LATIN CAPITAL LETTER Z WITH DOT ABOVE
0x00fc: 0x0141, # LATIN CAPITAL LETTER L WITH STROKE
0x00fd: 0x017c, # LATIN SMALL LETTER Z WITH DOT ABOVE
0x00fe: 0x0122, # LATIN CAPITAL LETTER G WITH CEDILLA
0x00ff: 0x02c7, # CARON
})
### Encoding Map
encoding_map = {}
for k,v in decoding_map.items():
    encoding_map[v] = k

+ 0
- 169
lib/jython/Lib/encodings/mac_roman.py

@@ -1,169 +0,0 @@
""" Python Character Mapping Codec generated from 'ROMAN.TXT' with gencodec.py.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_map)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_map)
class StreamWriter(Codec,codecs.StreamWriter):
    pass
class StreamReader(Codec,codecs.StreamReader):
    pass
### encodings module API
def getregentry():
    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x0081: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x0082: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
0x0083: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x0084: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE
0x0085: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x0086: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x0087: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
0x0088: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
0x0089: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x008a: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
0x008b: 0x00e3, # LATIN SMALL LETTER A WITH TILDE
0x008c: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE
0x008d: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
0x008e: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x008f: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
0x0090: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x0091: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
0x0092: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
0x0093: 0x00ec, # LATIN SMALL LETTER I WITH GRAVE
0x0094: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x0095: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS
0x0096: 0x00f1, # LATIN SMALL LETTER N WITH TILDE
0x0097: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x0098: 0x00f2, # LATIN SMALL LETTER O WITH GRAVE
0x0099: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x009a: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
0x009b: 0x00f5, # LATIN SMALL LETTER O WITH TILDE
0x009c: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
0x009d: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE
0x009e: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x009f: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x00a0: 0x2020, # DAGGER
0x00a1: 0x00b0, # DEGREE SIGN
0x00a4: 0x00a7, # SECTION SIGN
0x00a5: 0x2022, # BULLET
0x00a6: 0x00b6, # PILCROW SIGN
0x00a7: 0x00df, # LATIN SMALL LETTER SHARP S
0x00a8: 0x00ae, # REGISTERED SIGN
0x00aa: 0x2122, # TRADE MARK SIGN
0x00ab: 0x00b4, # ACUTE ACCENT
0x00ac: 0x00a8, # DIAERESIS
0x00ad: 0x2260, # NOT EQUAL TO
0x00ae: 0x00c6, # LATIN CAPITAL LIGATURE AE
0x00af: 0x00d8, # LATIN CAPITAL LETTER O WITH STROKE
0x00b0: 0x221e, # INFINITY
0x00b2: 0x2264, # LESS-THAN OR EQUAL TO
0x00b3: 0x2265, # GREATER-THAN OR EQUAL TO
0x00b4: 0x00a5, # YEN SIGN
0x00b6: 0x2202, # PARTIAL DIFFERENTIAL
0x00b7: 0x2211, # N-ARY SUMMATION
0x00b8: 0x220f, # N-ARY PRODUCT
0x00b9: 0x03c0, # GREEK SMALL LETTER PI
0x00ba: 0x222b, # INTEGRAL
0x00bb: 0x00aa, # FEMININE ORDINAL INDICATOR
0x00bc: 0x00ba, # MASCULINE ORDINAL INDICATOR
0x00bd: 0x2126, # OHM SIGN
0x00be: 0x00e6, # LATIN SMALL LIGATURE AE
0x00bf: 0x00f8, # LATIN SMALL LETTER O WITH STROKE
0x00c0: 0x00bf, # INVERTED QUESTION MARK
0x00c1: 0x00a1, # INVERTED EXCLAMATION MARK
0x00c2: 0x00ac, # NOT SIGN
0x00c3: 0x221a, # SQUARE ROOT
0x00c4: 0x0192, # LATIN SMALL LETTER F WITH HOOK
0x00c5: 0x2248, # ALMOST EQUAL TO
0x00c6: 0x2206, # INCREMENT
0x00c7: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00c8: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00c9: 0x2026, # HORIZONTAL ELLIPSIS
0x00ca: 0x00a0, # NO-BREAK SPACE
0x00cb: 0x00c0, # LATIN CAPITAL LETTER A WITH GRAVE
0x00cc: 0x00c3, # LATIN CAPITAL LETTER A WITH TILDE
0x00cd: 0x00d5, # LATIN CAPITAL LETTER O WITH TILDE
0x00ce: 0x0152, # LATIN CAPITAL LIGATURE OE
0x00cf: 0x0153, # LATIN SMALL LIGATURE OE
0x00d0: 0x2013, # EN DASH
0x00d1: 0x2014, # EM DASH
0x00d2: 0x201c, # LEFT DOUBLE QUOTATION MARK
0x00d3: 0x201d, # RIGHT DOUBLE QUOTATION MARK
0x00d4: 0x2018, # LEFT SINGLE QUOTATION MARK
0x00d5: 0x2019, # RIGHT SINGLE QUOTATION MARK
0x00d6: 0x00f7, # DIVISION SIGN
0x00d7: 0x25ca, # LOZENGE
0x00d8: 0x00ff, # LATIN SMALL LETTER Y WITH DIAERESIS
0x00d9: 0x0178, # LATIN CAPITAL LETTER Y WITH DIAERESIS
0x00da: 0x2044, # FRACTION SLASH
0x00db: 0x00a4, # CURRENCY SIGN
0x00dc: 0x2039, # SINGLE LEFT-POINTING ANGLE QUOTATION MARK
0x00dd: 0x203a, # SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
0x00de: 0xfb01, # LATIN SMALL LIGATURE FI
0x00df: 0xfb02, # LATIN SMALL LIGATURE FL
0x00e0: 0x2021, # DOUBLE DAGGER
0x00e1: 0x00b7, # MIDDLE DOT
0x00e2: 0x201a, # SINGLE LOW-9 QUOTATION MARK
0x00e3: 0x201e, # DOUBLE LOW-9 QUOTATION MARK
0x00e4: 0x2030, # PER MILLE SIGN
0x00e5: 0x00c2, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
0x00e6: 0x00ca, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
0x00e7: 0x00c1, # LATIN CAPITAL LETTER A WITH ACUTE
0x00e8: 0x00cb, # LATIN CAPITAL LETTER E WITH DIAERESIS
0x00e9: 0x00c8, # LATIN CAPITAL LETTER E WITH GRAVE
0x00ea: 0x00cd, # LATIN CAPITAL LETTER I WITH ACUTE
0x00eb: 0x00ce, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
0x00ec: 0x00cf, # LATIN CAPITAL LETTER I WITH DIAERESIS
0x00ed: 0x00cc, # LATIN CAPITAL LETTER I WITH GRAVE
0x00ee: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE
0x00ef: 0x00d4, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
0x00f0: None, # UNDEFINED
0x00f1: 0x00d2, # LATIN CAPITAL LETTER O WITH GRAVE
0x00f2: 0x00da, # LATIN CAPITAL LETTER U WITH ACUTE
0x00f3: 0x00db, # LATIN CAPITAL LETTER U WITH CIRCUMFLEX
0x00f4: 0x00d9, # LATIN CAPITAL LETTER U WITH GRAVE
0x00f5: 0x0131, # LATIN SMALL LETTER DOTLESS I
0x00f6: 0x02c6, # MODIFIER LETTER CIRCUMFLEX ACCENT
0x00f7: 0x02dc, # SMALL TILDE
0x00f8: 0x00af, # MACRON
0x00f9: 0x02d8, # BREVE
0x00fa: 0x02d9, # DOT ABOVE
0x00fb: 0x02da, # RING ABOVE
0x00fc: 0x00b8, # CEDILLA
0x00fd: 0x02dd, # DOUBLE ACUTE ACCENT
0x00fe: 0x02db, # OGONEK
0x00ff: 0x02c7, # CARON
})
### Encoding Map
encoding_map = {}
for k,v in decoding_map.items():
    encoding_map[v] = k

+ 0
- 169
lib/jython/Lib/encodings/mac_turkish.py

@@ -1,169 +0,0 @@
""" Python Character Mapping Codec generated from 'TURKISH.TXT' with gencodec.py.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_map)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_map)
class StreamWriter(Codec,codecs.StreamWriter):
    pass
class StreamReader(Codec,codecs.StreamReader):
    pass
### encodings module API
def getregentry():
    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x0081: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x0082: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
0x0083: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x0084: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE
0x0085: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x0086: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x0087: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
0x0088: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
0x0089: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x008a: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
0x008b: 0x00e3, # LATIN SMALL LETTER A WITH TILDE
0x008c: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE
0x008d: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
0x008e: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x008f: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
0x0090: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x0091: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
0x0092: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
0x0093: 0x00ec, # LATIN SMALL LETTER I WITH GRAVE
0x0094: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x0095: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS
0x0096: 0x00f1, # LATIN SMALL LETTER N WITH TILDE
0x0097: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x0098: 0x00f2, # LATIN SMALL LETTER O WITH GRAVE
0x0099: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x009a: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
0x009b: 0x00f5, # LATIN SMALL LETTER O WITH TILDE
0x009c: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
0x009d: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE
0x009e: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x009f: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x00a0: 0x2020, # DAGGER
0x00a1: 0x00b0, # DEGREE SIGN
0x00a4: 0x00a7, # SECTION SIGN
0x00a5: 0x2022, # BULLET
0x00a6: 0x00b6, # PILCROW SIGN
0x00a7: 0x00df, # LATIN SMALL LETTER SHARP S
0x00a8: 0x00ae, # REGISTERED SIGN
0x00aa: 0x2122, # TRADE MARK SIGN
0x00ab: 0x00b4, # ACUTE ACCENT
0x00ac: 0x00a8, # DIAERESIS
0x00ad: 0x2260, # NOT EQUAL TO
0x00ae: 0x00c6, # LATIN CAPITAL LIGATURE AE
0x00af: 0x00d8, # LATIN CAPITAL LETTER O WITH STROKE
0x00b0: 0x221e, # INFINITY
0x00b2: 0x2264, # LESS-THAN OR EQUAL TO
0x00b3: 0x2265, # GREATER-THAN OR EQUAL TO
0x00b4: 0x00a5, # YEN SIGN
0x00b6: 0x2202, # PARTIAL DIFFERENTIAL
0x00b7: 0x2211, # N-ARY SUMMATION
0x00b8: 0x220f, # N-ARY PRODUCT
0x00b9: 0x03c0, # GREEK SMALL LETTER PI
0x00ba: 0x222b, # INTEGRAL
0x00bb: 0x00aa, # FEMININE ORDINAL INDICATOR
0x00bc: 0x00ba, # MASCULINE ORDINAL INDICATOR
0x00bd: 0x2126, # OHM SIGN
0x00be: 0x00e6, # LATIN SMALL LIGATURE AE
0x00bf: 0x00f8, # LATIN SMALL LETTER O WITH STROKE
0x00c0: 0x00bf, # INVERTED QUESTION MARK
0x00c1: 0x00a1, # INVERTED EXCLAMATION MARK
0x00c2: 0x00ac, # NOT SIGN
0x00c3: 0x221a, # SQUARE ROOT
0x00c4: 0x0192, # LATIN SMALL LETTER F WITH HOOK
0x00c5: 0x2248, # ALMOST EQUAL TO
0x00c6: 0x2206, # INCREMENT
0x00c7: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00c8: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00c9: 0x2026, # HORIZONTAL ELLIPSIS
0x00ca: 0x00a0, # NO-BREAK SPACE
0x00cb: 0x00c0, # LATIN CAPITAL LETTER A WITH GRAVE
0x00cc: 0x00c3, # LATIN CAPITAL LETTER A WITH TILDE
0x00cd: 0x00d5, # LATIN CAPITAL LETTER O WITH TILDE
0x00ce: 0x0152, # LATIN CAPITAL LIGATURE OE
0x00cf: 0x0153, # LATIN SMALL LIGATURE OE
0x00d0: 0x2013, # EN DASH
0x00d1: 0x2014, # EM DASH
0x00d2: 0x201c, # LEFT DOUBLE QUOTATION MARK
0x00d3: 0x201d, # RIGHT DOUBLE QUOTATION MARK
0x00d4: 0x2018, # LEFT SINGLE QUOTATION MARK
0x00d5: 0x2019, # RIGHT SINGLE QUOTATION MARK
0x00d6: 0x00f7, # DIVISION SIGN
0x00d7: 0x25ca, # LOZENGE
0x00d8: 0x00ff, # LATIN SMALL LETTER Y WITH DIAERESIS
0x00d9: 0x0178, # LATIN CAPITAL LETTER Y WITH DIAERESIS
0x00da: 0x011e, # LATIN CAPITAL LETTER G WITH BREVE
0x00db: 0x011f, # LATIN SMALL LETTER G WITH BREVE
0x00dc: 0x0130, # LATIN CAPITAL LETTER I WITH DOT ABOVE
0x00dd: 0x0131, # LATIN SMALL LETTER DOTLESS I
0x00de: 0x015e, # LATIN CAPITAL LETTER S WITH CEDILLA
0x00df: 0x015f, # LATIN SMALL LETTER S WITH CEDILLA
0x00e0: 0x2021, # DOUBLE DAGGER
0x00e1: 0x00b7, # MIDDLE DOT
0x00e2: 0x201a, # SINGLE LOW-9 QUOTATION MARK
0x00e3: 0x201e, # DOUBLE LOW-9 QUOTATION MARK
0x00e4: 0x2030, # PER MILLE SIGN
0x00e5: 0x00c2, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
0x00e6: 0x00ca, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
0x00e7: 0x00c1, # LATIN CAPITAL LETTER A WITH ACUTE
0x00e8: 0x00cb, # LATIN CAPITAL LETTER E WITH DIAERESIS
0x00e9: 0x00c8, # LATIN CAPITAL LETTER E WITH GRAVE
0x00ea: 0x00cd, # LATIN CAPITAL LETTER I WITH ACUTE
0x00eb: 0x00ce, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
0x00ec: 0x00cf, # LATIN CAPITAL LETTER I WITH DIAERESIS
0x00ed: 0x00cc, # LATIN CAPITAL LETTER I WITH GRAVE
0x00ee: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE
0x00ef: 0x00d4, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
0x00f0: None, # UNDEFINED
0x00f1: 0x00d2, # LATIN CAPITAL LETTER O WITH GRAVE
0x00f2: 0x00da, # LATIN CAPITAL LETTER U WITH ACUTE
0x00f3: 0x00db, # LATIN CAPITAL LETTER U WITH CIRCUMFLEX
0x00f4: 0x00d9, # LATIN CAPITAL LETTER U WITH GRAVE
0x00f5: None, # UNDEFINED
0x00f6: 0x02c6, # MODIFIER LETTER CIRCUMFLEX ACCENT
0x00f7: 0x02dc, # SMALL TILDE
0x00f8: 0x00af, # MACRON
0x00f9: 0x02d8, # BREVE
0x00fa: 0x02d9, # DOT ABOVE
0x00fb: 0x02da, # RING ABOVE
0x00fc: 0x00b8, # CEDILLA
0x00fd: 0x02dd, # DOUBLE ACUTE ACCENT
0x00fe: 0x02db, # OGONEK
0x00ff: 0x02c7, # CARON
})
### Encoding Map
encoding_map = {}
for k,v in decoding_map.items():
    encoding_map[v] = k

+ 0
- 36
lib/jython/Lib/encodings/mbcs.py

@@ -1,36 +0,0 @@
""" Python 'mbcs' Codec for Windows
Cloned by Mark Hammond (mhammond@skippinet.com.au) from ascii.py,
which was written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
import codecs
### Codec APIs
class Codec(codecs.Codec):
    # Note: Binding these as C functions will result in the class not
    # converting them to methods. This is intended.
    encode = codecs.mbcs_encode
    decode = codecs.mbcs_decode
class StreamWriter(Codec,codecs.StreamWriter):
    pass
class StreamReader(Codec,codecs.StreamReader):
    pass
class StreamConverter(StreamWriter,StreamReader):
    encode = codecs.mbcs_decode
    decode = codecs.mbcs_encode
### encodings module API
def getregentry():
    return (Codec.encode,Codec.decode,StreamReader,StreamWriter)

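The mbcs codec delegates to the Windows ANSI code page, so the codecs.mbcs_encode / codecs.mbcs_decode hooks referenced above only exist on Windows builds of the interpreter. A small, hypothetical Python 3 check (the exact bytes produced depend on the active code page):

import codecs
import sys

print(hasattr(codecs, "mbcs_encode"))   # True only on Windows builds
if sys.platform == "win32":
    print("abc".encode("mbcs"))         # -> b'abc' in the active ANSI code page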
+ 0
- 30
lib/jython/Lib/encodings/raw_unicode_escape.py

@@ -1,30 +0,0 @@
""" Python 'raw-unicode-escape' Codec
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
import codecs
### Codec APIs
class Codec(codecs.Codec):
    # Note: Binding these as C functions will result in the class not
    # converting them to methods. This is intended.
    encode = codecs.raw_unicode_escape_encode
    decode = codecs.raw_unicode_escape_decode
class StreamWriter(Codec,codecs.StreamWriter):
    pass
class StreamReader(Codec,codecs.StreamReader):
    pass
### encodings module API
def getregentry():
    return (Codec.encode,Codec.decode,StreamReader,StreamWriter)

+ 0
- 34
lib/jython/Lib/encodings/undefined.py

@@ -1,34 +0,0 @@
""" Python 'undefined' Codec
This codec will always raise a ValueError exception when being
used. It is intended for use by the site.py file to switch off
automatic string to Unicode coercion.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
import codecs
### Codec APIs
class Codec(codecs.Codec):
    def encode(self,input,errors='strict'):
        raise UnicodeError, "undefined encoding"
    def decode(self,input,errors='strict'):
        raise UnicodeError, "undefined encoding"
class StreamWriter(Codec,codecs.StreamWriter):
    pass
class StreamReader(Codec,codecs.StreamReader):
    pass
### encodings module API
def getregentry():
    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)

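The docstring above spells out the point of this codec: it exists only to fail, so that implicit string-to-Unicode coercion could be switched off via site.py. A hypothetical Python 3 restatement of that behaviour (the class name is made up; the original uses Python 2 raise syntax):

import codecs

class UndefinedCodec(codecs.Codec):
    # Mirrors the deleted module: every call raises.
    def encode(self, input, errors='strict'):
        raise UnicodeError("undefined encoding")
    def decode(self, input, errors='strict'):
        raise UnicodeError("undefined encoding")

try:
    UndefinedCodec().encode("anything")
except UnicodeError as exc:
    print("refused as expected:", exc)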
+ 0
- 0
lib/jython/Lib/encodings/unicode_escape.py


Some files were not shown because too many files changed in this diff
