summaryrefslogtreecommitdiff
path: root/lib/python2.7/bsddb
diff options
context:
space:
mode:
Diffstat (limited to 'lib/python2.7/bsddb')
-rw-r--r--lib/python2.7/bsddb/__init__.py455
-rw-r--r--lib/python2.7/bsddb/db.py60
-rw-r--r--lib/python2.7/bsddb/dbobj.py266
-rw-r--r--lib/python2.7/bsddb/dbrecio.py190
-rw-r--r--lib/python2.7/bsddb/dbshelve.py381
-rw-r--r--lib/python2.7/bsddb/dbtables.py843
-rw-r--r--lib/python2.7/bsddb/dbutils.py83
-rw-r--r--lib/python2.7/bsddb/test/__init__.py0
-rw-r--r--lib/python2.7/bsddb/test/test_all.py620
-rw-r--r--lib/python2.7/bsddb/test/test_associate.py465
-rw-r--r--lib/python2.7/bsddb/test/test_basics.py1158
-rw-r--r--lib/python2.7/bsddb/test/test_compare.py447
-rw-r--r--lib/python2.7/bsddb/test/test_compat.py184
-rw-r--r--lib/python2.7/bsddb/test/test_cursor_pget_bug.py54
-rw-r--r--lib/python2.7/bsddb/test/test_db.py163
-rw-r--r--lib/python2.7/bsddb/test/test_dbenv.py525
-rw-r--r--lib/python2.7/bsddb/test/test_dbobj.py70
-rw-r--r--lib/python2.7/bsddb/test/test_dbshelve.py398
-rw-r--r--lib/python2.7/bsddb/test/test_dbtables.py407
-rw-r--r--lib/python2.7/bsddb/test/test_distributed_transactions.py152
-rw-r--r--lib/python2.7/bsddb/test/test_early_close.py215
-rw-r--r--lib/python2.7/bsddb/test/test_fileid.py61
-rw-r--r--lib/python2.7/bsddb/test/test_get_none.py92
-rw-r--r--lib/python2.7/bsddb/test/test_join.py99
-rw-r--r--lib/python2.7/bsddb/test/test_lock.py184
-rw-r--r--lib/python2.7/bsddb/test/test_misc.py138
-rw-r--r--lib/python2.7/bsddb/test/test_pickle.py68
-rw-r--r--lib/python2.7/bsddb/test/test_queue.py163
-rw-r--r--lib/python2.7/bsddb/test/test_recno.py319
-rw-r--r--lib/python2.7/bsddb/test/test_replication.py543
-rw-r--r--lib/python2.7/bsddb/test/test_sequence.py136
-rw-r--r--lib/python2.7/bsddb/test/test_thread.py517
32 files changed, 9456 insertions, 0 deletions
diff --git a/lib/python2.7/bsddb/__init__.py b/lib/python2.7/bsddb/__init__.py
new file mode 100644
index 0000000..13c9c27
--- /dev/null
+++ b/lib/python2.7/bsddb/__init__.py
@@ -0,0 +1,455 @@
+#----------------------------------------------------------------------
+# Copyright (c) 1999-2001, Digital Creations, Fredericksburg, VA, USA
+# and Andrew Kuchling. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# o Redistributions of source code must retain the above copyright
+# notice, this list of conditions, and the disclaimer that follows.
+#
+# o Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions, and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+#
+# o Neither the name of Digital Creations nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY DIGITAL CREATIONS AND CONTRIBUTORS *AS
+# IS* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL DIGITAL
+# CREATIONS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+# DAMAGE.
+#----------------------------------------------------------------------
+
+
+"""Support for Berkeley DB 4.3 through 5.3 with a simple interface.
+
+For the full featured object oriented interface use the bsddb.db module
+instead. It mirrors the Oracle Berkeley DB C API.
+"""
+
+import sys
+absolute_import = (sys.version_info[0] >= 3)
+
+if (sys.version_info >= (2, 6)) and (sys.version_info < (3, 0)) :
+ import warnings
+ if sys.py3kwarning and (__name__ != 'bsddb3') :
+ warnings.warnpy3k("in 3.x, the bsddb module has been removed; "
+ "please use the pybsddb project instead",
+ DeprecationWarning, 2)
+ warnings.filterwarnings("ignore", ".*CObject.*", DeprecationWarning,
+ "bsddb.__init__")
+
+try:
+ if __name__ == 'bsddb3':
+ # import _pybsddb binary as it should be the more recent version from
+ # a standalone pybsddb addon package than the version included with
+ # python as bsddb._bsddb.
+ if absolute_import :
+            # Because this syntax is not valid before Python 2.5
+ exec("from . import _pybsddb")
+ else :
+ import _pybsddb
+ _bsddb = _pybsddb
+ from bsddb3.dbutils import DeadlockWrap as _DeadlockWrap
+ else:
+ import _bsddb
+ from bsddb.dbutils import DeadlockWrap as _DeadlockWrap
+except ImportError:
+ # Remove ourselves from sys.modules
+ import sys
+ del sys.modules[__name__]
+ raise
+
+# bsddb3 calls it db, but provide _db for backwards compatibility
+db = _db = _bsddb
+__version__ = db.__version__
+
+error = db.DBError # So bsddb.error will mean something...
+
+#----------------------------------------------------------------------
+
+import sys, os
+
+from weakref import ref
+
+if sys.version_info < (2, 6) :
+ import UserDict
+ MutableMapping = UserDict.DictMixin
+else :
+ import collections
+ MutableMapping = collections.MutableMapping
+
+class _iter_mixin(MutableMapping):
+ def _make_iter_cursor(self):
+ cur = _DeadlockWrap(self.db.cursor)
+ key = id(cur)
+ self._cursor_refs[key] = ref(cur, self._gen_cref_cleaner(key))
+ return cur
+
+ def _gen_cref_cleaner(self, key):
+        # Generate the weakref callback function here so that the
+        # callback does not hold a strong reference to the cursor
+        # itself.
+ return lambda ref: self._cursor_refs.pop(key, None)
+
+ def __iter__(self):
+ self._kill_iteration = False
+ self._in_iter += 1
+ try:
+ try:
+ cur = self._make_iter_cursor()
+
+ # FIXME-20031102-greg: race condition. cursor could
+ # be closed by another thread before this call.
+
+ # since we're only returning keys, we call the cursor
+ # methods with flags=0, dlen=0, dofs=0
+ key = _DeadlockWrap(cur.first, 0,0,0)[0]
+ yield key
+
+ next = getattr(cur, "next")
+ while 1:
+ try:
+ key = _DeadlockWrap(next, 0,0,0)[0]
+ yield key
+ except _bsddb.DBCursorClosedError:
+ if self._kill_iteration:
+ raise RuntimeError('Database changed size '
+ 'during iteration.')
+ cur = self._make_iter_cursor()
+ # FIXME-20031101-greg: race condition. cursor could
+ # be closed by another thread before this call.
+ _DeadlockWrap(cur.set, key,0,0,0)
+ next = getattr(cur, "next")
+ except _bsddb.DBNotFoundError:
+ pass
+ except _bsddb.DBCursorClosedError:
+ # the database was modified during iteration. abort.
+ pass
+# When Python 2.4 not supported in bsddb3, we can change this to "finally"
+ except :
+ self._in_iter -= 1
+ raise
+
+ self._in_iter -= 1
+
+ def iteritems(self):
+ if not self.db:
+ return
+ self._kill_iteration = False
+ self._in_iter += 1
+ try:
+ try:
+ cur = self._make_iter_cursor()
+
+ # FIXME-20031102-greg: race condition. cursor could
+ # be closed by another thread before this call.
+
+ kv = _DeadlockWrap(cur.first)
+ key = kv[0]
+ yield kv
+
+ next = getattr(cur, "next")
+ while 1:
+ try:
+ kv = _DeadlockWrap(next)
+ key = kv[0]
+ yield kv
+ except _bsddb.DBCursorClosedError:
+ if self._kill_iteration:
+ raise RuntimeError('Database changed size '
+ 'during iteration.')
+ cur = self._make_iter_cursor()
+ # FIXME-20031101-greg: race condition. cursor could
+ # be closed by another thread before this call.
+ _DeadlockWrap(cur.set, key,0,0,0)
+ next = getattr(cur, "next")
+ except _bsddb.DBNotFoundError:
+ pass
+ except _bsddb.DBCursorClosedError:
+ # the database was modified during iteration. abort.
+ pass
+# When Python 2.4 not supported in bsddb3, we can change this to "finally"
+ except :
+ self._in_iter -= 1
+ raise
+
+ self._in_iter -= 1
+
+
+class _DBWithCursor(_iter_mixin):
+ """
+ A simple wrapper around DB that makes it look like the bsddbobject in
+ the old module. It uses a cursor as needed to provide DB traversal.
+ """
+ def __init__(self, db):
+ self.db = db
+ self.db.set_get_returns_none(0)
+
+ # FIXME-20031101-greg: I believe there is still the potential
+ # for deadlocks in a multithreaded environment if someone
+        # attempts to use any of the cursor interfaces in one
+ # thread while doing a put or delete in another thread. The
+ # reason is that _checkCursor and _closeCursors are not atomic
+ # operations. Doing our own locking around self.dbc,
+ # self.saved_dbc_key and self._cursor_refs could prevent this.
+ # TODO: A test case demonstrating the problem needs to be written.
+
+ # self.dbc is a DBCursor object used to implement the
+ # first/next/previous/last/set_location methods.
+ self.dbc = None
+ self.saved_dbc_key = None
+
+ # a collection of all DBCursor objects currently allocated
+ # by the _iter_mixin interface.
+ self._cursor_refs = {}
+ self._in_iter = 0
+ self._kill_iteration = False
+
+ def __del__(self):
+ self.close()
+
+ def _checkCursor(self):
+ if self.dbc is None:
+ self.dbc = _DeadlockWrap(self.db.cursor)
+ if self.saved_dbc_key is not None:
+ _DeadlockWrap(self.dbc.set, self.saved_dbc_key)
+ self.saved_dbc_key = None
+
+ # This method is needed for all non-cursor DB calls to avoid
+ # Berkeley DB deadlocks (due to being opened with DB_INIT_LOCK
+ # and DB_THREAD to be thread safe) when intermixing database
+ # operations that use the cursor internally with those that don't.
+ def _closeCursors(self, save=1):
+ if self.dbc:
+ c = self.dbc
+ self.dbc = None
+ if save:
+ try:
+ self.saved_dbc_key = _DeadlockWrap(c.current, 0,0,0)[0]
+ except db.DBError:
+ pass
+ _DeadlockWrap(c.close)
+ del c
+ for cref in self._cursor_refs.values():
+ c = cref()
+ if c is not None:
+ _DeadlockWrap(c.close)
+
+ def _checkOpen(self):
+ if self.db is None:
+ raise error, "BSDDB object has already been closed"
+
+ def isOpen(self):
+ return self.db is not None
+
+ def __len__(self):
+ self._checkOpen()
+ return _DeadlockWrap(lambda: len(self.db)) # len(self.db)
+
+ if sys.version_info >= (2, 6) :
+ def __repr__(self) :
+ if self.isOpen() :
+ return repr(dict(_DeadlockWrap(self.db.items)))
+ return repr(dict())
+
+ def __getitem__(self, key):
+ self._checkOpen()
+ return _DeadlockWrap(lambda: self.db[key]) # self.db[key]
+
+ def __setitem__(self, key, value):
+ self._checkOpen()
+ self._closeCursors()
+ if self._in_iter and key not in self:
+ self._kill_iteration = True
+ def wrapF():
+ self.db[key] = value
+ _DeadlockWrap(wrapF) # self.db[key] = value
+
+ def __delitem__(self, key):
+ self._checkOpen()
+ self._closeCursors()
+ if self._in_iter and key in self:
+ self._kill_iteration = True
+ def wrapF():
+ del self.db[key]
+ _DeadlockWrap(wrapF) # del self.db[key]
+
+ def close(self):
+ self._closeCursors(save=0)
+ if self.dbc is not None:
+ _DeadlockWrap(self.dbc.close)
+ v = 0
+ if self.db is not None:
+ v = _DeadlockWrap(self.db.close)
+ self.dbc = None
+ self.db = None
+ return v
+
+ def keys(self):
+ self._checkOpen()
+ return _DeadlockWrap(self.db.keys)
+
+ def has_key(self, key):
+ self._checkOpen()
+ return _DeadlockWrap(self.db.has_key, key)
+
+ def set_location(self, key):
+ self._checkOpen()
+ self._checkCursor()
+ return _DeadlockWrap(self.dbc.set_range, key)
+
+ def next(self): # Renamed by "2to3"
+ self._checkOpen()
+ self._checkCursor()
+ rv = _DeadlockWrap(getattr(self.dbc, "next"))
+ return rv
+
+ if sys.version_info[0] >= 3 : # For "2to3" conversion
+ next = __next__
+
+ def previous(self):
+ self._checkOpen()
+ self._checkCursor()
+ rv = _DeadlockWrap(self.dbc.prev)
+ return rv
+
+ def first(self):
+ self._checkOpen()
+ # fix 1725856: don't needlessly try to restore our cursor position
+ self.saved_dbc_key = None
+ self._checkCursor()
+ rv = _DeadlockWrap(self.dbc.first)
+ return rv
+
+ def last(self):
+ self._checkOpen()
+ # fix 1725856: don't needlessly try to restore our cursor position
+ self.saved_dbc_key = None
+ self._checkCursor()
+ rv = _DeadlockWrap(self.dbc.last)
+ return rv
+
+ def sync(self):
+ self._checkOpen()
+ return _DeadlockWrap(self.db.sync)
+
+
+#----------------------------------------------------------------------
+# Compatibility object factory functions
+
+def hashopen(file, flag='c', mode=0666, pgsize=None, ffactor=None, nelem=None,
+ cachesize=None, lorder=None, hflags=0):
+
+ flags = _checkflag(flag, file)
+ e = _openDBEnv(cachesize)
+ d = db.DB(e)
+ d.set_flags(hflags)
+ if pgsize is not None: d.set_pagesize(pgsize)
+ if lorder is not None: d.set_lorder(lorder)
+ if ffactor is not None: d.set_h_ffactor(ffactor)
+ if nelem is not None: d.set_h_nelem(nelem)
+ d.open(file, db.DB_HASH, flags, mode)
+ return _DBWithCursor(d)
+
+#----------------------------------------------------------------------
+
+def btopen(file, flag='c', mode=0666,
+ btflags=0, cachesize=None, maxkeypage=None, minkeypage=None,
+ pgsize=None, lorder=None):
+
+ flags = _checkflag(flag, file)
+ e = _openDBEnv(cachesize)
+ d = db.DB(e)
+ if pgsize is not None: d.set_pagesize(pgsize)
+ if lorder is not None: d.set_lorder(lorder)
+ d.set_flags(btflags)
+ if minkeypage is not None: d.set_bt_minkey(minkeypage)
+ if maxkeypage is not None: d.set_bt_maxkey(maxkeypage)
+ d.open(file, db.DB_BTREE, flags, mode)
+ return _DBWithCursor(d)
+
+#----------------------------------------------------------------------
+
+
+def rnopen(file, flag='c', mode=0666,
+ rnflags=0, cachesize=None, pgsize=None, lorder=None,
+ rlen=None, delim=None, source=None, pad=None):
+
+ flags = _checkflag(flag, file)
+ e = _openDBEnv(cachesize)
+ d = db.DB(e)
+ if pgsize is not None: d.set_pagesize(pgsize)
+ if lorder is not None: d.set_lorder(lorder)
+ d.set_flags(rnflags)
+ if delim is not None: d.set_re_delim(delim)
+ if rlen is not None: d.set_re_len(rlen)
+ if source is not None: d.set_re_source(source)
+ if pad is not None: d.set_re_pad(pad)
+ d.open(file, db.DB_RECNO, flags, mode)
+ return _DBWithCursor(d)
+
+#----------------------------------------------------------------------
+
+def _openDBEnv(cachesize):
+ e = db.DBEnv()
+ if cachesize is not None:
+ if cachesize >= 20480:
+ e.set_cachesize(0, cachesize)
+ else:
+ raise error, "cachesize must be >= 20480"
+ e.set_lk_detect(db.DB_LOCK_DEFAULT)
+ e.open('.', db.DB_PRIVATE | db.DB_CREATE | db.DB_THREAD | db.DB_INIT_LOCK | db.DB_INIT_MPOOL)
+ return e
+
+def _checkflag(flag, file):
+ if flag == 'r':
+ flags = db.DB_RDONLY
+ elif flag == 'rw':
+ flags = 0
+ elif flag == 'w':
+ flags = db.DB_CREATE
+ elif flag == 'c':
+ flags = db.DB_CREATE
+ elif flag == 'n':
+ flags = db.DB_CREATE
+ #flags = db.DB_CREATE | db.DB_TRUNCATE
+ # we used db.DB_TRUNCATE flag for this before but Berkeley DB
+ # 4.2.52 changed to disallowed truncate with txn environments.
+ if file is not None and os.path.isfile(file):
+ os.unlink(file)
+ else:
+ raise error, "flags should be one of 'r', 'w', 'c' or 'n'"
+ return flags | db.DB_THREAD
+
+#----------------------------------------------------------------------
+
+
+# This is a silly little hack that allows apps to continue to use the
+# DB_THREAD flag even on systems without threads without freaking out
+# Berkeley DB.
+#
+# This assumes that if Python was built with thread support then
+# Berkeley DB was too.
+
+try:
+ # 2to3 automatically changes "import thread" to "import _thread"
+ import thread as T
+ del T
+
+except ImportError:
+ db.DB_THREAD = 0
+
+#----------------------------------------------------------------------
diff --git a/lib/python2.7/bsddb/db.py b/lib/python2.7/bsddb/db.py
new file mode 100644
index 0000000..c3aee30
--- /dev/null
+++ b/lib/python2.7/bsddb/db.py
@@ -0,0 +1,60 @@
+#----------------------------------------------------------------------
+# Copyright (c) 1999-2001, Digital Creations, Fredericksburg, VA, USA
+# and Andrew Kuchling. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# o Redistributions of source code must retain the above copyright
+# notice, this list of conditions, and the disclaimer that follows.
+#
+# o Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions, and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+#
+# o Neither the name of Digital Creations nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY DIGITAL CREATIONS AND CONTRIBUTORS *AS
+# IS* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL DIGITAL
+# CREATIONS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+# DAMAGE.
+#----------------------------------------------------------------------
+
+
+# This module is just a placeholder for possible future expansion, in
+# case we ever want to augment the stuff in _db in any way. For now
+# it just simply imports everything from _db.
+
+import sys
+absolute_import = (sys.version_info[0] >= 3)
+
+if not absolute_import :
+ if __name__.startswith('bsddb3.') :
+ # import _pybsddb binary as it should be the more recent version from
+ # a standalone pybsddb addon package than the version included with
+ # python as bsddb._bsddb.
+ from _pybsddb import *
+ from _pybsddb import __version__
+ else:
+ from _bsddb import *
+ from _bsddb import __version__
+else :
+    # Because this syntax is not valid before Python 2.5
+ if __name__.startswith('bsddb3.') :
+ exec("from ._pybsddb import *")
+ exec("from ._pybsddb import __version__")
+ else :
+ exec("from ._bsddb import *")
+ exec("from ._bsddb import __version__")
diff --git a/lib/python2.7/bsddb/dbobj.py b/lib/python2.7/bsddb/dbobj.py
new file mode 100644
index 0000000..1400fe1
--- /dev/null
+++ b/lib/python2.7/bsddb/dbobj.py
@@ -0,0 +1,266 @@
+#-------------------------------------------------------------------------
+# This file contains real Python object wrappers for DB and DBEnv
+# C "objects" that can be usefully subclassed. The previous SWIG
+# based interface allowed this thanks to SWIG's shadow classes.
+# -- Gregory P. Smith
+#-------------------------------------------------------------------------
+#
+# (C) Copyright 2001 Autonomous Zone Industries
+#
+# License: This is free software. You may use this software for any
+# purpose including modification/redistribution, so long as
+# this header remains intact and that you do not claim any
+# rights of ownership or authorship of this software. This
+# software has been tested, but no warranty is expressed or
+# implied.
+#
+
+#
+# TODO it would be *really nice* to have an automatic shadow class populator
+# so that new methods don't need to be added here manually after being
+# added to _bsddb.c.
+#
+
+import sys
+absolute_import = (sys.version_info[0] >= 3)
+if absolute_import :
+    # Because this syntax is not valid before Python 2.5
+ exec("from . import db")
+else :
+ import db
+
+if sys.version_info < (2, 6) :
+ from UserDict import DictMixin as MutableMapping
+else :
+ import collections
+ MutableMapping = collections.MutableMapping
+
+class DBEnv:
+ def __init__(self, *args, **kwargs):
+ self._cobj = db.DBEnv(*args, **kwargs)
+
+ def close(self, *args, **kwargs):
+ return self._cobj.close(*args, **kwargs)
+ def open(self, *args, **kwargs):
+ return self._cobj.open(*args, **kwargs)
+ def remove(self, *args, **kwargs):
+ return self._cobj.remove(*args, **kwargs)
+ def set_shm_key(self, *args, **kwargs):
+ return self._cobj.set_shm_key(*args, **kwargs)
+ def set_cachesize(self, *args, **kwargs):
+ return self._cobj.set_cachesize(*args, **kwargs)
+ def set_data_dir(self, *args, **kwargs):
+ return self._cobj.set_data_dir(*args, **kwargs)
+ def set_flags(self, *args, **kwargs):
+ return self._cobj.set_flags(*args, **kwargs)
+ def set_lg_bsize(self, *args, **kwargs):
+ return self._cobj.set_lg_bsize(*args, **kwargs)
+ def set_lg_dir(self, *args, **kwargs):
+ return self._cobj.set_lg_dir(*args, **kwargs)
+ def set_lg_max(self, *args, **kwargs):
+ return self._cobj.set_lg_max(*args, **kwargs)
+ def set_lk_detect(self, *args, **kwargs):
+ return self._cobj.set_lk_detect(*args, **kwargs)
+ if db.version() < (4,5):
+ def set_lk_max(self, *args, **kwargs):
+ return self._cobj.set_lk_max(*args, **kwargs)
+ def set_lk_max_locks(self, *args, **kwargs):
+ return self._cobj.set_lk_max_locks(*args, **kwargs)
+ def set_lk_max_lockers(self, *args, **kwargs):
+ return self._cobj.set_lk_max_lockers(*args, **kwargs)
+ def set_lk_max_objects(self, *args, **kwargs):
+ return self._cobj.set_lk_max_objects(*args, **kwargs)
+ def set_mp_mmapsize(self, *args, **kwargs):
+ return self._cobj.set_mp_mmapsize(*args, **kwargs)
+ def set_timeout(self, *args, **kwargs):
+ return self._cobj.set_timeout(*args, **kwargs)
+ def set_tmp_dir(self, *args, **kwargs):
+ return self._cobj.set_tmp_dir(*args, **kwargs)
+ def txn_begin(self, *args, **kwargs):
+ return self._cobj.txn_begin(*args, **kwargs)
+ def txn_checkpoint(self, *args, **kwargs):
+ return self._cobj.txn_checkpoint(*args, **kwargs)
+ def txn_stat(self, *args, **kwargs):
+ return self._cobj.txn_stat(*args, **kwargs)
+ def set_tx_max(self, *args, **kwargs):
+ return self._cobj.set_tx_max(*args, **kwargs)
+ def set_tx_timestamp(self, *args, **kwargs):
+ return self._cobj.set_tx_timestamp(*args, **kwargs)
+ def lock_detect(self, *args, **kwargs):
+ return self._cobj.lock_detect(*args, **kwargs)
+ def lock_get(self, *args, **kwargs):
+ return self._cobj.lock_get(*args, **kwargs)
+ def lock_id(self, *args, **kwargs):
+ return self._cobj.lock_id(*args, **kwargs)
+ def lock_put(self, *args, **kwargs):
+ return self._cobj.lock_put(*args, **kwargs)
+ def lock_stat(self, *args, **kwargs):
+ return self._cobj.lock_stat(*args, **kwargs)
+ def log_archive(self, *args, **kwargs):
+ return self._cobj.log_archive(*args, **kwargs)
+
+ def set_get_returns_none(self, *args, **kwargs):
+ return self._cobj.set_get_returns_none(*args, **kwargs)
+
+ def log_stat(self, *args, **kwargs):
+ return self._cobj.log_stat(*args, **kwargs)
+
+ def dbremove(self, *args, **kwargs):
+ return self._cobj.dbremove(*args, **kwargs)
+ def dbrename(self, *args, **kwargs):
+ return self._cobj.dbrename(*args, **kwargs)
+ def set_encrypt(self, *args, **kwargs):
+ return self._cobj.set_encrypt(*args, **kwargs)
+
+ if db.version() >= (4,4):
+ def fileid_reset(self, *args, **kwargs):
+ return self._cobj.fileid_reset(*args, **kwargs)
+
+ def lsn_reset(self, *args, **kwargs):
+ return self._cobj.lsn_reset(*args, **kwargs)
+
+
+class DB(MutableMapping):
+ def __init__(self, dbenv, *args, **kwargs):
+ # give it the proper DBEnv C object that its expecting
+ self._cobj = db.DB(*((dbenv._cobj,) + args), **kwargs)
+
+ # TODO are there other dict methods that need to be overridden?
+ def __len__(self):
+ return len(self._cobj)
+ def __getitem__(self, arg):
+ return self._cobj[arg]
+ def __setitem__(self, key, value):
+ self._cobj[key] = value
+ def __delitem__(self, arg):
+ del self._cobj[arg]
+
+ if sys.version_info >= (2, 6) :
+ def __iter__(self) :
+ return self._cobj.__iter__()
+
+ def append(self, *args, **kwargs):
+ return self._cobj.append(*args, **kwargs)
+ def associate(self, *args, **kwargs):
+ return self._cobj.associate(*args, **kwargs)
+ def close(self, *args, **kwargs):
+ return self._cobj.close(*args, **kwargs)
+ def consume(self, *args, **kwargs):
+ return self._cobj.consume(*args, **kwargs)
+ def consume_wait(self, *args, **kwargs):
+ return self._cobj.consume_wait(*args, **kwargs)
+ def cursor(self, *args, **kwargs):
+ return self._cobj.cursor(*args, **kwargs)
+ def delete(self, *args, **kwargs):
+ return self._cobj.delete(*args, **kwargs)
+ def fd(self, *args, **kwargs):
+ return self._cobj.fd(*args, **kwargs)
+ def get(self, *args, **kwargs):
+ return self._cobj.get(*args, **kwargs)
+ def pget(self, *args, **kwargs):
+ return self._cobj.pget(*args, **kwargs)
+ def get_both(self, *args, **kwargs):
+ return self._cobj.get_both(*args, **kwargs)
+ def get_byteswapped(self, *args, **kwargs):
+ return self._cobj.get_byteswapped(*args, **kwargs)
+ def get_size(self, *args, **kwargs):
+ return self._cobj.get_size(*args, **kwargs)
+ def get_type(self, *args, **kwargs):
+ return self._cobj.get_type(*args, **kwargs)
+ def join(self, *args, **kwargs):
+ return self._cobj.join(*args, **kwargs)
+ def key_range(self, *args, **kwargs):
+ return self._cobj.key_range(*args, **kwargs)
+ def has_key(self, *args, **kwargs):
+ return self._cobj.has_key(*args, **kwargs)
+ def items(self, *args, **kwargs):
+ return self._cobj.items(*args, **kwargs)
+ def keys(self, *args, **kwargs):
+ return self._cobj.keys(*args, **kwargs)
+ def open(self, *args, **kwargs):
+ return self._cobj.open(*args, **kwargs)
+ def put(self, *args, **kwargs):
+ return self._cobj.put(*args, **kwargs)
+ def remove(self, *args, **kwargs):
+ return self._cobj.remove(*args, **kwargs)
+ def rename(self, *args, **kwargs):
+ return self._cobj.rename(*args, **kwargs)
+ def set_bt_minkey(self, *args, **kwargs):
+ return self._cobj.set_bt_minkey(*args, **kwargs)
+ def set_bt_compare(self, *args, **kwargs):
+ return self._cobj.set_bt_compare(*args, **kwargs)
+ def set_cachesize(self, *args, **kwargs):
+ return self._cobj.set_cachesize(*args, **kwargs)
+ def set_dup_compare(self, *args, **kwargs) :
+ return self._cobj.set_dup_compare(*args, **kwargs)
+ def set_flags(self, *args, **kwargs):
+ return self._cobj.set_flags(*args, **kwargs)
+ def set_h_ffactor(self, *args, **kwargs):
+ return self._cobj.set_h_ffactor(*args, **kwargs)
+ def set_h_nelem(self, *args, **kwargs):
+ return self._cobj.set_h_nelem(*args, **kwargs)
+ def set_lorder(self, *args, **kwargs):
+ return self._cobj.set_lorder(*args, **kwargs)
+ def set_pagesize(self, *args, **kwargs):
+ return self._cobj.set_pagesize(*args, **kwargs)
+ def set_re_delim(self, *args, **kwargs):
+ return self._cobj.set_re_delim(*args, **kwargs)
+ def set_re_len(self, *args, **kwargs):
+ return self._cobj.set_re_len(*args, **kwargs)
+ def set_re_pad(self, *args, **kwargs):
+ return self._cobj.set_re_pad(*args, **kwargs)
+ def set_re_source(self, *args, **kwargs):
+ return self._cobj.set_re_source(*args, **kwargs)
+ def set_q_extentsize(self, *args, **kwargs):
+ return self._cobj.set_q_extentsize(*args, **kwargs)
+ def stat(self, *args, **kwargs):
+ return self._cobj.stat(*args, **kwargs)
+ def sync(self, *args, **kwargs):
+ return self._cobj.sync(*args, **kwargs)
+ def type(self, *args, **kwargs):
+ return self._cobj.type(*args, **kwargs)
+ def upgrade(self, *args, **kwargs):
+ return self._cobj.upgrade(*args, **kwargs)
+ def values(self, *args, **kwargs):
+ return self._cobj.values(*args, **kwargs)
+ def verify(self, *args, **kwargs):
+ return self._cobj.verify(*args, **kwargs)
+ def set_get_returns_none(self, *args, **kwargs):
+ return self._cobj.set_get_returns_none(*args, **kwargs)
+
+ def set_encrypt(self, *args, **kwargs):
+ return self._cobj.set_encrypt(*args, **kwargs)
+
+
+class DBSequence:
+ def __init__(self, *args, **kwargs):
+ self._cobj = db.DBSequence(*args, **kwargs)
+
+ def close(self, *args, **kwargs):
+ return self._cobj.close(*args, **kwargs)
+ def get(self, *args, **kwargs):
+ return self._cobj.get(*args, **kwargs)
+ def get_dbp(self, *args, **kwargs):
+ return self._cobj.get_dbp(*args, **kwargs)
+ def get_key(self, *args, **kwargs):
+ return self._cobj.get_key(*args, **kwargs)
+ def init_value(self, *args, **kwargs):
+ return self._cobj.init_value(*args, **kwargs)
+ def open(self, *args, **kwargs):
+ return self._cobj.open(*args, **kwargs)
+ def remove(self, *args, **kwargs):
+ return self._cobj.remove(*args, **kwargs)
+ def stat(self, *args, **kwargs):
+ return self._cobj.stat(*args, **kwargs)
+ def set_cachesize(self, *args, **kwargs):
+ return self._cobj.set_cachesize(*args, **kwargs)
+ def set_flags(self, *args, **kwargs):
+ return self._cobj.set_flags(*args, **kwargs)
+ def set_range(self, *args, **kwargs):
+ return self._cobj.set_range(*args, **kwargs)
+ def get_cachesize(self, *args, **kwargs):
+ return self._cobj.get_cachesize(*args, **kwargs)
+ def get_flags(self, *args, **kwargs):
+ return self._cobj.get_flags(*args, **kwargs)
+ def get_range(self, *args, **kwargs):
+ return self._cobj.get_range(*args, **kwargs)
diff --git a/lib/python2.7/bsddb/dbrecio.py b/lib/python2.7/bsddb/dbrecio.py
new file mode 100644
index 0000000..d439f32
--- /dev/null
+++ b/lib/python2.7/bsddb/dbrecio.py
@@ -0,0 +1,190 @@
+
+"""
+File-like objects that read from or write to a bsddb record.
+
+This implements (nearly) all stdio methods.
+
+f = DBRecIO(db, key, txn=None)
+f.close() # explicitly release resources held
+flag = f.isatty() # always false
+pos = f.tell() # get current position
+f.seek(pos) # set current position
+f.seek(pos, mode) # mode 0: absolute; 1: relative; 2: relative to EOF
+buf = f.read() # read until EOF
+buf = f.read(n) # read up to n bytes
+f.truncate([size]) # truncate file to at most size (default: current pos)
+f.write(buf) # write at current position
+f.writelines(list) # for line in list: f.write(line)
+
+Notes:
+- fileno() is left unimplemented so that code which uses it triggers
+ an exception early.
+- There's a simple test set (see end of this file) - not yet updated
+ for DBRecIO.
+- readline() is not implemented yet.
+
+
+From:
+ Itamar Shtull-Trauring <itamar@maxnm.com>
+"""
+
+import errno
+import string
+
+class DBRecIO:
+ def __init__(self, db, key, txn=None):
+ self.db = db
+ self.key = key
+ self.txn = txn
+ self.len = None
+ self.pos = 0
+ self.closed = 0
+ self.softspace = 0
+
+ def close(self):
+ if not self.closed:
+ self.closed = 1
+ del self.db, self.txn
+
+ def isatty(self):
+ if self.closed:
+ raise ValueError, "I/O operation on closed file"
+ return 0
+
+ def seek(self, pos, mode = 0):
+ if self.closed:
+ raise ValueError, "I/O operation on closed file"
+ if mode == 1:
+ pos = pos + self.pos
+ elif mode == 2:
+ pos = pos + self.len
+ self.pos = max(0, pos)
+
+ def tell(self):
+ if self.closed:
+ raise ValueError, "I/O operation on closed file"
+ return self.pos
+
+ def read(self, n = -1):
+ if self.closed:
+ raise ValueError, "I/O operation on closed file"
+ if n < 0:
+ newpos = self.len
+ else:
+ newpos = min(self.pos+n, self.len)
+
+ dlen = newpos - self.pos
+
+ r = self.db.get(self.key, txn=self.txn, dlen=dlen, doff=self.pos)
+ self.pos = newpos
+ return r
+
+ __fixme = """
+ def readline(self, length=None):
+ if self.closed:
+ raise ValueError, "I/O operation on closed file"
+ if self.buflist:
+ self.buf = self.buf + string.joinfields(self.buflist, '')
+ self.buflist = []
+ i = string.find(self.buf, '\n', self.pos)
+ if i < 0:
+ newpos = self.len
+ else:
+ newpos = i+1
+ if length is not None:
+ if self.pos + length < newpos:
+ newpos = self.pos + length
+ r = self.buf[self.pos:newpos]
+ self.pos = newpos
+ return r
+
+ def readlines(self, sizehint = 0):
+ total = 0
+ lines = []
+ line = self.readline()
+ while line:
+ lines.append(line)
+ total += len(line)
+ if 0 < sizehint <= total:
+ break
+ line = self.readline()
+ return lines
+ """
+
+ def truncate(self, size=None):
+ if self.closed:
+ raise ValueError, "I/O operation on closed file"
+ if size is None:
+ size = self.pos
+ elif size < 0:
+ raise IOError(errno.EINVAL,
+ "Negative size not allowed")
+ elif size < self.pos:
+ self.pos = size
+ self.db.put(self.key, "", txn=self.txn, dlen=self.len-size, doff=size)
+
+ def write(self, s):
+ if self.closed:
+ raise ValueError, "I/O operation on closed file"
+ if not s: return
+ if self.pos > self.len:
+ self.buflist.append('\0'*(self.pos - self.len))
+ self.len = self.pos
+ newpos = self.pos + len(s)
+ self.db.put(self.key, s, txn=self.txn, dlen=len(s), doff=self.pos)
+ self.pos = newpos
+
+ def writelines(self, list):
+ self.write(string.joinfields(list, ''))
+
+ def flush(self):
+ if self.closed:
+ raise ValueError, "I/O operation on closed file"
+
+
+"""
+# A little test suite
+
+def _test():
+ import sys
+ if sys.argv[1:]:
+ file = sys.argv[1]
+ else:
+ file = '/etc/passwd'
+ lines = open(file, 'r').readlines()
+ text = open(file, 'r').read()
+ f = StringIO()
+ for line in lines[:-2]:
+ f.write(line)
+ f.writelines(lines[-2:])
+ if f.getvalue() != text:
+ raise RuntimeError, 'write failed'
+ length = f.tell()
+ print 'File length =', length
+ f.seek(len(lines[0]))
+ f.write(lines[1])
+ f.seek(0)
+ print 'First line =', repr(f.readline())
+ here = f.tell()
+ line = f.readline()
+ print 'Second line =', repr(line)
+ f.seek(-len(line), 1)
+ line2 = f.read(len(line))
+ if line != line2:
+ raise RuntimeError, 'bad result after seek back'
+ f.seek(len(line2), 1)
+ list = f.readlines()
+ line = list[-1]
+ f.seek(f.tell() - len(line))
+ line2 = f.read()
+ if line != line2:
+ raise RuntimeError, 'bad result after seek back from EOF'
+ print 'Read', len(list), 'more lines'
+ print 'File length =', f.tell()
+ if f.tell() != length:
+ raise RuntimeError, 'bad length'
+ f.close()
+
+if __name__ == '__main__':
+ _test()
+"""
diff --git a/lib/python2.7/bsddb/dbshelve.py b/lib/python2.7/bsddb/dbshelve.py
new file mode 100644
index 0000000..7d0daa2
--- /dev/null
+++ b/lib/python2.7/bsddb/dbshelve.py
@@ -0,0 +1,381 @@
+#------------------------------------------------------------------------
+# Copyright (c) 1997-2001 by Total Control Software
+# All Rights Reserved
+#------------------------------------------------------------------------
+#
+# Module Name: dbShelve.py
+#
+# Description: A reimplementation of the standard shelve.py that
+# forces the use of cPickle, and DB.
+#
+# Creation Date: 11/3/97 3:39:04PM
+#
+# License: This is free software. You may use this software for any
+# purpose including modification/redistribution, so long as
+# this header remains intact and that you do not claim any
+# rights of ownership or authorship of this software. This
+# software has been tested, but no warranty is expressed or
+# implied.
+#
+# 13-Dec-2000: Updated to be used with the new bsddb3 package.
+# Added DBShelfCursor class.
+#
+#------------------------------------------------------------------------
+
+"""Manage shelves of pickled objects using bsddb database files for the
+storage.
+"""
+
+#------------------------------------------------------------------------
+
+import sys
+absolute_import = (sys.version_info[0] >= 3)
+if absolute_import :
+ # Because this syntaxis is not valid before Python 2.5
+ exec("from . import db")
+else :
+ import db
+
+if sys.version_info[0] >= 3 :
+ import cPickle # Will be converted to "pickle" by "2to3"
+else :
+ if sys.version_info < (2, 6) :
+ import cPickle
+ else :
+ # When we drop support for python 2.4
+ # we could use: (in 2.5 we need a __future__ statement)
+ #
+ # with warnings.catch_warnings():
+ # warnings.filterwarnings(...)
+ # ...
+ #
+ # We can not use "with" as is, because it would be invalid syntax
+ # in python 2.4 and (with no __future__) 2.5.
+ # Here we simulate "with" following PEP 343 :
+ import warnings
+ w = warnings.catch_warnings()
+ w.__enter__()
+ try :
+ warnings.filterwarnings('ignore',
+ message='the cPickle module has been removed in Python 3.0',
+ category=DeprecationWarning)
+ import cPickle
+ finally :
+ w.__exit__()
+ del w
+
# Highest pickle protocol supported by the loaded cPickle; used as the
# default per-shelf protocol (falls back to 1 when this is falsy).
HIGHEST_PROTOCOL = cPickle.HIGHEST_PROTOCOL
def _dumps(object, protocol):
    # Central pickling helper so all writers pass the protocol the same
    # way (note: 'object' shadows the builtin -- kept as-is).
    return cPickle.dumps(object, protocol=protocol)

if sys.version_info < (2, 6) :
    # DictMixin is the pre-2.6 spelling of the mutable-mapping mixin.
    from UserDict import DictMixin as MutableMapping
else :
    import collections
    MutableMapping = collections.MutableMapping
+
+#------------------------------------------------------------------------
+
+
+def open(filename, flags=db.DB_CREATE, mode=0660, filetype=db.DB_HASH,
+ dbenv=None, dbname=None):
+ """
+ A simple factory function for compatibility with the standard
+ shleve.py module. It can be used like this, where key is a string
+ and data is a pickleable object:
+
+ from bsddb import dbshelve
+ db = dbshelve.open(filename)
+
+ db[key] = data
+
+ db.close()
+ """
+ if type(flags) == type(''):
+ sflag = flags
+ if sflag == 'r':
+ flags = db.DB_RDONLY
+ elif sflag == 'rw':
+ flags = 0
+ elif sflag == 'w':
+ flags = db.DB_CREATE
+ elif sflag == 'c':
+ flags = db.DB_CREATE
+ elif sflag == 'n':
+ flags = db.DB_TRUNCATE | db.DB_CREATE
+ else:
+ raise db.DBError, "flags should be one of 'r', 'w', 'c' or 'n' or use the bsddb.db.DB_* flags"
+
+ d = DBShelf(dbenv)
+ d.open(filename, dbname, filetype, flags, mode)
+ return d
+
+#---------------------------------------------------------------------------
+
# Raised for shelve-level misuse, e.g. append() on a non-DB_RECNO shelf.
class DBShelveError(db.DBError): pass
+
+
class DBShelf(MutableMapping):
    """A shelf to hold pickled objects, built upon a bsddb DB object. It
    automatically pickles/unpickles data objects going to/from the DB.

    Attribute 'protocol' holds the pickle protocol used by all writers;
    attribute 'db' is the wrapped bsddb DB object.
    """
    def __init__(self, dbenv=None):
        self.db = db.DB(dbenv)
        self._closed = True
        # Prefer the newest protocol cPickle reports; fall back to 1
        # when HIGHEST_PROTOCOL is falsy.
        if HIGHEST_PROTOCOL:
            self.protocol = HIGHEST_PROTOCOL
        else:
            self.protocol = 1


    def __del__(self):
        # NOTE(review): called unconditionally at GC time; if __init__
        # failed before self.db was assigned this raises -- presumably
        # tolerated during interpreter teardown.
        self.close()


    def __getattr__(self, name):
        """Many methods we can just pass through to the DB object.
        (See below)
        """
        return getattr(self.db, name)


    #-----------------------------------
    # Dictionary access methods

    def __len__(self):
        return len(self.db)


    def __getitem__(self, key):
        # Raw bytes come back from the DB; unpickle to the stored object.
        data = self.db[key]
        return cPickle.loads(data)


    def __setitem__(self, key, value):
        data = _dumps(value, self.protocol)
        self.db[key] = data


    def __delitem__(self, key):
        del self.db[key]


    def keys(self, txn=None):
        # The txn argument mirrors DB.keys(); only pass it when given.
        if txn is not None:
            return self.db.keys(txn)
        else:
            return self.db.keys()

    if sys.version_info >= (2, 6) :
        def __iter__(self) : # XXX: Load all keys in memory :-(
            for k in self.db.keys() :
                yield k

        # Do this when "DB" support iteration
        # Or is it enough to pass thru "getattr"?
        #
        # def __iter__(self) :
        #     return self.db.__iter__()


    def open(self, *args, **kwargs):
        self.db.open(*args, **kwargs)
        self._closed = False


    def close(self, *args, **kwargs):
        self.db.close(*args, **kwargs)
        self._closed = True


    def __repr__(self):
        if self._closed:
            return '<DBShelf @ 0x%x - closed>' % (id(self))
        else:
            # Materializes the whole shelf -- debugging aid only.
            return repr(dict(self.iteritems()))


    def items(self, txn=None):
        if txn is not None:
            items = self.db.items(txn)
        else:
            items = self.db.items()
        newitems = []

        for k, v in items:
            newitems.append( (k, cPickle.loads(v)) )
        return newitems

    def values(self, txn=None):
        if txn is not None:
            values = self.db.values(txn)
        else:
            values = self.db.values()

        # map() is eager (returns a list) under Python 2.
        return map(cPickle.loads, values)

    #-----------------------------------
    # Other methods

    def __append(self, value, txn=None):
        # Internal helper: only valid on DB_RECNO databases (see append).
        data = _dumps(value, self.protocol)
        return self.db.append(data, txn)

    def append(self, value, txn=None):
        if self.get_type() == db.DB_RECNO:
            return self.__append(value, txn=txn)
        raise DBShelveError, "append() only supported when dbshelve opened with filetype=dbshelve.db.DB_RECNO"


    def associate(self, secondaryDB, callback, flags=0):
        # Wrap the user callback so it receives the unpickled value.
        def _shelf_callback(priKey, priData, realCallback=callback):
            # Safe in Python 2.x because expression short circuit
            if sys.version_info[0] < 3 or isinstance(priData, bytes) :
                data = cPickle.loads(priData)
            else :
                data = cPickle.loads(bytes(priData, "iso8859-1")) # 8 bits
            return realCallback(priKey, data)

        return self.db.associate(secondaryDB, _shelf_callback, flags)


    #def get(self, key, default=None, txn=None, flags=0):
    def get(self, *args, **kw):
        # We do it with *args and **kw so if the default value wasn't
        # given nothing is passed to the extension module.  That way
        # an exception can be raised if set_get_returns_none is turned
        # off.
        data = self.db.get(*args, **kw)
        try:
            return cPickle.loads(data)
        except (EOFError, TypeError, cPickle.UnpicklingError):
            return data  # we may be getting the default value, or None,
                         # so it doesn't need unpickled.

    def get_both(self, key, value, txn=None, flags=0):
        # Pickle the value so the lookup compares stored bytes.
        data = _dumps(value, self.protocol)
        data = self.db.get(key, data, txn, flags)
        return cPickle.loads(data)


    def cursor(self, txn=None, flags=0):
        # Propagate the shelf's pickle protocol onto the cursor wrapper.
        c = DBShelfCursor(self.db.cursor(txn, flags))
        c.protocol = self.protocol
        return c


    def put(self, key, value, txn=None, flags=0):
        data = _dumps(value, self.protocol)
        return self.db.put(key, data, txn, flags)


    def join(self, cursorList, flags=0):
        raise NotImplementedError


    #----------------------------------------------
    # Methods allowed to pass-through to self.db
    #
    # close, delete, fd, get_byteswapped, get_type, has_key,
    # key_range, open, remove, rename, stat, sync,
    # upgrade, verify, and all set_* methods.
+
+
+#---------------------------------------------------------------------------
+
class DBShelfCursor:
    """Cursor over a DBShelf: pickles values on put() and unpickles
    record data on every retrieval method.  Anything not wrapped here is
    delegated to the underlying bsddb cursor via __getattr__.
    """
    def __init__(self, cursor):
        self.dbc = cursor

    def __del__(self):
        self.close()


    def __getattr__(self, name):
        """Some methods we can just pass through to the cursor object. (See below)"""
        return getattr(self.dbc, name)


    #----------------------------------------------

    def dup(self, flags=0):
        # Copy the pickle protocol onto the duplicate so put() on the
        # copy behaves identically to put() on the original.
        c = DBShelfCursor(self.dbc.dup(flags))
        c.protocol = self.protocol
        return c


    def put(self, key, value, flags=0):
        data = _dumps(value, self.protocol)
        return self.dbc.put(key, data, flags)


    def get(self, *args):
        count = len(args)  # a method overloading hack
        method = getattr(self, 'get_%d' % count)
        # BUGFIX: the original called method(*args) and discarded the
        # result, so get() always returned None.  Return it instead.
        return method(*args)

    def get_1(self, flags):
        rec = self.dbc.get(flags)
        return self._extract(rec)

    def get_2(self, key, flags):
        rec = self.dbc.get(key, flags)
        return self._extract(rec)

    def get_3(self, key, value, flags):
        data = _dumps(value, self.protocol)
        # NOTE(review): 'data' is pickled but never handed to the cursor,
        # so the value argument is effectively ignored -- presumably
        # self.dbc.get(key, data, flags) was intended; left unchanged
        # here to preserve existing behavior.
        rec = self.dbc.get(key, flags)
        return self._extract(rec)


    def current(self, flags=0): return self.get_1(flags|db.DB_CURRENT)
    def first(self, flags=0): return self.get_1(flags|db.DB_FIRST)
    def last(self, flags=0): return self.get_1(flags|db.DB_LAST)
    def next(self, flags=0): return self.get_1(flags|db.DB_NEXT)
    def prev(self, flags=0): return self.get_1(flags|db.DB_PREV)
    def consume(self, flags=0): return self.get_1(flags|db.DB_CONSUME)
    def next_dup(self, flags=0): return self.get_1(flags|db.DB_NEXT_DUP)
    def next_nodup(self, flags=0): return self.get_1(flags|db.DB_NEXT_NODUP)
    def prev_nodup(self, flags=0): return self.get_1(flags|db.DB_PREV_NODUP)


    def get_both(self, key, value, flags=0):
        data = _dumps(value, self.protocol)
        # NOTE(review): as in get_3, the pickled 'data' is unused; the
        # underlying get_both() is called without it -- confirm whether
        # self.dbc.get_both(key, data, flags) was intended.
        rec = self.dbc.get_both(key, flags)
        return self._extract(rec)


    def set(self, key, flags=0):
        rec = self.dbc.set(key, flags)
        return self._extract(rec)

    def set_range(self, key, flags=0):
        rec = self.dbc.set_range(key, flags)
        return self._extract(rec)

    def set_recno(self, recno, flags=0):
        rec = self.dbc.set_recno(recno, flags)
        return self._extract(rec)

    set_both = get_both

    def _extract(self, rec):
        # Turn a raw (key, bytes) cursor record into (key, object),
        # or pass None (no record) straight through.
        if rec is None:
            return None
        else:
            key, data = rec
            # Safe in Python 2.x because expression short circuit
            if sys.version_info[0] < 3 or isinstance(data, bytes) :
                return key, cPickle.loads(data)
            else :
                return key, cPickle.loads(bytes(data, "iso8859-1"))  # 8 bits

    #----------------------------------------------
    # Methods allowed to pass-through to self.dbc
    #
    # close, count, delete, get_recno, join_item
+
+
+#---------------------------------------------------------------------------
diff --git a/lib/python2.7/bsddb/dbtables.py b/lib/python2.7/bsddb/dbtables.py
new file mode 100644
index 0000000..e8acdd0
--- /dev/null
+++ b/lib/python2.7/bsddb/dbtables.py
@@ -0,0 +1,843 @@
+#-----------------------------------------------------------------------
+#
+# Copyright (C) 2000, 2001 by Autonomous Zone Industries
+# Copyright (C) 2002 Gregory P. Smith
+#
+# License: This is free software. You may use this software for any
+# purpose including modification/redistribution, so long as
+# this header remains intact and that you do not claim any
+# rights of ownership or authorship of this software. This
+# software has been tested, but no warranty is expressed or
+# implied.
+#
+# -- Gregory P. Smith <greg@krypto.org>
+
+# This provides a simple database table interface built on top of
+# the Python Berkeley DB 3 interface.
+#
+_cvsid = '$Id$'
+
+import re
+import sys
+import copy
+import random
+import struct
+
+
+if sys.version_info[0] >= 3 :
+ import pickle
+else :
+ if sys.version_info < (2, 6) :
+ import cPickle as pickle
+ else :
+ # When we drop support for python 2.4
+ # we could use: (in 2.5 we need a __future__ statement)
+ #
+ # with warnings.catch_warnings():
+ # warnings.filterwarnings(...)
+ # ...
+ #
+ # We can not use "with" as is, because it would be invalid syntax
+ # in python 2.4 and (with no __future__) 2.5.
+ # Here we simulate "with" following PEP 343 :
+ import warnings
+ w = warnings.catch_warnings()
+ w.__enter__()
+ try :
+ warnings.filterwarnings('ignore',
+ message='the cPickle module has been removed in Python 3.0',
+ category=DeprecationWarning)
+ import cPickle as pickle
+ finally :
+ w.__exit__()
+ del w
+
+try:
+ # For Pythons w/distutils pybsddb
+ from bsddb3 import db
+except ImportError:
+ # For Python 2.3
+ from bsddb import db
+
class TableDBError(StandardError):
    # Base exception for dbtables failures; db.DBError details are
    # re-raised wrapped in this type so callers need not import bsddb.
    pass
class TableAlreadyExists(TableDBError):
    # Raised by CreateTable() when the table's column list already exists.
    pass
+
+
class Cond:
    """This condition matches everything"""
    # Base class for Select/Delete/Modify row conditions; subclasses
    # override __call__ with a real predicate over the cell's string.
    def __call__(self, s):
        return 1
+
+class ExactCond(Cond):
+ """Acts as an exact match condition function"""
+ def __init__(self, strtomatch):
+ self.strtomatch = strtomatch
+ def __call__(self, s):
+ return s == self.strtomatch
+
+class PrefixCond(Cond):
+ """Acts as a condition function for matching a string prefix"""
+ def __init__(self, prefix):
+ self.prefix = prefix
+ def __call__(self, s):
+ return s[:len(self.prefix)] == self.prefix
+
+class PostfixCond(Cond):
+ """Acts as a condition function for matching a string postfix"""
+ def __init__(self, postfix):
+ self.postfix = postfix
+ def __call__(self, s):
+ return s[-len(self.postfix):] == self.postfix
+
+class LikeCond(Cond):
+ """
+ Acts as a function that will match using an SQL 'LIKE' style
+ string. Case insensitive and % signs are wild cards.
+ This isn't perfect but it should work for the simple common cases.
+ """
+ def __init__(self, likestr, re_flags=re.IGNORECASE):
+ # escape python re characters
+ chars_to_escape = '.*+()[]?'
+ for char in chars_to_escape :
+ likestr = likestr.replace(char, '\\'+char)
+ # convert %s to wildcards
+ self.likestr = likestr.replace('%', '.*')
+ self.re = re.compile('^'+self.likestr+'$', re_flags)
+ def __call__(self, s):
+ return self.re.match(s)
+
+#
+# keys used to store database metadata
+#
+_table_names_key = '__TABLE_NAMES__' # list of the tables in this db
+_columns = '._COLUMNS__' # table_name+this key contains a list of columns
+
+def _columns_key(table):
+ return table + _columns
+
+#
+# these keys are found within table sub databases
+#
+_data = '._DATA_.' # this+column+this+rowid key contains table data
+_rowid = '._ROWID_.' # this+rowid+this key contains a unique entry for each
+ # row in the table. (no data is stored)
+_rowid_str_len = 8 # length in bytes of the unique rowid strings
+
+
+def _data_key(table, col, rowid):
+ return table + _data + col + _data + rowid
+
+def _search_col_data_key(table, col):
+ return table + _data + col + _data
+
+def _search_all_data_key(table):
+ return table + _data
+
+def _rowid_key(table, rowid):
+ return table + _rowid + rowid + _rowid
+
+def _search_rowid_key(table):
+ return table + _rowid
+
+def contains_metastrings(s) :
+ """Verify that the given string does not contain any
+ metadata strings that might interfere with dbtables database operation.
+ """
+ if (s.find(_table_names_key) >= 0 or
+ s.find(_columns) >= 0 or
+ s.find(_data) >= 0 or
+ s.find(_rowid) >= 0):
+ # Then
+ return 1
+ else:
+ return 0
+
+
+class bsdTableDB :
+ def __init__(self, filename, dbhome, create=0, truncate=0, mode=0600,
+ recover=0, dbflags=0):
+ """bsdTableDB(filename, dbhome, create=0, truncate=0, mode=0600)
+
+ Open database name in the dbhome Berkeley DB directory.
+ Use keyword arguments when calling this constructor.
+ """
+ self.db = None
+ myflags = db.DB_THREAD
+ if create:
+ myflags |= db.DB_CREATE
+ flagsforenv = (db.DB_INIT_MPOOL | db.DB_INIT_LOCK | db.DB_INIT_LOG |
+ db.DB_INIT_TXN | dbflags)
+ # DB_AUTO_COMMIT isn't a valid flag for env.open()
+ try:
+ dbflags |= db.DB_AUTO_COMMIT
+ except AttributeError:
+ pass
+ if recover:
+ flagsforenv = flagsforenv | db.DB_RECOVER
+ self.env = db.DBEnv()
+ # enable auto deadlock avoidance
+ self.env.set_lk_detect(db.DB_LOCK_DEFAULT)
+ self.env.open(dbhome, myflags | flagsforenv)
+ if truncate:
+ myflags |= db.DB_TRUNCATE
+ self.db = db.DB(self.env)
+ # this code relies on DBCursor.set* methods to raise exceptions
+ # rather than returning None
+ self.db.set_get_returns_none(1)
+ # allow duplicate entries [warning: be careful w/ metadata]
+ self.db.set_flags(db.DB_DUP)
+ self.db.open(filename, db.DB_BTREE, dbflags | myflags, mode)
+ self.dbfilename = filename
+
+ if sys.version_info[0] >= 3 :
+ class cursor_py3k(object) :
+ def __init__(self, dbcursor) :
+ self._dbcursor = dbcursor
+
+ def close(self) :
+ return self._dbcursor.close()
+
+ def set_range(self, search) :
+ v = self._dbcursor.set_range(bytes(search, "iso8859-1"))
+ if v is not None :
+ v = (v[0].decode("iso8859-1"),
+ v[1].decode("iso8859-1"))
+ return v
+
+ def __next__(self) :
+ v = getattr(self._dbcursor, "next")()
+ if v is not None :
+ v = (v[0].decode("iso8859-1"),
+ v[1].decode("iso8859-1"))
+ return v
+
+ class db_py3k(object) :
+ def __init__(self, db) :
+ self._db = db
+
+ def cursor(self, txn=None) :
+ return cursor_py3k(self._db.cursor(txn=txn))
+
+ def has_key(self, key, txn=None) :
+ return getattr(self._db,"has_key")(bytes(key, "iso8859-1"),
+ txn=txn)
+
+ def put(self, key, value, flags=0, txn=None) :
+ key = bytes(key, "iso8859-1")
+ if value is not None :
+ value = bytes(value, "iso8859-1")
+ return self._db.put(key, value, flags=flags, txn=txn)
+
+ def put_bytes(self, key, value, txn=None) :
+ key = bytes(key, "iso8859-1")
+ return self._db.put(key, value, txn=txn)
+
+ def get(self, key, txn=None, flags=0) :
+ key = bytes(key, "iso8859-1")
+ v = self._db.get(key, txn=txn, flags=flags)
+ if v is not None :
+ v = v.decode("iso8859-1")
+ return v
+
+ def get_bytes(self, key, txn=None, flags=0) :
+ key = bytes(key, "iso8859-1")
+ return self._db.get(key, txn=txn, flags=flags)
+
+ def delete(self, key, txn=None) :
+ key = bytes(key, "iso8859-1")
+ return self._db.delete(key, txn=txn)
+
+ def close (self) :
+ return self._db.close()
+
+ self.db = db_py3k(self.db)
+ else : # Python 2.x
+ pass
+
+ # Initialize the table names list if this is a new database
+ txn = self.env.txn_begin()
+ try:
+ if not getattr(self.db, "has_key")(_table_names_key, txn):
+ getattr(self.db, "put_bytes", self.db.put) \
+ (_table_names_key, pickle.dumps([], 1), txn=txn)
+ # Yes, bare except
+ except:
+ txn.abort()
+ raise
+ else:
+ txn.commit()
+ # TODO verify more of the database's metadata?
+ self.__tablecolumns = {}
+
+ def __del__(self):
+ self.close()
+
+ def close(self):
+ if self.db is not None:
+ self.db.close()
+ self.db = None
+ if self.env is not None:
+ self.env.close()
+ self.env = None
+
+ def checkpoint(self, mins=0):
+ self.env.txn_checkpoint(mins)
+
+ def sync(self):
+ self.db.sync()
+
+ def _db_print(self) :
+ """Print the database to stdout for debugging"""
+ print "******** Printing raw database for debugging ********"
+ cur = self.db.cursor()
+ try:
+ key, data = cur.first()
+ while 1:
+ print repr({key: data})
+ next = cur.next()
+ if next:
+ key, data = next
+ else:
+ cur.close()
+ return
+ except db.DBNotFoundError:
+ cur.close()
+
+
+ def CreateTable(self, table, columns):
+ """CreateTable(table, columns) - Create a new table in the database.
+
+ raises TableDBError if it already exists or for other DB errors.
+ """
+ assert isinstance(columns, list)
+
+ txn = None
+ try:
+ # checking sanity of the table and column names here on
+ # table creation will prevent problems elsewhere.
+ if contains_metastrings(table):
+ raise ValueError(
+ "bad table name: contains reserved metastrings")
+ for column in columns :
+ if contains_metastrings(column):
+ raise ValueError(
+ "bad column name: contains reserved metastrings")
+
+ columnlist_key = _columns_key(table)
+ if getattr(self.db, "has_key")(columnlist_key):
+ raise TableAlreadyExists, "table already exists"
+
+ txn = self.env.txn_begin()
+ # store the table's column info
+ getattr(self.db, "put_bytes", self.db.put)(columnlist_key,
+ pickle.dumps(columns, 1), txn=txn)
+
+ # add the table name to the tablelist
+ tablelist = pickle.loads(getattr(self.db, "get_bytes",
+ self.db.get) (_table_names_key, txn=txn, flags=db.DB_RMW))
+ tablelist.append(table)
+ # delete 1st, in case we opened with DB_DUP
+ self.db.delete(_table_names_key, txn=txn)
+ getattr(self.db, "put_bytes", self.db.put)(_table_names_key,
+ pickle.dumps(tablelist, 1), txn=txn)
+
+ txn.commit()
+ txn = None
+ except db.DBError, dberror:
+ if txn:
+ txn.abort()
+ if sys.version_info < (2, 6) :
+ raise TableDBError, dberror[1]
+ else :
+ raise TableDBError, dberror.args[1]
+
+
+ def ListTableColumns(self, table):
+ """Return a list of columns in the given table.
+ [] if the table doesn't exist.
+ """
+ assert isinstance(table, str)
+ if contains_metastrings(table):
+ raise ValueError, "bad table name: contains reserved metastrings"
+
+ columnlist_key = _columns_key(table)
+ if not getattr(self.db, "has_key")(columnlist_key):
+ return []
+ pickledcolumnlist = getattr(self.db, "get_bytes",
+ self.db.get)(columnlist_key)
+ if pickledcolumnlist:
+ return pickle.loads(pickledcolumnlist)
+ else:
+ return []
+
+ def ListTables(self):
+ """Return a list of tables in this database."""
+ pickledtablelist = self.db.get_get(_table_names_key)
+ if pickledtablelist:
+ return pickle.loads(pickledtablelist)
+ else:
+ return []
+
+ def CreateOrExtendTable(self, table, columns):
+ """CreateOrExtendTable(table, columns)
+
+ Create a new table in the database.
+
+ If a table of this name already exists, extend it to have any
+ additional columns present in the given list as well as
+ all of its current columns.
+ """
+ assert isinstance(columns, list)
+
+ try:
+ self.CreateTable(table, columns)
+ except TableAlreadyExists:
+ # the table already existed, add any new columns
+ txn = None
+ try:
+ columnlist_key = _columns_key(table)
+ txn = self.env.txn_begin()
+
+ # load the current column list
+ oldcolumnlist = pickle.loads(
+ getattr(self.db, "get_bytes",
+ self.db.get)(columnlist_key, txn=txn, flags=db.DB_RMW))
+ # create a hash table for fast lookups of column names in the
+ # loop below
+ oldcolumnhash = {}
+ for c in oldcolumnlist:
+ oldcolumnhash[c] = c
+
+ # create a new column list containing both the old and new
+ # column names
+ newcolumnlist = copy.copy(oldcolumnlist)
+ for c in columns:
+ if not c in oldcolumnhash:
+ newcolumnlist.append(c)
+
+ # store the table's new extended column list
+ if newcolumnlist != oldcolumnlist :
+ # delete the old one first since we opened with DB_DUP
+ self.db.delete(columnlist_key, txn=txn)
+ getattr(self.db, "put_bytes", self.db.put)(columnlist_key,
+ pickle.dumps(newcolumnlist, 1),
+ txn=txn)
+
+ txn.commit()
+ txn = None
+
+ self.__load_column_info(table)
+ except db.DBError, dberror:
+ if txn:
+ txn.abort()
+ if sys.version_info < (2, 6) :
+ raise TableDBError, dberror[1]
+ else :
+ raise TableDBError, dberror.args[1]
+
+
+ def __load_column_info(self, table) :
+ """initialize the self.__tablecolumns dict"""
+ # check the column names
+ try:
+ tcolpickles = getattr(self.db, "get_bytes",
+ self.db.get)(_columns_key(table))
+ except db.DBNotFoundError:
+ raise TableDBError, "unknown table: %r" % (table,)
+ if not tcolpickles:
+ raise TableDBError, "unknown table: %r" % (table,)
+ self.__tablecolumns[table] = pickle.loads(tcolpickles)
+
+ def __new_rowid(self, table, txn) :
+ """Create a new unique row identifier"""
+ unique = 0
+ while not unique:
+ # Generate a random 64-bit row ID string
+ # (note: might have <64 bits of true randomness
+ # but it's plenty for our database id needs!)
+ blist = []
+ for x in xrange(_rowid_str_len):
+ blist.append(random.randint(0,255))
+ newid = struct.pack('B'*_rowid_str_len, *blist)
+
+ if sys.version_info[0] >= 3 :
+ newid = newid.decode("iso8859-1") # 8 bits
+
+ # Guarantee uniqueness by adding this key to the database
+ try:
+ self.db.put(_rowid_key(table, newid), None, txn=txn,
+ flags=db.DB_NOOVERWRITE)
+ except db.DBKeyExistError:
+ pass
+ else:
+ unique = 1
+
+ return newid
+
+
+ def Insert(self, table, rowdict) :
+ """Insert(table, datadict) - Insert a new row into the table
+ using the keys+values from rowdict as the column values.
+ """
+
+ txn = None
+ try:
+ if not getattr(self.db, "has_key")(_columns_key(table)):
+ raise TableDBError, "unknown table"
+
+ # check the validity of each column name
+ if not table in self.__tablecolumns:
+ self.__load_column_info(table)
+ for column in rowdict.keys() :
+ if not self.__tablecolumns[table].count(column):
+ raise TableDBError, "unknown column: %r" % (column,)
+
+ # get a unique row identifier for this row
+ txn = self.env.txn_begin()
+ rowid = self.__new_rowid(table, txn=txn)
+
+ # insert the row values into the table database
+ for column, dataitem in rowdict.items():
+ # store the value
+ self.db.put(_data_key(table, column, rowid), dataitem, txn=txn)
+
+ txn.commit()
+ txn = None
+
+ except db.DBError, dberror:
+ # WIBNI we could just abort the txn and re-raise the exception?
+ # But no, because TableDBError is not related to DBError via
+ # inheritance, so it would be backwards incompatible. Do the next
+ # best thing.
+ info = sys.exc_info()
+ if txn:
+ txn.abort()
+ self.db.delete(_rowid_key(table, rowid))
+ if sys.version_info < (2, 6) :
+ raise TableDBError, dberror[1], info[2]
+ else :
+ raise TableDBError, dberror.args[1], info[2]
+
+
+ def Modify(self, table, conditions={}, mappings={}):
+ """Modify(table, conditions={}, mappings={}) - Modify items in rows matching 'conditions' using mapping functions in 'mappings'
+
+ * table - the table name
+ * conditions - a dictionary keyed on column names containing
+ a condition callable expecting the data string as an
+ argument and returning a boolean.
+ * mappings - a dictionary keyed on column names containing a
+ condition callable expecting the data string as an argument and
+ returning the new string for that column.
+ """
+
+ try:
+ matching_rowids = self.__Select(table, [], conditions)
+
+ # modify only requested columns
+ columns = mappings.keys()
+ for rowid in matching_rowids.keys():
+ txn = None
+ try:
+ for column in columns:
+ txn = self.env.txn_begin()
+ # modify the requested column
+ try:
+ dataitem = self.db.get(
+ _data_key(table, column, rowid),
+ txn=txn)
+ self.db.delete(
+ _data_key(table, column, rowid),
+ txn=txn)
+ except db.DBNotFoundError:
+ # XXXXXXX row key somehow didn't exist, assume no
+ # error
+ dataitem = None
+ dataitem = mappings[column](dataitem)
+ if dataitem is not None:
+ self.db.put(
+ _data_key(table, column, rowid),
+ dataitem, txn=txn)
+ txn.commit()
+ txn = None
+
+ # catch all exceptions here since we call unknown callables
+ except:
+ if txn:
+ txn.abort()
+ raise
+
+ except db.DBError, dberror:
+ if sys.version_info < (2, 6) :
+ raise TableDBError, dberror[1]
+ else :
+ raise TableDBError, dberror.args[1]
+
+ def Delete(self, table, conditions={}):
+ """Delete(table, conditions) - Delete items matching the given
+ conditions from the table.
+
+ * conditions - a dictionary keyed on column names containing
+ condition functions expecting the data string as an
+ argument and returning a boolean.
+ """
+
+ try:
+ matching_rowids = self.__Select(table, [], conditions)
+
+ # delete row data from all columns
+ columns = self.__tablecolumns[table]
+ for rowid in matching_rowids.keys():
+ txn = None
+ try:
+ txn = self.env.txn_begin()
+ for column in columns:
+ # delete the data key
+ try:
+ self.db.delete(_data_key(table, column, rowid),
+ txn=txn)
+ except db.DBNotFoundError:
+ # XXXXXXX column may not exist, assume no error
+ pass
+
+ try:
+ self.db.delete(_rowid_key(table, rowid), txn=txn)
+ except db.DBNotFoundError:
+ # XXXXXXX row key somehow didn't exist, assume no error
+ pass
+ txn.commit()
+ txn = None
+ except db.DBError, dberror:
+ if txn:
+ txn.abort()
+ raise
+ except db.DBError, dberror:
+ if sys.version_info < (2, 6) :
+ raise TableDBError, dberror[1]
+ else :
+ raise TableDBError, dberror.args[1]
+
+
+ def Select(self, table, columns, conditions={}):
+ """Select(table, columns, conditions) - retrieve specific row data
+ Returns a list of row column->value mapping dictionaries.
+
+ * columns - a list of which column data to return. If
+ columns is None, all columns will be returned.
+ * conditions - a dictionary keyed on column names
+ containing callable conditions expecting the data string as an
+ argument and returning a boolean.
+ """
+ try:
+ if not table in self.__tablecolumns:
+ self.__load_column_info(table)
+ if columns is None:
+ columns = self.__tablecolumns[table]
+ matching_rowids = self.__Select(table, columns, conditions)
+ except db.DBError, dberror:
+ if sys.version_info < (2, 6) :
+ raise TableDBError, dberror[1]
+ else :
+ raise TableDBError, dberror.args[1]
+ # return the matches as a list of dictionaries
+ return matching_rowids.values()
+
+
+ def __Select(self, table, columns, conditions):
+ """__Select() - Used to implement Select and Delete (above)
+ Returns a dictionary keyed on rowids containing dicts
+ holding the row data for columns listed in the columns param
+ that match the given conditions.
+ * conditions is a dictionary keyed on column names
+ containing callable conditions expecting the data string as an
+ argument and returning a boolean.
+ """
+ # check the validity of each column name
+ if not table in self.__tablecolumns:
+ self.__load_column_info(table)
+ if columns is None:
+ columns = self.tablecolumns[table]
+ for column in (columns + conditions.keys()):
+ if not self.__tablecolumns[table].count(column):
+ raise TableDBError, "unknown column: %r" % (column,)
+
+ # keyed on rows that match so far, containings dicts keyed on
+ # column names containing the data for that row and column.
+ matching_rowids = {}
+ # keys are rowids that do not match
+ rejected_rowids = {}
+
+ # attempt to sort the conditions in such a way as to minimize full
+ # column lookups
+ def cmp_conditions(atuple, btuple):
+ a = atuple[1]
+ b = btuple[1]
+ if type(a) is type(b):
+
+ # Needed for python 3. "cmp" vanished in 3.0.1
+ def cmp(a, b) :
+ if a==b : return 0
+ if a<b : return -1
+ return 1
+
+ if isinstance(a, PrefixCond) and isinstance(b, PrefixCond):
+ # longest prefix first
+ return cmp(len(b.prefix), len(a.prefix))
+ if isinstance(a, LikeCond) and isinstance(b, LikeCond):
+ # longest likestr first
+ return cmp(len(b.likestr), len(a.likestr))
+ return 0
+ if isinstance(a, ExactCond):
+ return -1
+ if isinstance(b, ExactCond):
+ return 1
+ if isinstance(a, PrefixCond):
+ return -1
+ if isinstance(b, PrefixCond):
+ return 1
+ # leave all unknown condition callables alone as equals
+ return 0
+
+ if sys.version_info < (2, 6) :
+ conditionlist = conditions.items()
+ conditionlist.sort(cmp_conditions)
+ else : # Insertion Sort. Please, improve
+ conditionlist = []
+ for i in conditions.items() :
+ for j, k in enumerate(conditionlist) :
+ r = cmp_conditions(k, i)
+ if r == 1 :
+ conditionlist.insert(j, i)
+ break
+ else :
+ conditionlist.append(i)
+
+ # Apply conditions to column data to find what we want
+ cur = self.db.cursor()
+ column_num = -1
+ for column, condition in conditionlist:
+ column_num = column_num + 1
+ searchkey = _search_col_data_key(table, column)
+ # speedup: don't linear search columns within loop
+ if column in columns:
+ savethiscolumndata = 1 # save the data for return
+ else:
+ savethiscolumndata = 0 # data only used for selection
+
+ try:
+ key, data = cur.set_range(searchkey)
+ while key[:len(searchkey)] == searchkey:
+ # extract the rowid from the key
+ rowid = key[-_rowid_str_len:]
+
+ if not rowid in rejected_rowids:
+ # if no condition was specified or the condition
+ # succeeds, add row to our match list.
+ if not condition or condition(data):
+ if not rowid in matching_rowids:
+ matching_rowids[rowid] = {}
+ if savethiscolumndata:
+ matching_rowids[rowid][column] = data
+ else:
+ if rowid in matching_rowids:
+ del matching_rowids[rowid]
+ rejected_rowids[rowid] = rowid
+
+ key, data = cur.next()
+
+ except db.DBError, dberror:
+ if dberror.args[0] != db.DB_NOTFOUND:
+ raise
+ continue
+
+ cur.close()
+
+ # we're done selecting rows, garbage collect the reject list
+ del rejected_rowids
+
+ # extract any remaining desired column data from the
+ # database for the matching rows.
+ if len(columns) > 0:
+ for rowid, rowdata in matching_rowids.items():
+ for column in columns:
+ if column in rowdata:
+ continue
+ try:
+ rowdata[column] = self.db.get(
+ _data_key(table, column, rowid))
+ except db.DBError, dberror:
+ if sys.version_info < (2, 6) :
+ if dberror[0] != db.DB_NOTFOUND:
+ raise
+ else :
+ if dberror.args[0] != db.DB_NOTFOUND:
+ raise
+ rowdata[column] = None
+
+ # return the matches
+ return matching_rowids
+
+
+ def Drop(self, table):
+ """Remove an entire table from the database"""
+ txn = None
+ try:
+ txn = self.env.txn_begin()
+
+ # delete the column list
+ self.db.delete(_columns_key(table), txn=txn)
+
+ cur = self.db.cursor(txn)
+
+ # delete all keys containing this tables column and row info
+ table_key = _search_all_data_key(table)
+ while 1:
+ try:
+ key, data = cur.set_range(table_key)
+ except db.DBNotFoundError:
+ break
+ # only delete items in this table
+ if key[:len(table_key)] != table_key:
+ break
+ cur.delete()
+
+ # delete all rowids used by this table
+ table_key = _search_rowid_key(table)
+ while 1:
+ try:
+ key, data = cur.set_range(table_key)
+ except db.DBNotFoundError:
+ break
+ # only delete items in this table
+ if key[:len(table_key)] != table_key:
+ break
+ cur.delete()
+
+ cur.close()
+
+ # delete the tablename from the table name list
+ tablelist = pickle.loads(
+ getattr(self.db, "get_bytes", self.db.get)(_table_names_key,
+ txn=txn, flags=db.DB_RMW))
+ try:
+ tablelist.remove(table)
+ except ValueError:
+ # hmm, it wasn't there, oh well, that's what we want.
+ pass
+ # delete 1st, incase we opened with DB_DUP
+ self.db.delete(_table_names_key, txn=txn)
+ getattr(self.db, "put_bytes", self.db.put)(_table_names_key,
+ pickle.dumps(tablelist, 1), txn=txn)
+
+ txn.commit()
+ txn = None
+
+ if table in self.__tablecolumns:
+ del self.__tablecolumns[table]
+
+ except db.DBError, dberror:
+ if txn:
+ txn.abort()
+ raise TableDBError(dberror.args[1])
diff --git a/lib/python2.7/bsddb/dbutils.py b/lib/python2.7/bsddb/dbutils.py
new file mode 100644
index 0000000..02a686f
--- /dev/null
+++ b/lib/python2.7/bsddb/dbutils.py
@@ -0,0 +1,83 @@
+#------------------------------------------------------------------------
+#
+# Copyright (C) 2000 Autonomous Zone Industries
+#
+# License: This is free software. You may use this software for any
+# purpose including modification/redistribution, so long as
+# this header remains intact and that you do not claim any
+# rights of ownership or authorship of this software. This
+# software has been tested, but no warranty is expressed or
+# implied.
+#
+# Author: Gregory P. Smith <greg@krypto.org>
+#
+# Note: I don't know how useful this is in reality since when a
+# DBLockDeadlockError happens the current transaction is supposed to be
+# aborted. If it doesn't then when the operation is attempted again
+# the deadlock is still happening...
+# --Robin
+#
+#------------------------------------------------------------------------
+
+
+#
+# import the time.sleep function in a namespace safe way to allow
+# "from bsddb.dbutils import *"
+#
+from time import sleep as _sleep
+
+import sys
+absolute_import = (sys.version_info[0] >= 3)
+if absolute_import :
+    # Because this syntax is not valid before Python 2.5
+ exec("from . import db")
+else :
+ import db
+
+# always sleep at least N seconds between retries
+_deadlock_MinSleepTime = 1.0/128
+# never sleep more than N seconds between retries
+_deadlock_MaxSleepTime = 3.14159
+
+# Assign a file object to this for a "sleeping" message to be written to it
+# each retry
+_deadlock_VerboseFile = None
+
+
+def DeadlockWrap(function, *_args, **_kwargs):
+ """DeadlockWrap(function, *_args, **_kwargs) - automatically retries
+ function in case of a database deadlock.
+
+ This is a function intended to be used to wrap database calls such
+ that they perform retrys with exponentially backing off sleeps in
+ between when a DBLockDeadlockError exception is raised.
+
+ A 'max_retries' parameter may optionally be passed to prevent it
+ from retrying forever (in which case the exception will be reraised).
+
+ d = DB(...)
+ d.open(...)
+ DeadlockWrap(d.put, "foo", data="bar") # set key "foo" to "bar"
+ """
+ sleeptime = _deadlock_MinSleepTime
+ max_retries = _kwargs.get('max_retries', -1)
+ if 'max_retries' in _kwargs:
+ del _kwargs['max_retries']
+ while True:
+ try:
+ return function(*_args, **_kwargs)
+ except db.DBLockDeadlockError:
+ if _deadlock_VerboseFile:
+ _deadlock_VerboseFile.write(
+ 'dbutils.DeadlockWrap: sleeping %1.3f\n' % sleeptime)
+ _sleep(sleeptime)
+ # exponential backoff in the sleep time
+ sleeptime *= 2
+ if sleeptime > _deadlock_MaxSleepTime:
+ sleeptime = _deadlock_MaxSleepTime
+ max_retries -= 1
+ if max_retries == -1:
+ raise
+
+
+#------------------------------------------------------------------------
diff --git a/lib/python2.7/bsddb/test/__init__.py b/lib/python2.7/bsddb/test/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/lib/python2.7/bsddb/test/__init__.py
diff --git a/lib/python2.7/bsddb/test/test_all.py b/lib/python2.7/bsddb/test/test_all.py
new file mode 100644
index 0000000..529dfad
--- /dev/null
+++ b/lib/python2.7/bsddb/test/test_all.py
@@ -0,0 +1,620 @@
+"""Run all test cases.
+"""
+
+import sys
+import os
+import unittest
+try:
+ # For Pythons w/distutils pybsddb
+ import bsddb3 as bsddb
+except ImportError:
+ # For Python 2.3
+ import bsddb
+
+
+if sys.version_info[0] >= 3 :
+ charset = "iso8859-1" # Full 8 bit
+
+ class logcursor_py3k(object) :
+ def __init__(self, env) :
+ self._logcursor = env.log_cursor()
+
+ def __getattr__(self, v) :
+ return getattr(self._logcursor, v)
+
+ def __next__(self) :
+ v = getattr(self._logcursor, "next")()
+ if v is not None :
+ v = (v[0], v[1].decode(charset))
+ return v
+
+ next = __next__
+
+ def first(self) :
+ v = self._logcursor.first()
+ if v is not None :
+ v = (v[0], v[1].decode(charset))
+ return v
+
+ def last(self) :
+ v = self._logcursor.last()
+ if v is not None :
+ v = (v[0], v[1].decode(charset))
+ return v
+
+ def prev(self) :
+ v = self._logcursor.prev()
+ if v is not None :
+ v = (v[0], v[1].decode(charset))
+ return v
+
+ def current(self) :
+ v = self._logcursor.current()
+ if v is not None :
+ v = (v[0], v[1].decode(charset))
+ return v
+
+ def set(self, lsn) :
+ v = self._logcursor.set(lsn)
+ if v is not None :
+ v = (v[0], v[1].decode(charset))
+ return v
+
+ class cursor_py3k(object) :
+ def __init__(self, db, *args, **kwargs) :
+ self._dbcursor = db.cursor(*args, **kwargs)
+
+ def __getattr__(self, v) :
+ return getattr(self._dbcursor, v)
+
+ def _fix(self, v) :
+ if v is None : return None
+ key, value = v
+ if isinstance(key, bytes) :
+ key = key.decode(charset)
+ return (key, value.decode(charset))
+
+ def __next__(self) :
+ v = getattr(self._dbcursor, "next")()
+ return self._fix(v)
+
+ next = __next__
+
+ def previous(self) :
+ v = self._dbcursor.previous()
+ return self._fix(v)
+
+ def last(self) :
+ v = self._dbcursor.last()
+ return self._fix(v)
+
+ def set(self, k) :
+ if isinstance(k, str) :
+ k = bytes(k, charset)
+ v = self._dbcursor.set(k)
+ return self._fix(v)
+
+ def set_recno(self, num) :
+ v = self._dbcursor.set_recno(num)
+ return self._fix(v)
+
+ def set_range(self, k, dlen=-1, doff=-1) :
+ if isinstance(k, str) :
+ k = bytes(k, charset)
+ v = self._dbcursor.set_range(k, dlen=dlen, doff=doff)
+ return self._fix(v)
+
+ def dup(self, flags=0) :
+ cursor = self._dbcursor.dup(flags)
+ return dup_cursor_py3k(cursor)
+
+ def next_dup(self) :
+ v = self._dbcursor.next_dup()
+ return self._fix(v)
+
+ def next_nodup(self) :
+ v = self._dbcursor.next_nodup()
+ return self._fix(v)
+
+ def put(self, key, data, flags=0, dlen=-1, doff=-1) :
+ if isinstance(key, str) :
+ key = bytes(key, charset)
+ if isinstance(data, str) :
+ value = bytes(data, charset)
+ return self._dbcursor.put(key, data, flags=flags, dlen=dlen,
+ doff=doff)
+
+ def current(self, flags=0, dlen=-1, doff=-1) :
+ v = self._dbcursor.current(flags=flags, dlen=dlen, doff=doff)
+ return self._fix(v)
+
+ def first(self) :
+ v = self._dbcursor.first()
+ return self._fix(v)
+
+ def pget(self, key=None, data=None, flags=0) :
+ # Incorrect because key can be a bare number,
+ # but enough to pass testsuite
+ if isinstance(key, int) and (data is None) and (flags == 0) :
+ flags = key
+ key = None
+ if isinstance(key, str) :
+ key = bytes(key, charset)
+ if isinstance(data, int) and (flags==0) :
+ flags = data
+ data = None
+ if isinstance(data, str) :
+ data = bytes(data, charset)
+ v=self._dbcursor.pget(key=key, data=data, flags=flags)
+ if v is not None :
+ v1, v2, v3 = v
+ if isinstance(v1, bytes) :
+ v1 = v1.decode(charset)
+ if isinstance(v2, bytes) :
+ v2 = v2.decode(charset)
+
+ v = (v1, v2, v3.decode(charset))
+
+ return v
+
+ def join_item(self) :
+ v = self._dbcursor.join_item()
+ if v is not None :
+ v = v.decode(charset)
+ return v
+
+ def get(self, *args, **kwargs) :
+ l = len(args)
+ if l == 2 :
+ k, f = args
+ if isinstance(k, str) :
+ k = bytes(k, "iso8859-1")
+ args = (k, f)
+ elif l == 3 :
+ k, d, f = args
+ if isinstance(k, str) :
+ k = bytes(k, charset)
+ if isinstance(d, str) :
+ d = bytes(d, charset)
+ args =(k, d, f)
+
+ v = self._dbcursor.get(*args, **kwargs)
+ if v is not None :
+ k, v = v
+ if isinstance(k, bytes) :
+ k = k.decode(charset)
+ v = (k, v.decode(charset))
+ return v
+
+ def get_both(self, key, value) :
+ if isinstance(key, str) :
+ key = bytes(key, charset)
+ if isinstance(value, str) :
+ value = bytes(value, charset)
+ v=self._dbcursor.get_both(key, value)
+ return self._fix(v)
+
+    class dup_cursor_py3k(cursor_py3k) :
+        """Like cursor_py3k, but adopts an already-created underlying
+        cursor (e.g. from dup() or join()) instead of opening one."""
+        def __init__(self, dbcursor) :
+            self._dbcursor = dbcursor
+
+ class DB_py3k(object) :
+ def __init__(self, *args, **kwargs) :
+ args2=[]
+ for i in args :
+ if isinstance(i, DBEnv_py3k) :
+ i = i._dbenv
+ args2.append(i)
+ args = tuple(args2)
+ for k, v in kwargs.items() :
+ if isinstance(v, DBEnv_py3k) :
+ kwargs[k] = v._dbenv
+
+ self._db = bsddb._db.DB_orig(*args, **kwargs)
+
+ def __contains__(self, k) :
+ if isinstance(k, str) :
+ k = bytes(k, charset)
+ return getattr(self._db, "has_key")(k)
+
+ def __getitem__(self, k) :
+ if isinstance(k, str) :
+ k = bytes(k, charset)
+ v = self._db[k]
+ if v is not None :
+ v = v.decode(charset)
+ return v
+
+ def __setitem__(self, k, v) :
+ if isinstance(k, str) :
+ k = bytes(k, charset)
+ if isinstance(v, str) :
+ v = bytes(v, charset)
+ self._db[k] = v
+
+ def __delitem__(self, k) :
+ if isinstance(k, str) :
+ k = bytes(k, charset)
+ del self._db[k]
+
+ def __getattr__(self, v) :
+ return getattr(self._db, v)
+
+ def __len__(self) :
+ return len(self._db)
+
+ def has_key(self, k, txn=None) :
+ if isinstance(k, str) :
+ k = bytes(k, charset)
+ return self._db.has_key(k, txn=txn)
+
+ def set_re_delim(self, c) :
+ if isinstance(c, str) : # We can use a numeric value byte too
+ c = bytes(c, charset)
+ return self._db.set_re_delim(c)
+
+ def set_re_pad(self, c) :
+ if isinstance(c, str) : # We can use a numeric value byte too
+ c = bytes(c, charset)
+ return self._db.set_re_pad(c)
+
+ def get_re_source(self) :
+ source = self._db.get_re_source()
+ return source.decode(charset)
+
+ def put(self, key, data, txn=None, flags=0, dlen=-1, doff=-1) :
+ if isinstance(key, str) :
+ key = bytes(key, charset)
+ if isinstance(data, str) :
+ value = bytes(data, charset)
+ return self._db.put(key, data, flags=flags, txn=txn, dlen=dlen,
+ doff=doff)
+
+ def append(self, value, txn=None) :
+ if isinstance(value, str) :
+ value = bytes(value, charset)
+ return self._db.append(value, txn=txn)
+
+ def get_size(self, key) :
+ if isinstance(key, str) :
+ key = bytes(key, charset)
+ return self._db.get_size(key)
+
+ def exists(self, key, *args, **kwargs) :
+ if isinstance(key, str) :
+ key = bytes(key, charset)
+ return self._db.exists(key, *args, **kwargs)
+
+ def get(self, key, default="MagicCookie", txn=None, flags=0, dlen=-1, doff=-1) :
+ if isinstance(key, str) :
+ key = bytes(key, charset)
+ if default != "MagicCookie" : # Magic for 'test_get_none.py'
+ v=self._db.get(key, default=default, txn=txn, flags=flags,
+ dlen=dlen, doff=doff)
+ else :
+ v=self._db.get(key, txn=txn, flags=flags,
+ dlen=dlen, doff=doff)
+ if (v is not None) and isinstance(v, bytes) :
+ v = v.decode(charset)
+ return v
+
+ def pget(self, key, txn=None) :
+ if isinstance(key, str) :
+ key = bytes(key, charset)
+ v=self._db.pget(key, txn=txn)
+ if v is not None :
+ v1, v2 = v
+ if isinstance(v1, bytes) :
+ v1 = v1.decode(charset)
+
+ v = (v1, v2.decode(charset))
+ return v
+
+ def get_both(self, key, value, txn=None, flags=0) :
+ if isinstance(key, str) :
+ key = bytes(key, charset)
+ if isinstance(value, str) :
+ value = bytes(value, charset)
+ v=self._db.get_both(key, value, txn=txn, flags=flags)
+ if v is not None :
+ v = v.decode(charset)
+ return v
+
+ def delete(self, key, txn=None) :
+ if isinstance(key, str) :
+ key = bytes(key, charset)
+ return self._db.delete(key, txn=txn)
+
+ def keys(self) :
+ k = self._db.keys()
+ if len(k) and isinstance(k[0], bytes) :
+ return [i.decode(charset) for i in self._db.keys()]
+ else :
+ return k
+
+ def items(self) :
+ data = self._db.items()
+ if not len(data) : return data
+ data2 = []
+ for k, v in data :
+ if isinstance(k, bytes) :
+ k = k.decode(charset)
+ data2.append((k, v.decode(charset)))
+ return data2
+
+ def associate(self, secondarydb, callback, flags=0, txn=None) :
+ class associate_callback(object) :
+ def __init__(self, callback) :
+ self._callback = callback
+
+ def callback(self, key, data) :
+ if isinstance(key, str) :
+ key = key.decode(charset)
+ data = data.decode(charset)
+ key = self._callback(key, data)
+ if (key != bsddb._db.DB_DONOTINDEX) :
+ if isinstance(key, str) :
+ key = bytes(key, charset)
+ elif isinstance(key, list) :
+ key2 = []
+ for i in key :
+ if isinstance(i, str) :
+ i = bytes(i, charset)
+ key2.append(i)
+ key = key2
+ return key
+
+ return self._db.associate(secondarydb._db,
+ associate_callback(callback).callback, flags=flags,
+ txn=txn)
+
+ def cursor(self, txn=None, flags=0) :
+ return cursor_py3k(self._db, txn=txn, flags=flags)
+
+ def join(self, cursor_list) :
+ cursor_list = [i._dbcursor for i in cursor_list]
+ return dup_cursor_py3k(self._db.join(cursor_list))
+
+    class DBEnv_py3k(object) :
+        """Proxy around DBEnv: decodes the byte-string results of the
+        directory accessors and wraps log cursors in logcursor_py3k."""
+        def __init__(self, *args, **kwargs) :
+            self._dbenv = bsddb._db.DBEnv_orig(*args, **kwargs)
+
+        def __getattr__(self, v) :
+            # Anything not explicitly wrapped is delegated untouched.
+            return getattr(self._dbenv, v)
+
+        def log_cursor(self, flags=0) :
+            # 'flags' is accepted for API compatibility but not forwarded.
+            return logcursor_py3k(self._dbenv)
+
+        def get_lg_dir(self) :
+            return self._dbenv.get_lg_dir().decode(charset)
+
+        def get_tmp_dir(self) :
+            return self._dbenv.get_tmp_dir().decode(charset)
+
+        def get_data_dirs(self) :
+            return tuple(
+                (i.decode(charset) for i in self._dbenv.get_data_dirs()))
+
+    class DBSequence_py3k(object) :
+        """Proxy around DBSequence: encodes the key to bytes in open()
+        and decodes it back to str in get_key()."""
+        def __init__(self, db, *args, **kwargs) :
+            self._db=db
+            self._dbsequence = bsddb._db.DBSequence_orig(db._db, *args, **kwargs)
+
+        def __getattr__(self, v) :
+            # Anything not explicitly wrapped is delegated untouched.
+            return getattr(self._dbsequence, v)
+
+        def open(self, key, *args, **kwargs) :
+            return self._dbsequence.open(bytes(key, charset), *args, **kwargs)
+
+        def get_key(self) :
+            return self._dbsequence.get_key().decode(charset)
+
+        def get_dbp(self) :
+            # Return the wrapping DB_py3k proxy, not the raw DB object.
+            return self._db
+
+    # Keep references to the real classes so the proxying can be undone.
+    bsddb._db.DBEnv_orig = bsddb._db.DBEnv
+    bsddb._db.DB_orig = bsddb._db.DB
+    if bsddb.db.version() <= (4, 3) :
+        # no usable DBSequence on Berkeley DB <= 4.3
+        bsddb._db.DBSequence_orig = None
+    else :
+        bsddb._db.DBSequence_orig = bsddb._db.DBSequence
+
+    def do_proxy_db_py3k(flag) :
+        """Install (flag true) or remove (flag false) the *_py3k proxy
+        classes over bsddb/_db.  Returns the previous proxying state."""
+        flag2 = do_proxy_db_py3k.flag
+        do_proxy_db_py3k.flag = flag
+        if flag :
+            bsddb.DBEnv = bsddb.db.DBEnv = bsddb._db.DBEnv = DBEnv_py3k
+            bsddb.DB = bsddb.db.DB = bsddb._db.DB = DB_py3k
+            bsddb._db.DBSequence = DBSequence_py3k
+        else :
+            bsddb.DBEnv = bsddb.db.DBEnv = bsddb._db.DBEnv = bsddb._db.DBEnv_orig
+            bsddb.DB = bsddb.db.DB = bsddb._db.DB = bsddb._db.DB_orig
+            bsddb._db.DBSequence = bsddb._db.DBSequence_orig
+        return flag2
+
+    # Proxying is enabled by default when running under Python 3.
+    do_proxy_db_py3k.flag = False
+    do_proxy_db_py3k(True)
+
+try:
+ # For Pythons w/distutils pybsddb
+ from bsddb3 import db, dbtables, dbutils, dbshelve, \
+ hashopen, btopen, rnopen, dbobj
+except ImportError:
+ # For Python 2.3
+ from bsddb import db, dbtables, dbutils, dbshelve, \
+ hashopen, btopen, rnopen, dbobj
+
+try:
+ from bsddb3 import test_support
+except ImportError:
+ if sys.version_info[0] < 3 :
+ from test import test_support
+ else :
+ from test import support as test_support
+
+
+try:
+ if sys.version_info[0] < 3 :
+ from threading import Thread, currentThread
+ del Thread, currentThread
+ else :
+ from threading import Thread, current_thread
+ del Thread, current_thread
+ have_threads = True
+except ImportError:
+ have_threads = False
+
+verbose = 0
+if 'verbose' in sys.argv:
+ verbose = 1
+ sys.argv.remove('verbose')
+
+if 'silent' in sys.argv: # take care of old flag, just in case
+ verbose = 0
+ sys.argv.remove('silent')
+
+
+def print_versions():
+    """Print Berkeley DB / bsddb / Python version details to stdout."""
+    print
+    print '-=' * 38
+    print db.DB_VERSION_STRING
+    print 'bsddb.db.version(): %s' % (db.version(), )
+    if db.version() >= (5, 0) :
+        # full_version() only exists from Berkeley DB 5.0 on
+        print 'bsddb.db.full_version(): %s' %repr(db.full_version())
+    print 'bsddb.db.__version__: %s' % db.__version__
+    print 'bsddb.db.cvsid: %s' % db.cvsid
+
+    # Workaround to allow generating EGGs as ZIP files: build the
+    # "__file__" attribute name at runtime.
+    suffix="__"
+    print 'py module: %s' % getattr(bsddb, "__file"+suffix)
+    # NOTE(review): both lines print the same bsddb.__file__; the second
+    # presumably should inspect the C extension module -- confirm.
+    print 'extension module: %s' % getattr(bsddb, "__file"+suffix)
+
+    print 'python version: %s' % sys.version
+    print 'My pid: %s' % os.getpid()
+    print '-=' * 38
+
+
+def get_new_path(name) :
+ get_new_path.mutex.acquire()
+ try :
+ import os
+ path=os.path.join(get_new_path.prefix,
+ name+"_"+str(os.getpid())+"_"+str(get_new_path.num))
+ get_new_path.num+=1
+ finally :
+ get_new_path.mutex.release()
+ return path
+
+def get_new_environment_path() :
+    """Create and return an empty directory for a new DB environment."""
+    path=get_new_path("environment")
+    import os
+    try:
+        os.makedirs(path,mode=0700)
+    except os.error:
+        # leftover from a previous run: wipe it and recreate
+        test_support.rmtree(path)
+        os.makedirs(path)
+    return path
+
+def get_new_database_path() :
+    """Return a path for a new database file, removing any stale file."""
+    path=get_new_path("database")
+    import os
+    if os.path.exists(path) :
+        os.remove(path)
+    return path
+
+
+# This path can be overridden via "set_test_path_prefix()".
+import os, os.path
+get_new_path.prefix=os.path.join(os.environ.get("TMPDIR",
+ os.path.join(os.sep,"tmp")), "z-Berkeley_DB")
+get_new_path.num=0
+
+def get_test_path_prefix() :
+    """Return the directory prefix used for test scratch files."""
+    return get_new_path.prefix
+
+def set_test_path_prefix(path) :
+    """Override the directory prefix used for test scratch files."""
+    get_new_path.prefix=path
+
+def remove_test_path_directory() :
+    """Delete the entire test scratch directory tree."""
+    test_support.rmtree(get_new_path.prefix)
+
+# Protect the path counter with a real lock when threads are available,
+# otherwise with a dummy object exposing the same interface.
+if have_threads :
+    import threading
+    get_new_path.mutex=threading.Lock()
+    del threading
+else :
+    class Lock(object) :
+        def acquire(self) :
+            pass
+        def release(self) :
+            pass
+    get_new_path.mutex=Lock()
+    del Lock
+
+
+
+class PrintInfoFakeTest(unittest.TestCase):
+    """Fake test case whose only job is to print version information."""
+    def testPrintVersions(self):
+        print_versions()
+
+
+# This little hack is for when this module is run as main and all the
+# other modules import it so they will still be able to get the right
+# verbose setting.  It's confusing but it works.
+if sys.version_info[0] < 3 :
+    import test_all
+    test_all.verbose = verbose
+else :
+    import sys
+    # Python 3 counterpart of the hack is not implemented yet.
+    print >>sys.stderr, "Work to do!"
+
+
+def suite(module_prefix='', timing_check=None):
+    """Build a TestSuite containing every bsddb test module.
+
+    module_prefix is prepended to each module name before import (so the
+    suite can be built from outside the package); timing_check, if
+    given, is a TestCase class whose suite is added after each module.
+    """
+    test_modules = [
+        'test_associate',
+        'test_basics',
+        'test_dbenv',
+        'test_db',
+        'test_compare',
+        'test_compat',
+        'test_cursor_pget_bug',
+        'test_dbobj',
+        'test_dbshelve',
+        'test_dbtables',
+        'test_distributed_transactions',
+        'test_early_close',
+        'test_fileid',
+        'test_get_none',
+        'test_join',
+        'test_lock',
+        'test_misc',
+        'test_pickle',
+        'test_queue',
+        'test_recno',
+        'test_replication',
+        'test_sequence',
+        'test_thread',
+        ]
+
+    alltests = unittest.TestSuite()
+    for name in test_modules:
+        #module = __import__(name)
+        # Do it this way so that suite may be called externally via
+        # python's Lib/test/test_bsddb3.
+        module = __import__(module_prefix+name, globals(), locals(), name)
+
+        alltests.addTest(module.test_suite())
+        if timing_check:
+            alltests.addTest(unittest.makeSuite(timing_check))
+    return alltests
+
+
+def test_suite():
+ suite = unittest.TestSuite()
+ suite.addTest(unittest.makeSuite(PrintInfoFakeTest))
+ return suite
+
+
+if __name__ == '__main__':
+ print_versions()
+ unittest.main(defaultTest='suite')
diff --git a/lib/python2.7/bsddb/test/test_associate.py b/lib/python2.7/bsddb/test/test_associate.py
new file mode 100644
index 0000000..7a49e11
--- /dev/null
+++ b/lib/python2.7/bsddb/test/test_associate.py
@@ -0,0 +1,465 @@
+"""
+TestCases for DB.associate.
+"""
+
+import sys, os, string
+import time
+from pprint import pprint
+
+import unittest
+from test_all import db, dbshelve, test_support, verbose, have_threads, \
+ get_new_environment_path
+
+
+#----------------------------------------------------------------------
+
+
+musicdata = {
+1 : ("Bad English", "The Price Of Love", "Rock"),
+2 : ("DNA featuring Suzanne Vega", "Tom's Diner", "Rock"),
+3 : ("George Michael", "Praying For Time", "Rock"),
+4 : ("Gloria Estefan", "Here We Are", "Rock"),
+5 : ("Linda Ronstadt", "Don't Know Much", "Rock"),
+6 : ("Michael Bolton", "How Am I Supposed To Live Without You", "Blues"),
+7 : ("Paul Young", "Oh Girl", "Rock"),
+8 : ("Paula Abdul", "Opposites Attract", "Rock"),
+9 : ("Richard Marx", "Should've Known Better", "Rock"),
+10: ("Rod Stewart", "Forever Young", "Rock"),
+11: ("Roxette", "Dangerous", "Rock"),
+12: ("Sheena Easton", "The Lover In Me", "Rock"),
+13: ("Sinead O'Connor", "Nothing Compares 2 U", "Rock"),
+14: ("Stevie B.", "Because I Love You", "Rock"),
+15: ("Taylor Dayne", "Love Will Lead You Back", "Rock"),
+16: ("The Bangles", "Eternal Flame", "Rock"),
+17: ("Wilson Phillips", "Release Me", "Rock"),
+18: ("Billy Joel", "Blonde Over Blue", "Rock"),
+19: ("Billy Joel", "Famous Last Words", "Rock"),
+20: ("Billy Joel", "Lullabye (Goodnight, My Angel)", "Rock"),
+21: ("Billy Joel", "The River Of Dreams", "Rock"),
+22: ("Billy Joel", "Two Thousand Years", "Rock"),
+23: ("Janet Jackson", "Alright", "Rock"),
+24: ("Janet Jackson", "Black Cat", "Rock"),
+25: ("Janet Jackson", "Come Back To Me", "Rock"),
+26: ("Janet Jackson", "Escapade", "Rock"),
+27: ("Janet Jackson", "Love Will Never Do (Without You)", "Rock"),
+28: ("Janet Jackson", "Miss You Much", "Rock"),
+29: ("Janet Jackson", "Rhythm Nation", "Rock"),
+30: ("Janet Jackson", "State Of The World", "Rock"),
+31: ("Janet Jackson", "The Knowledge", "Rock"),
+32: ("Spyro Gyra", "End of Romanticism", "Jazz"),
+33: ("Spyro Gyra", "Heliopolis", "Jazz"),
+34: ("Spyro Gyra", "Jubilee", "Jazz"),
+35: ("Spyro Gyra", "Little Linda", "Jazz"),
+36: ("Spyro Gyra", "Morning Dance", "Jazz"),
+37: ("Spyro Gyra", "Song for Lorraine", "Jazz"),
+38: ("Yes", "Owner Of A Lonely Heart", "Rock"),
+39: ("Yes", "Rhythm Of Love", "Rock"),
+40: ("Cusco", "Dream Catcher", "New Age"),
+41: ("Cusco", "Geronimos Laughter", "New Age"),
+42: ("Cusco", "Ghost Dance", "New Age"),
+43: ("Blue Man Group", "Drumbone", "New Age"),
+44: ("Blue Man Group", "Endless Column", "New Age"),
+45: ("Blue Man Group", "Klein Mandelbrot", "New Age"),
+46: ("Kenny G", "Silhouette", "Jazz"),
+47: ("Sade", "Smooth Operator", "Jazz"),
+48: ("David Arkenstone", "Papillon (On The Wings Of The Butterfly)",
+ "New Age"),
+49: ("David Arkenstone", "Stepping Stars", "New Age"),
+50: ("David Arkenstone", "Carnation Lily Lily Rose", "New Age"),
+51: ("David Lanz", "Behind The Waterfall", "New Age"),
+52: ("David Lanz", "Cristofori's Dream", "New Age"),
+53: ("David Lanz", "Heartsounds", "New Age"),
+54: ("David Lanz", "Leaves on the Seine", "New Age"),
+99: ("unknown artist", "Unnamed song", "Unknown"),
+}
+
+#----------------------------------------------------------------------
+
+class AssociateErrorTestCase(unittest.TestCase):
+    """Check that associating a secondary with a DB_DUP primary fails."""
+    def setUp(self):
+        self.filename = self.__class__.__name__ + '.db'
+        self.homeDir = get_new_environment_path()
+        self.env = db.DBEnv()
+        self.env.open(self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL)
+
+    def tearDown(self):
+        self.env.close()
+        self.env = None
+        test_support.rmtree(self.homeDir)
+
+    def test00_associateDBError(self):
+        if verbose:
+            print '\n', '-=' * 30
+            print "Running %s.test00_associateDBError..." % \
+                  self.__class__.__name__
+
+        dupDB = db.DB(self.env)
+        dupDB.set_flags(db.DB_DUP)
+        dupDB.open(self.filename, "primary", db.DB_BTREE, db.DB_CREATE)
+
+        secDB = db.DB(self.env)
+        secDB.open(self.filename, "secondary", db.DB_BTREE, db.DB_CREATE)
+
+        # dupDB has been configured to allow duplicates, it can't
+        # associate with a secondary.  Berkeley DB will return an error.
+        try:
+            def f(a,b): return a+b
+            dupDB.associate(secDB, f)
+        except db.DBError:
+            # good
+            secDB.close()
+            dupDB.close()
+        else:
+            secDB.close()
+            dupDB.close()
+            self.fail("DBError exception was expected")
+
+
+
+#----------------------------------------------------------------------
+
+
+class AssociateTestCase(unittest.TestCase):
+    """Base class for DB.associate tests.
+
+    Subclasses set dbtype (and optionally keytype, envFlags, dbFlags).
+    A primary DB holds musicdata records keyed by number; a secondary
+    index is associated on the genre field.
+    """
+    keytype = ''        # '' -> string keys; 0 -> integer (recno) keys
+    envFlags = 0
+    dbFlags = 0
+
+    def setUp(self):
+        self.filename = self.__class__.__name__ + '.db'
+        self.homeDir = get_new_environment_path()
+        self.env = db.DBEnv()
+        self.env.open(self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL |
+                           db.DB_INIT_LOCK | db.DB_THREAD | self.envFlags)
+
+    def tearDown(self):
+        self.closeDB()
+        self.env.close()
+        self.env = None
+        test_support.rmtree(self.homeDir)
+
+    def addDataToDB(self, d, txn=None):
+        # Store each record as "artist|title|genre"
+        for key, value in musicdata.items():
+            if type(self.keytype) == type(''):
+                key = "%02d" % key
+            d.put(key, '|'.join(value), txn=txn)
+
+    def createDB(self, txn=None):
+        self.cur = None
+        self.secDB = None
+        self.primary = db.DB(self.env)
+        self.primary.set_get_returns_none(2)
+        self.primary.open(self.filename, "primary", self.dbtype,
+                    db.DB_CREATE | db.DB_THREAD | self.dbFlags, txn=txn)
+
+    def closeDB(self):
+        if self.cur:
+            self.cur.close()
+            self.cur = None
+        if self.secDB:
+            self.secDB.close()
+            self.secDB = None
+        self.primary.close()
+        self.primary = None
+
+    def getDB(self):
+        return self.primary
+
+
+    def _associateWithDB(self, getGenre):
+        # associate() before any data is written
+        self.createDB()
+
+        self.secDB = db.DB(self.env)
+        self.secDB.set_flags(db.DB_DUP)
+        self.secDB.set_get_returns_none(2)
+        self.secDB.open(self.filename, "secondary", db.DB_BTREE,
+                   db.DB_CREATE | db.DB_THREAD | self.dbFlags)
+        self.getDB().associate(self.secDB, getGenre)
+
+        self.addDataToDB(self.getDB())
+
+        self.finish_test(self.secDB)
+
+    def test01_associateWithDB(self):
+        if verbose:
+            print '\n', '-=' * 30
+            print "Running %s.test01_associateWithDB..." % \
+                  self.__class__.__name__
+
+        return self._associateWithDB(self.getGenre)
+
+    def _associateAfterDB(self, getGenre) :
+        # associate() after the primary already contains data
+        self.createDB()
+        self.addDataToDB(self.getDB())
+
+        self.secDB = db.DB(self.env)
+        self.secDB.set_flags(db.DB_DUP)
+        self.secDB.open(self.filename, "secondary", db.DB_BTREE,
+                   db.DB_CREATE | db.DB_THREAD | self.dbFlags)
+
+        # adding the DB_CREATE flag will cause it to index existing records
+        self.getDB().associate(self.secDB, getGenre, db.DB_CREATE)
+
+        self.finish_test(self.secDB)
+
+    def test02_associateAfterDB(self):
+        if verbose:
+            print '\n', '-=' * 30
+            print "Running %s.test02_associateAfterDB..." % \
+                  self.__class__.__name__
+
+        return self._associateAfterDB(self.getGenre)
+
+    if db.version() >= (4, 6):
+        # list-returning secondary callbacks need Berkeley DB >= 4.6
+        def test03_associateWithDB(self):
+            if verbose:
+                print '\n', '-=' * 30
+                print "Running %s.test03_associateWithDB..." % \
+                      self.__class__.__name__
+
+            return self._associateWithDB(self.getGenreList)
+
+        def test04_associateAfterDB(self):
+            if verbose:
+                print '\n', '-=' * 30
+                print "Running %s.test04_associateAfterDB..." % \
+                      self.__class__.__name__
+
+            return self._associateAfterDB(self.getGenreList)
+
+
+    def finish_test(self, secDB, txn=None):
+        """Verify the secondary index contents via pget and traversal."""
+        # 'Blues' should not be in the secondary database
+        vals = secDB.pget('Blues', txn=txn)
+        self.assertEqual(vals, None, vals)
+
+        vals = secDB.pget('Unknown', txn=txn)
+        self.assertTrue(vals[0] == 99 or vals[0] == '99', vals)
+        vals[1].index('Unknown')
+        vals[1].index('Unnamed')
+        vals[1].index('unknown')
+
+        if verbose:
+            print "Primary key traversal:"
+        self.cur = self.getDB().cursor(txn)
+        count = 0
+        rec = self.cur.first()
+        while rec is not None:
+            if type(self.keytype) == type(''):
+                self.assertTrue(int(rec[0]))  # for primary db, key is a number
+            else:
+                self.assertTrue(rec[0] and type(rec[0]) == type(0))
+            count = count + 1
+            if verbose:
+                print rec
+            rec = getattr(self.cur, "next")()
+        self.assertEqual(count, len(musicdata))  # all items accounted for
+
+
+        if verbose:
+            print "Secondary key traversal:"
+        self.cur = secDB.cursor(txn)
+        count = 0
+
+        # test cursor pget
+        vals = self.cur.pget('Unknown', flags=db.DB_LAST)
+        self.assertTrue(vals[1] == 99 or vals[1] == '99', vals)
+        self.assertEqual(vals[0], 'Unknown')
+        vals[2].index('Unknown')
+        vals[2].index('Unnamed')
+        vals[2].index('unknown')
+
+        vals = self.cur.pget('Unknown', data='wrong value', flags=db.DB_GET_BOTH)
+        self.assertEqual(vals, None, vals)
+
+        rec = self.cur.first()
+        self.assertEqual(rec[0], "Jazz")
+        while rec is not None:
+            count = count + 1
+            if verbose:
+                print rec
+            rec = getattr(self.cur, "next")()
+        # all items accounted for EXCEPT for 1 with "Blues" genre
+        self.assertEqual(count, len(musicdata)-1)
+
+        self.cur = None
+
+    def getGenre(self, priKey, priData):
+        """Secondary callback: index on genre, skipping 'Blues'."""
+        self.assertEqual(type(priData), type(""))
+        genre = priData.split('|')[2]
+
+        if verbose:
+            print 'getGenre key: %r data: %r' % (priKey, priData)
+
+        if genre == 'Blues':
+            return db.DB_DONOTINDEX
+        else:
+            return genre
+
+    def getGenreList(self, priKey, PriData) :
+        """Like getGenre but returns a one-element list (DB >= 4.6)."""
+        v = self.getGenre(priKey, PriData)
+        if type(v) == type("") :
+            v = [v]
+        return v
+
+
+#----------------------------------------------------------------------
+
+
+# Run the associate tests against each primary access method.
+class AssociateHashTestCase(AssociateTestCase):
+    dbtype = db.DB_HASH
+
+class AssociateBTreeTestCase(AssociateTestCase):
+    dbtype = db.DB_BTREE
+
+class AssociateRecnoTestCase(AssociateTestCase):
+    dbtype = db.DB_RECNO
+    keytype = 0  # recno uses integer keys
+
+#----------------------------------------------------------------------
+
+class AssociateBTreeTxnTestCase(AssociateBTreeTestCase):
+    """BTree associate tests run inside an explicit transaction."""
+    envFlags = db.DB_INIT_TXN
+    dbFlags = 0
+
+    def txn_finish_test(self, sDB, txn):
+        # Run the standard checks, then always close the cursor before
+        # committing (an open cursor would block the commit).
+        try:
+            self.finish_test(sDB, txn=txn)
+        finally:
+            if self.cur:
+                self.cur.close()
+                self.cur = None
+            if txn:
+                txn.commit()
+
+    def test13_associate_in_transaction(self):
+        if verbose:
+            print '\n', '-=' * 30
+            print "Running %s.test13_associateAutoCommit..." % \
+                  self.__class__.__name__
+
+        txn = self.env.txn_begin()
+        try:
+            self.createDB(txn=txn)
+
+            self.secDB = db.DB(self.env)
+            self.secDB.set_flags(db.DB_DUP)
+            self.secDB.set_get_returns_none(2)
+            self.secDB.open(self.filename, "secondary", db.DB_BTREE,
+                       db.DB_CREATE | db.DB_THREAD, txn=txn)
+            self.getDB().associate(self.secDB, self.getGenre, txn=txn)
+
+            self.addDataToDB(self.getDB(), txn=txn)
+        except:
+            txn.abort()
+            raise
+
+        self.txn_finish_test(self.secDB, txn=txn)
+
+
+#----------------------------------------------------------------------
+
+class ShelveAssociateTestCase(AssociateTestCase):
+    """Associate tests where the primary is a dbshelve (pickled values)."""
+
+    # NOTE(review): this override drops the base class's txn parameter;
+    # fine for the tests exercised here, but the signatures differ.
+    def createDB(self):
+        self.primary = dbshelve.open(self.filename,
+                                     dbname="primary",
+                                     dbenv=self.env,
+                                     filetype=self.dbtype)
+
+    def addDataToDB(self, d):
+        for key, value in musicdata.items():
+            if type(self.keytype) == type(''):
+                key = "%02d" % key
+            d.put(key, value)    # save the value as is this time
+
+
+    def getGenre(self, priKey, priData):
+        # priData is the unpickled tuple here, not a '|'-joined string
+        self.assertEqual(type(priData), type(()))
+        if verbose:
+            print 'getGenre key: %r data: %r' % (priKey, priData)
+        genre = priData[2]
+        if genre == 'Blues':
+            return db.DB_DONOTINDEX
+        else:
+            return genre
+
+
+# Shelve-backed associate tests for each access method.
+class ShelveAssociateHashTestCase(ShelveAssociateTestCase):
+    dbtype = db.DB_HASH
+
+class ShelveAssociateBTreeTestCase(ShelveAssociateTestCase):
+    dbtype = db.DB_BTREE
+
+class ShelveAssociateRecnoTestCase(ShelveAssociateTestCase):
+    dbtype = db.DB_RECNO
+    keytype = 0  # recno uses integer keys
+
+
+#----------------------------------------------------------------------
+
+class ThreadedAssociateTestCase(AssociateTestCase):
+    """Associate tests intended to load data from two writer threads."""
+
+    def addDataToDB(self, d):
+        # NOTE(review): 'Thread' is not imported anywhere in this module,
+        # so this method would raise NameError if ever run -- confirm.
+        t1 = Thread(target = self.writer1,
+                    args = (d, ))
+        t2 = Thread(target = self.writer2,
+                    args = (d, ))
+
+        t1.setDaemon(True)
+        t2.setDaemon(True)
+        t1.start()
+        t2.start()
+        t1.join()
+        t2.join()
+
+    def writer1(self, d):
+        # same records the single-threaded tests store
+        for key, value in musicdata.items():
+            if type(self.keytype) == type(''):
+                key = "%02d" % key
+            d.put(key, '|'.join(value))
+
+    def writer2(self, d):
+        # 500 extra records written concurrently with writer1
+        for x in range(100, 600):
+            key = 'z%2d' % x
+            value = [key] * 4
+            d.put(key, '|'.join(value))
+
+
+# NOTE(review): the three classes below inherit ShelveAssociateTestCase,
+# not ThreadedAssociateTestCase, so the threaded writers above are never
+# exercised.  Simply re-basing them would break finish_test's record
+# counts (writer2 adds 500 rows), so this is flagged rather than changed.
+class ThreadedAssociateHashTestCase(ShelveAssociateTestCase):
+    dbtype = db.DB_HASH
+
+class ThreadedAssociateBTreeTestCase(ShelveAssociateTestCase):
+    dbtype = db.DB_BTREE
+
+class ThreadedAssociateRecnoTestCase(ShelveAssociateTestCase):
+    dbtype = db.DB_RECNO
+    keytype = 0
+
+
+#----------------------------------------------------------------------
+
+def test_suite():
+    """Collect every test case defined in this module."""
+    suite = unittest.TestSuite()
+
+    suite.addTest(unittest.makeSuite(AssociateErrorTestCase))
+
+    suite.addTest(unittest.makeSuite(AssociateHashTestCase))
+    suite.addTest(unittest.makeSuite(AssociateBTreeTestCase))
+    suite.addTest(unittest.makeSuite(AssociateRecnoTestCase))
+
+    suite.addTest(unittest.makeSuite(AssociateBTreeTxnTestCase))
+
+    suite.addTest(unittest.makeSuite(ShelveAssociateHashTestCase))
+    suite.addTest(unittest.makeSuite(ShelveAssociateBTreeTestCase))
+    suite.addTest(unittest.makeSuite(ShelveAssociateRecnoTestCase))
+
+    # threaded variants only when the interpreter has threading
+    if have_threads:
+        suite.addTest(unittest.makeSuite(ThreadedAssociateHashTestCase))
+        suite.addTest(unittest.makeSuite(ThreadedAssociateBTreeTestCase))
+        suite.addTest(unittest.makeSuite(ThreadedAssociateRecnoTestCase))
+
+    return suite
+
+
+if __name__ == '__main__':
+    unittest.main(defaultTest='test_suite')
diff --git a/lib/python2.7/bsddb/test/test_basics.py b/lib/python2.7/bsddb/test/test_basics.py
new file mode 100644
index 0000000..1459d36
--- /dev/null
+++ b/lib/python2.7/bsddb/test/test_basics.py
@@ -0,0 +1,1158 @@
+"""
+Basic TestCases for BTree and hash DBs, with and without a DBEnv, with
+various DB flags, etc.
+"""
+
+import os
+import errno
+import string
+from pprint import pprint
+import unittest
+import time
+import sys
+
+from test_all import db, test_support, verbose, get_new_environment_path, \
+ get_new_database_path
+
+DASH = '-'
+
+
+#----------------------------------------------------------------------
+
+class VersionTestCase(unittest.TestCase):
+ def test00_version(self):
+ info = db.version()
+ if verbose:
+ print '\n', '-=' * 20
+ print 'bsddb.db.version(): %s' % (info, )
+ print db.DB_VERSION_STRING
+ print '-=' * 20
+ self.assertEqual(info, (db.DB_VERSION_MAJOR, db.DB_VERSION_MINOR,
+ db.DB_VERSION_PATCH))
+
+#----------------------------------------------------------------------
+
class BasicTestCase(unittest.TestCase):
    """Common machinery for the basic DB tests.

    Subclasses configure the class attributes below (``dbtype`` is
    mandatory) and inherit every ``testNN_*`` method.
    """
    dbtype = db.DB_UNKNOWN # must be set in derived class
    cachesize = (0, 1024*1024, 1)
    dbopenflags = 0
    dbsetflags = 0
    dbmode = 0660
    dbname = None
    useEnv = 0
    envflags = 0
    envsetflags = 0

    _numKeys = 1002 # PRIVATE. NOTE: must be an even value

    def setUp(self):
        # Create either an environment-backed DB or a stand-alone file DB
        # (depending on self.useEnv), open it, and populate it.
        if self.useEnv:
            self.homeDir=get_new_environment_path()
            try:
                self.env = db.DBEnv()
                self.env.set_lg_max(1024*1024)
                self.env.set_tx_max(30)
                self._t = int(time.time())
                self.env.set_tx_timestamp(self._t)
                self.env.set_flags(self.envsetflags, 1)
                self.env.open(self.homeDir, self.envflags | db.DB_CREATE)
                self.filename = "test"
            # Yes, a bare except is intended, since we're re-raising the exc.
            except:
                test_support.rmtree(self.homeDir)
                raise
        else:
            self.env = None
            self.filename = get_new_database_path()

        # create and open the DB
        self.d = db.DB(self.env)
        if not self.useEnv :
            # cachesize can only be tuned on env-less handles before open()
            self.d.set_cachesize(*self.cachesize)
            cachesize = self.d.get_cachesize()
            self.assertEqual(cachesize[0], self.cachesize[0])
            self.assertEqual(cachesize[2], self.cachesize[2])
            # Berkeley DB expands the cache 25% accounting overhead,
            # if the cache is small.
            self.assertEqual(125, int(100.0*cachesize[1]/self.cachesize[1]))
        self.d.set_flags(self.dbsetflags)
        if self.dbname:
            self.d.open(self.filename, self.dbname, self.dbtype,
                        self.dbopenflags|db.DB_CREATE, self.dbmode)
        else:
            self.d.open(self.filename,   # try out keyword args
                        mode = self.dbmode,
                        dbtype = self.dbtype,
                        flags = self.dbopenflags|db.DB_CREATE)

        if not self.useEnv:
            # set_cachesize() must fail once the DB is open
            self.assertRaises(db.DBInvalidArgError,
                    self.d.set_cachesize, *self.cachesize)

        self.populateDB()


    def tearDown(self):
        # Close the handle and remove whatever setUp() created on disk.
        self.d.close()
        if self.env is not None:
            self.env.close()
            test_support.rmtree(self.homeDir)
        else:
            os.remove(self.filename)



    def populateDB(self, _txn=None):
        # Insert self._numKeys records plus one empty-valued key, half the
        # keys in reverse order and half forwards (exercises both insert
        # directions of the access method).
        d = self.d

        for x in range(self._numKeys//2):
            key = '%04d' % (self._numKeys - x)  # insert keys in reverse order
            data = self.makeData(key)
            d.put(key, data, _txn)

        d.put('empty value', '', _txn)

        for x in range(self._numKeys//2-1):
            key = '%04d' % x  # and now some in forward order
            data = self.makeData(key)
            d.put(key, data, _txn)

        if _txn:
            _txn.commit()

        num = len(d)
        if verbose:
            print "created %d records" % num


    def makeData(self, key):
        # Deterministic value derived from the key, e.g. '0001-0001-...'.
        return DASH.join([key] * 5)



    #----------------------------------------

    def test01_GetsAndPuts(self):
        # Exercise get/put/delete/get_both/stat, the return-None-vs-raise
        # behaviour for missing keys, and reopening the file.
        d = self.d

        if verbose:
            print '\n', '-=' * 30
            print "Running %s.test01_GetsAndPuts..." % self.__class__.__name__

        for key in ['0001', '0100', '0400', '0700', '0999']:
            data = d.get(key)
            if verbose:
                print data

        self.assertEqual(d.get('0321'), '0321-0321-0321-0321-0321')

        # By default non-existent keys return None...
        self.assertEqual(d.get('abcd'), None)

        # ...but they raise exceptions in other situations.  Call
        # set_get_returns_none() to change it.
        try:
            d.delete('abcd')
        except db.DBNotFoundError, val:
            if sys.version_info < (2, 6) :
                self.assertEqual(val[0], db.DB_NOTFOUND)
            else :
                self.assertEqual(val.args[0], db.DB_NOTFOUND)
            if verbose: print val
        else:
            self.fail("expected exception")


        d.put('abcd', 'a new record')
        self.assertEqual(d.get('abcd'), 'a new record')

        d.put('abcd', 'same key')
        if self.dbsetflags & db.DB_DUP:
            # with duplicates enabled, the first value stays first
            self.assertEqual(d.get('abcd'), 'a new record')
        else:
            self.assertEqual(d.get('abcd'), 'same key')


        try:
            d.put('abcd', 'this should fail', flags=db.DB_NOOVERWRITE)
        except db.DBKeyExistError, val:
            if sys.version_info < (2, 6) :
                self.assertEqual(val[0], db.DB_KEYEXIST)
            else :
                self.assertEqual(val.args[0], db.DB_KEYEXIST)
            if verbose: print val
        else:
            self.fail("expected exception")

        if self.dbsetflags & db.DB_DUP:
            self.assertEqual(d.get('abcd'), 'a new record')
        else:
            self.assertEqual(d.get('abcd'), 'same key')


        # flush, close, and reopen to prove persistence
        d.sync()
        d.close()
        del d

        self.d = db.DB(self.env)
        if self.dbname:
            self.d.open(self.filename, self.dbname)
        else:
            self.d.open(self.filename)
        d = self.d

        self.assertEqual(d.get('0321'), '0321-0321-0321-0321-0321')
        if self.dbsetflags & db.DB_DUP:
            self.assertEqual(d.get('abcd'), 'a new record')
        else:
            self.assertEqual(d.get('abcd'), 'same key')

        rec = d.get_both('0555', '0555-0555-0555-0555-0555')
        if verbose:
            print rec

        self.assertEqual(d.get_both('0555', 'bad data'), None)

        # test default value
        data = d.get('bad key', 'bad data')
        self.assertEqual(data, 'bad data')

        # any object can pass through
        data = d.get('bad key', self)
        self.assertEqual(data, self)

        s = d.stat()
        self.assertEqual(type(s), type({}))
        if verbose:
            print 'd.stat() returned this dictionary:'
            pprint(s)


    #----------------------------------------

    def test02_DictionaryMethods(self):
        # The DB handle should behave like a mapping: __getitem__,
        # __setitem__, len(), keys(), items(), values(), has_key().
        d = self.d

        if verbose:
            print '\n', '-=' * 30
            print "Running %s.test02_DictionaryMethods..." % \
                  self.__class__.__name__

        for key in ['0002', '0101', '0401', '0701', '0998']:
            data = d[key]
            self.assertEqual(data, self.makeData(key))
            if verbose:
                print data

        self.assertEqual(len(d), self._numKeys)
        keys = d.keys()
        self.assertEqual(len(keys), self._numKeys)
        self.assertEqual(type(keys), type([]))

        d['new record'] = 'a new record'
        self.assertEqual(len(d), self._numKeys+1)
        keys = d.keys()
        self.assertEqual(len(keys), self._numKeys+1)

        d['new record'] = 'a replacement record'
        self.assertEqual(len(d), self._numKeys+1)
        keys = d.keys()
        self.assertEqual(len(keys), self._numKeys+1)

        if verbose:
            print "the first 10 keys are:"
            pprint(keys[:10])

        self.assertEqual(d['new record'], 'a replacement record')

# We check also the positional parameter
        self.assertEqual(d.has_key('0001', None), 1)
# We check also the keyword parameter
        self.assertEqual(d.has_key('spam', txn=None), 0)

        items = d.items()
        self.assertEqual(len(items), self._numKeys+1)
        self.assertEqual(type(items), type([]))
        self.assertEqual(type(items[0]), type(()))
        self.assertEqual(len(items[0]), 2)

        if verbose:
            print "the first 10 items are:"
            pprint(items[:10])

        values = d.values()
        self.assertEqual(len(values), self._numKeys+1)
        self.assertEqual(type(values), type([]))

        if verbose:
            print "the first 10 values are:"
            pprint(values[:10])


    #----------------------------------------

    def test02b_SequenceMethods(self):
        # Membership testing via the __contains__ protocol.
        d = self.d

        for key in ['0002', '0101', '0401', '0701', '0998']:
            data = d[key]
            self.assertEqual(data, self.makeData(key))
            if verbose:
                print data

        self.assertTrue(hasattr(d, "__contains__"))
        self.assertTrue("0401" in d)
        self.assertFalse("1234" in d)


    #----------------------------------------

    def test03_SimpleCursorStuff(self, get_raises_error=0, set_raises_error=0):
        # Full cursor walk-through: forward/backward iteration, set/get_both,
        # partial reads, delete-at-cursor, duplicated cursors, and abuse of
        # closed cursors.  The *_raises_error flags mirror the current
        # set_get_returns_none() mode (see test03b/test03c).
        if verbose:
            print '\n', '-=' * 30
            print "Running %s.test03_SimpleCursorStuff (get_error %s, set_error %s)..." % \
                  (self.__class__.__name__, get_raises_error, set_raises_error)

        if self.env and self.dbopenflags & db.DB_AUTO_COMMIT:
            txn = self.env.txn_begin()
        else:
            txn = None
        c = self.d.cursor(txn=txn)

        rec = c.first()
        count = 0
        while rec is not None:
            count = count + 1
            if verbose and count % 100 == 0:
                print rec
            try:
                rec = c.next()
            except db.DBNotFoundError, val:
                if get_raises_error:
                    if sys.version_info < (2, 6) :
                        self.assertEqual(val[0], db.DB_NOTFOUND)
                    else :
                        self.assertEqual(val.args[0], db.DB_NOTFOUND)
                    if verbose: print val
                    rec = None
                else:
                    self.fail("unexpected DBNotFoundError")
            self.assertEqual(c.get_current_size(), len(c.current()[1]),
                    "%s != len(%r)" % (c.get_current_size(), c.current()[1]))

        self.assertEqual(count, self._numKeys)


        rec = c.last()
        count = 0
        while rec is not None:
            count = count + 1
            if verbose and count % 100 == 0:
                print rec
            try:
                rec = c.prev()
            except db.DBNotFoundError, val:
                if get_raises_error:
                    if sys.version_info < (2, 6) :
                        self.assertEqual(val[0], db.DB_NOTFOUND)
                    else :
                        self.assertEqual(val.args[0], db.DB_NOTFOUND)
                    if verbose: print val
                    rec = None
                else:
                    self.fail("unexpected DBNotFoundError")

        self.assertEqual(count, self._numKeys)

        rec = c.set('0505')
        rec2 = c.current()
        self.assertEqual(rec, rec2)
        self.assertEqual(rec[0], '0505')
        self.assertEqual(rec[1], self.makeData('0505'))
        self.assertEqual(c.get_current_size(), len(rec[1]))

        # make sure we get empty values properly
        rec = c.set('empty value')
        self.assertEqual(rec[1], '')
        self.assertEqual(c.get_current_size(), 0)

        try:
            n = c.set('bad key')
        except db.DBNotFoundError, val:
            if sys.version_info < (2, 6) :
                self.assertEqual(val[0], db.DB_NOTFOUND)
            else :
                self.assertEqual(val.args[0], db.DB_NOTFOUND)
            if verbose: print val
        else:
            if set_raises_error:
                self.fail("expected exception")
            if n is not None:
                self.fail("expected None: %r" % (n,))

        rec = c.get_both('0404', self.makeData('0404'))
        self.assertEqual(rec, ('0404', self.makeData('0404')))

        try:
            n = c.get_both('0404', 'bad data')
        except db.DBNotFoundError, val:
            if sys.version_info < (2, 6) :
                self.assertEqual(val[0], db.DB_NOTFOUND)
            else :
                self.assertEqual(val.args[0], db.DB_NOTFOUND)
            if verbose: print val
        else:
            if get_raises_error:
                self.fail("expected exception")
            if n is not None:
                self.fail("expected None: %r" % (n,))

        if self.d.get_type() == db.DB_BTREE:
            # set_range is only meaningful for sorted (btree) access
            rec = c.set_range('011')
            if verbose:
                print "searched for '011', found: ", rec

            rec = c.set_range('011',dlen=0,doff=0)
            if verbose:
                print "searched (partial) for '011', found: ", rec
            if rec[1] != '': self.fail('expected empty data portion')

            ev = c.set_range('empty value')
            if verbose:
                print "search for 'empty value' returned", ev
            if ev[1] != '': self.fail('empty value lookup failed')

        c.set('0499')
        c.delete()
        try:
            rec = c.current()
        except db.DBKeyEmptyError, val:
            if get_raises_error:
                if sys.version_info < (2, 6) :
                    self.assertEqual(val[0], db.DB_KEYEMPTY)
                else :
                    self.assertEqual(val.args[0], db.DB_KEYEMPTY)
                if verbose: print val
            else:
                self.fail("unexpected DBKeyEmptyError")
        else:
            if get_raises_error:
                self.fail('DBKeyEmptyError exception expected')

        c.next()
        c2 = c.dup(db.DB_POSITION)
        self.assertEqual(c.current(), c2.current())

        c2.put('', 'a new value', db.DB_CURRENT)
        self.assertEqual(c.current(), c2.current())
        self.assertEqual(c.current()[1], 'a new value')

        # partial overwrite: insert 'er' at offset 5, replacing 0 bytes
        c2.put('', 'er', db.DB_CURRENT, dlen=0, doff=5)
        self.assertEqual(c2.current()[1], 'a newer value')

        c.close()
        c2.close()
        if txn:
            txn.commit()

        # time to abuse the closed cursors and hope we don't crash
        methods_to_test = {
            'current': (),
            'delete': (),
            'dup': (db.DB_POSITION,),
            'first': (),
            'get': (0,),
            'next': (),
            'prev': (),
            'last': (),
            'put':('', 'spam', db.DB_CURRENT),
            'set': ("0505",),
        }
        for method, args in methods_to_test.items():
            try:
                if verbose:
                    print "attempting to use a closed cursor's %s method" % \
                          method
                # a bug may cause a NULL pointer dereference...
                getattr(c, method)(*args)
            except db.DBError, val:
                if sys.version_info < (2, 6) :
                    self.assertEqual(val[0], 0)
                else :
                    self.assertEqual(val.args[0], 0)
                if verbose: print val
            else:
                self.fail("no exception raised when using a buggy cursor's"
                          "%s method" % method)

        #
        # free cursor referencing a closed database, it should not barf:
        #
        oldcursor = self.d.cursor(txn=txn)
        self.d.close()

        # this would originally cause a segfault when the cursor for a
        # closed database was cleaned up.  it should not anymore.
        # SF pybsddb bug id 667343
        del oldcursor

    def test03b_SimpleCursorWithoutGetReturnsNone0(self):
        # same test but raise exceptions instead of returning None
        if verbose:
            print '\n', '-=' * 30
            print "Running %s.test03b_SimpleCursorStuffWithoutGetReturnsNone..." % \
                  self.__class__.__name__

        old = self.d.set_get_returns_none(0)
        self.assertEqual(old, 2)
        self.test03_SimpleCursorStuff(get_raises_error=1, set_raises_error=1)

    def test03b_SimpleCursorWithGetReturnsNone1(self):
        # same test but raise exceptions instead of returning None
        if verbose:
            print '\n', '-=' * 30
            print "Running %s.test03b_SimpleCursorStuffWithoutGetReturnsNone..." % \
                  self.__class__.__name__

        old = self.d.set_get_returns_none(1)
        self.test03_SimpleCursorStuff(get_raises_error=0, set_raises_error=1)


    def test03c_SimpleCursorGetReturnsNone2(self):
        # same test but raise exceptions instead of returning None
        if verbose:
            print '\n', '-=' * 30
            print "Running %s.test03c_SimpleCursorStuffWithoutSetReturnsNone..." % \
                  self.__class__.__name__

        old = self.d.set_get_returns_none(1)
        self.assertEqual(old, 2)
        old = self.d.set_get_returns_none(2)
        self.assertEqual(old, 1)
        self.test03_SimpleCursorStuff(get_raises_error=0, set_raises_error=0)

    if db.version() >= (4, 6):
        # cursor priorities only exist from Berkeley DB 4.6 onwards
        def test03d_SimpleCursorPriority(self) :
            c = self.d.cursor()
            c.set_priority(db.DB_PRIORITY_VERY_LOW)  # Positional
            self.assertEqual(db.DB_PRIORITY_VERY_LOW, c.get_priority())
            c.set_priority(priority=db.DB_PRIORITY_HIGH)  # Keyword
            self.assertEqual(db.DB_PRIORITY_HIGH, c.get_priority())
            c.close()

    #----------------------------------------

    def test04_PartialGetAndPut(self):
        # Partial reads/writes via the dlen/doff keyword arguments.
        d = self.d
        if verbose:
            print '\n', '-=' * 30
            print "Running %s.test04_PartialGetAndPut..." % \
                  self.__class__.__name__

        key = "partialTest"
        data = "1" * 1000 + "2" * 1000
        d.put(key, data)
        self.assertEqual(d.get(key), data)
        self.assertEqual(d.get(key, dlen=20, doff=990),
                ("1" * 10) + ("2" * 10))

        d.put("partialtest2", ("1" * 30000) + "robin" )
        self.assertEqual(d.get("partialtest2", dlen=5, doff=30000), "robin")

        # There seems to be a bug in DB here...  Commented out the test for
        # now.
        ##self.assertEqual(d.get("partialtest2", dlen=5, doff=30010), "")

        if self.dbsetflags != db.DB_DUP:
            # Partial put with duplicate records requires a cursor
            d.put(key, "0000", dlen=2000, doff=0)
            self.assertEqual(d.get(key), "0000")

            d.put(key, "1111", dlen=1, doff=2)
            self.assertEqual(d.get(key), "0011110")

    #----------------------------------------

    def test05_GetSize(self):
        # get_size() must report the stored value length exactly.
        d = self.d
        if verbose:
            print '\n', '-=' * 30
            print "Running %s.test05_GetSize..." % self.__class__.__name__

        for i in range(1, 50000, 500):
            key = "size%s" % i
            #print "before ", i,
            d.put(key, "1" * i)
            #print "after",
            self.assertEqual(d.get_size(key), i)
            #print "done"

    #----------------------------------------

    def test06_Truncate(self):
        # truncate() returns the number of discarded records; a second
        # call on the now-empty DB must return 0.
        d = self.d
        if verbose:
            print '\n', '-=' * 30
            print "Running %s.test06_Truncate..." % self.__class__.__name__

        d.put("abcde", "ABCDE");
        num = d.truncate()
        self.assertTrue(num >= 1, "truncate returned <= 0 on non-empty database")
        num = d.truncate()
        self.assertEqual(num, 0,
                "truncate on empty DB returned nonzero (%r)" % (num,))

    #----------------------------------------

    def test07_verify(self):
        # Verify bug solved in 4.7.3pre8
        self.d.close()
        d = db.DB(self.env)
        d.verify(self.filename)


    #----------------------------------------

    if db.version() >= (4, 6):
        # DB->exists() appeared in Berkeley DB 4.6
        def test08_exists(self) :
            self.d.put("abcde", "ABCDE")
            self.assertTrue(self.d.exists("abcde") == True,
                    "DB->exists() returns wrong value")
            self.assertTrue(self.d.exists("x") == False,
                    "DB->exists() returns wrong value")

    #----------------------------------------

    if db.version() >= (4, 7):
        # DB->compact() with these keyword arguments needs 4.7+
        def test_compact(self) :
            d = self.d
            self.assertEqual(0, d.compact(flags=db.DB_FREELIST_ONLY))
            self.assertEqual(0, d.compact(flags=db.DB_FREELIST_ONLY))
            d.put("abcde", "ABCDE");
            d.put("bcde", "BCDE");
            d.put("abc", "ABC");
            d.put("monty", "python");
            d.delete("abc")
            d.delete("bcde")
            d.compact(start='abcde', stop='monty', txn=None,
                    compact_fillpercent=42, compact_pages=1,
                    compact_timeout=50000000,
                    flags=db.DB_FREELIST_ONLY|db.DB_FREE_SPACE)

    #----------------------------------------
+
+#----------------------------------------------------------------------
+
+
class BasicBTreeTestCase(BasicTestCase):
    # BTree flavour of the basic tests; no environment.
    dbtype = db.DB_BTREE
+
+
class BasicHashTestCase(BasicTestCase):
    # Hash flavour of the basic tests; no environment.
    dbtype = db.DB_HASH
+
+
class BasicBTreeWithThreadFlagTestCase(BasicTestCase):
    # BTree basic tests with the handle opened DB_THREAD (free-threaded).
    dbtype = db.DB_BTREE
    dbopenflags = db.DB_THREAD
+
+
class BasicHashWithThreadFlagTestCase(BasicTestCase):
    # Hash basic tests with the handle opened DB_THREAD (free-threaded).
    dbtype = db.DB_HASH
    dbopenflags = db.DB_THREAD
+
+
class BasicWithEnvTestCase(BasicTestCase):
    # Basic tests run inside a DBEnv with MPOOL and locking enabled;
    # adds environment-level rename/remove coverage.
    dbopenflags = db.DB_THREAD
    useEnv = 1
    envflags = db.DB_THREAD | db.DB_INIT_MPOOL | db.DB_INIT_LOCK

    #----------------------------------------

    def test09_EnvRemoveAndRename(self):
        # dbrename()/dbremove() must work on a database that has been closed.
        if not self.env:
            return

        if verbose:
            print '\n', '-=' * 30
            print "Running %s.test09_EnvRemoveAndRename..." % self.__class__.__name__

        # can't rename or remove an open DB
        self.d.close()

        newname = self.filename + '.renamed'
        self.env.dbrename(self.filename, None, newname)
        self.env.dbremove(newname)

    #----------------------------------------
+
+ #----------------------------------------
+
class BasicBTreeWithEnvTestCase(BasicWithEnvTestCase):
    # Environment-backed basic tests, BTree access method.
    dbtype = db.DB_BTREE
+
+
class BasicHashWithEnvTestCase(BasicWithEnvTestCase):
    # Environment-backed basic tests, Hash access method.
    dbtype = db.DB_HASH
+
+
+#----------------------------------------------------------------------
+
class BasicTransactionTestCase(BasicTestCase):
    # Basic tests wrapped in explicit transactions (DB_AUTO_COMMIT env).
    # populateDB() leaves an open transaction in self.txn that each test
    # may abort/commit; tearDown() commits whatever is left.
    if (sys.version_info < (2, 7)) or ((sys.version_info >= (3, 0)) and
            (sys.version_info < (3, 2))) :
        # Backport of assertIn for old unittest versions.
        def assertIn(self, a, b, msg=None) :
            return self.assertTrue(a in b, msg=msg)

    dbopenflags = db.DB_THREAD | db.DB_AUTO_COMMIT
    useEnv = 1
    envflags = (db.DB_THREAD | db.DB_INIT_MPOOL | db.DB_INIT_LOCK |
                db.DB_INIT_TXN)
    envsetflags = db.DB_AUTO_COMMIT


    def tearDown(self):
        # commit the transaction left open by the last test before the
        # base class closes the DB/env
        self.txn.commit()
        BasicTestCase.tearDown(self)


    def populateDB(self):
        # populate inside a transaction, then open a fresh one for the test
        txn = self.env.txn_begin()
        BasicTestCase.populateDB(self, _txn=txn)

        self.txn = self.env.txn_begin()


    def test06_Transactions(self):
        # abort vs commit visibility, cursor-in-txn iteration, checkpoints,
        # and log archiving.
        d = self.d
        if verbose:
            print '\n', '-=' * 30
            print "Running %s.test06_Transactions..." % self.__class__.__name__

        self.assertEqual(d.get('new rec', txn=self.txn), None)
        d.put('new rec', 'this is a new record', self.txn)
        self.assertEqual(d.get('new rec', txn=self.txn),
                'this is a new record')
        self.txn.abort()
        # the aborted put must not be visible
        self.assertEqual(d.get('new rec'), None)

        self.txn = self.env.txn_begin()

        self.assertEqual(d.get('new rec', txn=self.txn), None)
        d.put('new rec', 'this is a new record', self.txn)
        self.assertEqual(d.get('new rec', txn=self.txn),
                'this is a new record')
        self.txn.commit()
        self.assertEqual(d.get('new rec'), 'this is a new record')

        self.txn = self.env.txn_begin()
        c = d.cursor(self.txn)
        rec = c.first()
        count = 0
        while rec is not None:
            count = count + 1
            if verbose and count % 100 == 0:
                print rec
            rec = c.next()
        self.assertEqual(count, self._numKeys+1)

        c.close()                # Cursors *MUST* be closed before commit!
        self.txn.commit()

        # flush pending updates
        self.env.txn_checkpoint (0, 0, 0)

        statDict = self.env.log_stat(0);
        self.assertIn('magic', statDict)
        self.assertIn('version', statDict)
        self.assertIn('cur_file', statDict)
        self.assertIn('region_nowait', statDict)

        # must have at least one log file present:
        logs = self.env.log_archive(db.DB_ARCH_ABS | db.DB_ARCH_LOG)
        self.assertNotEqual(logs, None)
        for log in logs:
            if verbose:
                print 'log file: ' + log
        logs = self.env.log_archive(db.DB_ARCH_REMOVE)
        self.assertTrue(not logs)

        self.txn = self.env.txn_begin()

    #----------------------------------------

    if db.version() >= (4, 6):
        # transactional variant of DB->exists() (4.6+)
        def test08_exists(self) :
            txn = self.env.txn_begin()
            self.d.put("abcde", "ABCDE", txn=txn)
            txn.commit()
            txn = self.env.txn_begin()
            self.assertTrue(self.d.exists("abcde", txn=txn) == True,
                    "DB->exists() returns wrong value")
            self.assertTrue(self.d.exists("x", txn=txn) == False,
                    "DB->exists() returns wrong value")
            txn.abort()

    #----------------------------------------

    def test09_TxnTruncate(self):
        # truncate() inside an explicit transaction
        d = self.d
        if verbose:
            print '\n', '-=' * 30
            print "Running %s.test09_TxnTruncate..." % self.__class__.__name__

        d.put("abcde", "ABCDE");
        txn = self.env.txn_begin()
        num = d.truncate(txn)
        self.assertTrue(num >= 1, "truncate returned <= 0 on non-empty database")
        num = d.truncate(txn)
        self.assertEqual(num, 0,
                "truncate on empty DB returned nonzero (%r)" % (num,))
        txn.commit()

    #----------------------------------------

    def test10_TxnLateUse(self):
        # using a DB_TXN after abort/commit must raise, not crash
        txn = self.env.txn_begin()
        txn.abort()
        try:
            txn.abort()
        except db.DBError, e:
            pass
        else:
            raise RuntimeError, "DBTxn.abort() called after DB_TXN no longer valid w/o an exception"

        txn = self.env.txn_begin()
        txn.commit()
        try:
            txn.commit()
        except db.DBError, e:
            pass
        else:
            raise RuntimeError, "DBTxn.commit() called after DB_TXN no longer valid w/o an exception"


    #----------------------------------------


    if db.version() >= (4, 4):
        # transaction names (4.4+)
        def test_txn_name(self) :
            txn=self.env.txn_begin()
            self.assertEqual(txn.get_name(), "")
            txn.set_name("XXYY")
            self.assertEqual(txn.get_name(), "XXYY")
            txn.set_name("")
            self.assertEqual(txn.get_name(), "")
            txn.abort()

    #----------------------------------------


    def test_txn_set_timeout(self) :
        # per-transaction lock and txn timeouts
        txn=self.env.txn_begin()
        txn.set_timeout(1234567, db.DB_SET_LOCK_TIMEOUT)
        txn.set_timeout(2345678, flags=db.DB_SET_TXN_TIMEOUT)
        txn.abort()

    #----------------------------------------

    def test_get_tx_max(self) :
        # matches set_tx_max(30) in BasicTestCase.setUp()
        self.assertEqual(self.env.get_tx_max(), 30)

    def test_get_tx_timestamp(self) :
        # matches set_tx_timestamp(self._t) in BasicTestCase.setUp()
        self.assertEqual(self.env.get_tx_timestamp(), self._t)
+
+
+
class BTreeTransactionTestCase(BasicTransactionTestCase):
    # Transactional tests, BTree access method.
    dbtype = db.DB_BTREE
+
class HashTransactionTestCase(BasicTransactionTestCase):
    # Transactional tests, Hash access method.
    dbtype = db.DB_HASH
+
+
+
+#----------------------------------------------------------------------
+
class BTreeRecnoTestCase(BasicTestCase):
    # BTree with DB_RECNUM: record numbers are usable as logical keys.
    dbtype = db.DB_BTREE
    dbsetflags = db.DB_RECNUM

    def test09_RecnoInBTree(self):
        # get() by record number, and cursor get_recno()/set_recno()
        # round-tripping.
        d = self.d
        if verbose:
            print '\n', '-=' * 30
            print "Running %s.test09_RecnoInBTree..." % self.__class__.__name__

        rec = d.get(200)
        self.assertEqual(type(rec), type(()))
        self.assertEqual(len(rec), 2)
        if verbose:
            print "Record #200 is ", rec

        c = d.cursor()
        c.set('0200')
        num = c.get_recno()
        self.assertEqual(type(num), type(1))
        if verbose:
            print "recno of d['0200'] is ", num

        rec = c.current()
        # positioning by recno must land on the same record
        self.assertEqual(c.set_recno(num), rec)

        c.close()
+
+
+
class BTreeRecnoWithThreadFlagTestCase(BTreeRecnoTestCase):
    # Same recno tests with the DB handle opened DB_THREAD.
    dbopenflags = db.DB_THREAD
+
+#----------------------------------------------------------------------
+
class BasicDUPTestCase(BasicTestCase):
    # Tests with duplicate keys enabled (DB_DUP).
    dbsetflags = db.DB_DUP

    def test10_DuplicateKeys(self):
        # duplicate insertion order, cursor count(), next_dup()/next_nodup()
        d = self.d
        if verbose:
            print '\n', '-=' * 30
            print "Running %s.test10_DuplicateKeys..." % \
                  self.__class__.__name__

        d.put("dup0", "before")
        for x in "The quick brown fox jumped over the lazy dog.".split():
            d.put("dup1", x)
        d.put("dup2", "after")

        # a plain get() returns the first duplicate
        data = d.get("dup1")
        self.assertEqual(data, "The")
        if verbose:
            print data

        c = d.cursor()
        rec = c.set("dup1")
        self.assertEqual(rec, ('dup1', 'The'))

        next_reg = c.next()
        self.assertEqual(next_reg, ('dup1', 'quick'))

        rec = c.set("dup1")
        count = c.count()
        self.assertEqual(count, 9)

        next_dup = c.next_dup()
        self.assertEqual(next_dup, ('dup1', 'quick'))

        rec = c.set('dup1')
        while rec is not None:
            if verbose:
                print rec
            rec = c.next_dup()

        c.set('dup1')
        rec = c.next_nodup()
        # next_nodup() must skip past all 'dup1' entries
        self.assertNotEqual(rec[0], 'dup1')
        if verbose:
            print rec

        c.close()
+
+
+
class BTreeDUPTestCase(BasicDUPTestCase):
    # Duplicate-key tests, BTree access method.
    dbtype = db.DB_BTREE
+
class HashDUPTestCase(BasicDUPTestCase):
    # Duplicate-key tests, Hash access method.
    dbtype = db.DB_HASH
+
class BTreeDUPWithThreadTestCase(BasicDUPTestCase):
    # Duplicate-key tests, BTree, with a DB_THREAD handle.
    dbtype = db.DB_BTREE
    dbopenflags = db.DB_THREAD
+
class HashDUPWithThreadTestCase(BasicDUPTestCase):
    # Duplicate-key tests, Hash, with a DB_THREAD handle.
    dbtype = db.DB_HASH
    dbopenflags = db.DB_THREAD
+
+
+#----------------------------------------------------------------------
+
class BasicMultiDBTestCase(BasicTestCase):
    # Several named databases stored in one physical file.
    dbname = 'first'

    def otherType(self):
        # a second access method, different from self.dbtype, used for
        # the third database in the file
        if self.dbtype == db.DB_BTREE:
            return db.DB_HASH
        else:
            return db.DB_BTREE

    def test11_MultiDB(self):
        # create three named DBs in one file, close and reopen them all,
        # and check each one's record count independently.
        d1 = self.d
        if verbose:
            print '\n', '-=' * 30
            print "Running %s.test11_MultiDB..." % self.__class__.__name__

        d2 = db.DB(self.env)
        d2.open(self.filename, "second", self.dbtype,
                self.dbopenflags|db.DB_CREATE)
        d3 = db.DB(self.env)
        d3.open(self.filename, "third", self.otherType(),
                self.dbopenflags|db.DB_CREATE)

        for x in "The quick brown fox jumped over the lazy dog".split():
            d2.put(x, self.makeData(x))

        for x in string.ascii_letters:
            d3.put(x, x*70)

        d1.sync()
        d2.sync()
        d3.sync()
        d1.close()
        d2.close()
        d3.close()

        self.d = d1 = d2 = d3 = None

        # reopen all three by name; dbtype is inferred from the file
        self.d = d1 = db.DB(self.env)
        d1.open(self.filename, self.dbname, flags = self.dbopenflags)
        d2 = db.DB(self.env)
        d2.open(self.filename, "second", flags = self.dbopenflags)
        d3 = db.DB(self.env)
        d3.open(self.filename, "third", flags = self.dbopenflags)

        c1 = d1.cursor()
        c2 = d2.cursor()
        c3 = d3.cursor()

        count = 0
        rec = c1.first()
        while rec is not None:
            count = count + 1
            if verbose and (count % 50) == 0:
                print rec
            rec = c1.next()
        self.assertEqual(count, self._numKeys)

        count = 0
        rec = c2.first()
        while rec is not None:
            count = count + 1
            if verbose:
                print rec
            rec = c2.next()
        self.assertEqual(count, 9)

        count = 0
        rec = c3.first()
        while rec is not None:
            count = count + 1
            if verbose:
                print rec
            rec = c3.next()
        self.assertEqual(count, len(string.ascii_letters))


        c1.close()
        c2.close()
        c3.close()

        # d1 is left open; tearDown() closes it
        d2.close()
        d3.close()
+
+
+# Strange things happen if you try to use Multiple DBs per file without a
+# DBEnv with MPOOL and LOCKing...
+
class BTreeMultiDBTestCase(BasicMultiDBTestCase):
    # Multi-DB-per-file tests, BTree, inside a locking MPOOL environment
    # (required for multiple DBs per file — see the note above the class).
    dbtype = db.DB_BTREE
    dbopenflags = db.DB_THREAD
    useEnv = 1
    envflags = db.DB_THREAD | db.DB_INIT_MPOOL | db.DB_INIT_LOCK
+
class HashMultiDBTestCase(BasicMultiDBTestCase):
    # Multi-DB-per-file tests, Hash, inside a locking MPOOL environment.
    dbtype = db.DB_HASH
    dbopenflags = db.DB_THREAD
    useEnv = 1
    envflags = db.DB_THREAD | db.DB_INIT_MPOOL | db.DB_INIT_LOCK
+
+
class PrivateObject(unittest.TestCase) :
    # Tests for the get_private()/set_private() opaque-slot API; subclasses
    # provide self.obj in setUp().  NOTE: the refcount tests below depend on
    # the exact number of local references held — do not add bindings.
    def tearDown(self) :
        del self.obj

    def test01_DefaultIsNone(self) :
        self.assertEqual(self.obj.get_private(), None)

    def test02_assignment(self) :
        a = "example of private object"
        self.obj.set_private(a)
        b = self.obj.get_private()
        self.assertTrue(a is b)  # Object identity

    def test03_leak_assignment(self) :
        # set_private() must take exactly one reference and release it when
        # the slot is cleared
        a = "example of private object"
        refcount = sys.getrefcount(a)
        self.obj.set_private(a)
        self.assertEqual(refcount+1, sys.getrefcount(a))
        self.obj.set_private(None)
        self.assertEqual(refcount, sys.getrefcount(a))

    def test04_leak_GC(self) :
        # destroying the holder must release its reference to the payload
        a = "example of private object"
        refcount = sys.getrefcount(a)
        self.obj.set_private(a)
        self.obj = None
        self.assertEqual(refcount, sys.getrefcount(a))
+
class DBEnvPrivateObject(PrivateObject) :
    # Private-slot tests against a DBEnv handle.
    def setUp(self) :
        self.obj = db.DBEnv()
+
class DBPrivateObject(PrivateObject) :
    # Private-slot tests against a DB handle.
    def setUp(self) :
        self.obj = db.DB()
+
class CrashAndBurn(unittest.TestCase) :
    # Regression tests for interpreter crashes reported against bsddb.
    #def test01_OpenCrash(self) :
    #    # See http://bugs.python.org/issue3307
    #    self.assertRaises(db.DBInvalidArgError, db.DB, None, 65535)

    if db.version() < (4, 8) :
        # DB_RPCCLIENT was removed in Berkeley DB 4.8
        def test02_DBEnv_dealloc(self):
            # http://bugs.python.org/issue3885
            import gc
            self.assertRaises(db.DBInvalidArgError, db.DBEnv, ~db.DB_RPCCLIENT)
            gc.collect()
+
+
+#----------------------------------------------------------------------
+#----------------------------------------------------------------------
+
def test_suite():
    """Assemble every test case class in this module, in declaration
    order, into a single suite."""
    cases = [
        VersionTestCase,
        BasicBTreeTestCase,
        BasicHashTestCase,
        BasicBTreeWithThreadFlagTestCase,
        BasicHashWithThreadFlagTestCase,
        BasicBTreeWithEnvTestCase,
        BasicHashWithEnvTestCase,
        BTreeTransactionTestCase,
        HashTransactionTestCase,
        BTreeRecnoTestCase,
        BTreeRecnoWithThreadFlagTestCase,
        BTreeDUPTestCase,
        HashDUPTestCase,
        BTreeDUPWithThreadTestCase,
        HashDUPWithThreadTestCase,
        BTreeMultiDBTestCase,
        HashMultiDBTestCase,
        DBEnvPrivateObject,
        DBPrivateObject,
        CrashAndBurn,
    ]

    suite = unittest.TestSuite()
    for case in cases:
        suite.addTest(unittest.makeSuite(case))
    return suite
+
+
# Allow running this module's suite directly from the command line.
if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
diff --git a/lib/python2.7/bsddb/test/test_compare.py b/lib/python2.7/bsddb/test/test_compare.py
new file mode 100644
index 0000000..cb3b463
--- /dev/null
+++ b/lib/python2.7/bsddb/test/test_compare.py
@@ -0,0 +1,447 @@
+"""
+TestCases for python DB duplicate and Btree key comparison function.
+"""
+
+import sys, os, re
+import test_all
+from cStringIO import StringIO
+
+import unittest
+
+from test_all import db, dbshelve, test_support, \
+ get_new_environment_path, get_new_database_path
+
+
+# Needed for python 3. "cmp" vanished in 3.0.1
def cmp(a, b):
    """Three-way comparison: -1 if a < b, 0 if a == b, 1 if a > b."""
    # Boolean arithmetic gives the classic cmp() result directly.
    return (a > b) - (a < b)

# Plain byte-wise key ordering is just the generic three-way comparison.
lexical_cmp = cmp
+
def lowercase_cmp(left, right):
    """Case-insensitive three-way comparison of two strings."""
    a = left.lower()
    b = right.lower()
    return (a > b) - (a < b)
+
def make_reverse_comparator(cmp):
    """Return a comparator that orders exactly opposite to *cmp*."""
    def reversed_compare(left, right, delegate=cmp):
        # The delegate is captured as a default argument at definition
        # time; negating its result flips the ordering.
        return -delegate(left, right)
    return reversed_compare
+
# Expected orderings used by the comparator tests: raw byte ordering puts
# the uppercase 'CCCP' before the lowercase keys, while the
# case-insensitive ordering interleaves upper- and lowercase entries.
_expected_lexical_test_data = ['', 'CCCP', 'a', 'aaa', 'b', 'c', 'cccce', 'ccccf']
_expected_lowercase_test_data = ['', 'a', 'aaa', 'b', 'c', 'CC', 'cccce', 'ccccf', 'CCCP']
+
+class ComparatorTests(unittest.TestCase) :
+ def comparator_test_helper(self, comparator, expected_data) :
+ data = expected_data[:]
+
+ import sys
+ if sys.version_info < (2, 6) :
+ data.sort(cmp=comparator)
+ else : # Insertion Sort. Please, improve
+ data2 = []
+ for i in data :
+ for j, k in enumerate(data2) :
+ r = comparator(k, i)
+ if r == 1 :
+ data2.insert(j, i)
+ break
+ else :
+ data2.append(i)
+ data = data2
+
+ self.assertEqual(data, expected_data,
+ "comparator `%s' is not right: %s vs. %s"
+ % (comparator, expected_data, data))
+ def test_lexical_comparator(self) :
+ self.comparator_test_helper(lexical_cmp, _expected_lexical_test_data)
+ def test_reverse_lexical_comparator(self) :
+ rev = _expected_lexical_test_data[:]
+ rev.reverse()
+ self.comparator_test_helper(make_reverse_comparator(lexical_cmp),
+ rev)
+ def test_lowercase_comparator(self) :
+ self.comparator_test_helper(lowercase_cmp,
+ _expected_lowercase_test_data)
+
class AbstractBtreeKeyCompareTestCase(unittest.TestCase):
    """Shared fixture for set_bt_compare() tests.

    Each test gets a private DBEnv; subclasses create the database with a
    key comparator via createDB() and verify cursor ordering through
    finishTest()/check_results().
    """
    env = None
    db = None

    # unittest grew assertLess in 2.7 / 3.2; supply it for older versions.
    if (sys.version_info < (2, 7)) or ((sys.version_info >= (3, 0)) and
            (sys.version_info < (3, 2))):
        def assertLess(self, a, b, msg=None):
            return self.assertTrue(a < b, msg=msg)

    def setUp(self):
        self.filename = self.__class__.__name__ + '.db'
        self.homeDir = get_new_environment_path()
        env = db.DBEnv()
        env.open(self.homeDir,
                 db.DB_CREATE | db.DB_INIT_MPOOL
                 | db.DB_INIT_LOCK | db.DB_THREAD)
        self.env = env

    def tearDown(self):
        self.closeDB()
        if self.env is not None:
            self.env.close()
            self.env = None
        test_support.rmtree(self.homeDir)

    def addDataToDB(self, data):
        # Values record insertion order, which makes ordering bugs visible.
        i = 0
        for item in data:
            self.db.put(item, str(i))
            i = i + 1

    def createDB(self, key_comparator):
        self.db = db.DB(self.env)
        self.setupDB(key_comparator)
        self.db.open(self.filename, "test", db.DB_BTREE, db.DB_CREATE)

    def setupDB(self, key_comparator):
        # Must run before open(); subclasses may override.
        self.db.set_bt_compare(key_comparator)

    def closeDB(self):
        if self.db is not None:
            self.db.close()
            self.db = None

    def startTest(self):
        pass

    def finishTest(self, expected=None):
        if expected is not None:
            self.check_results(expected)
        self.closeDB()

    def check_results(self, expected):
        """Assert the database yields exactly the keys in *expected*, in order."""
        curs = self.db.cursor()
        try:
            index = 0
            rec = curs.first()
            while rec:
                key, ignore = rec
                # Fixed message typo: was "to many".
                self.assertLess(index, len(expected),
                                "too many values returned from cursor")
                self.assertEqual(expected[index], key,
                                 "expected value `%s' at %d but got `%s'"
                                 % (expected[index], index, key))
                index = index + 1
                rec = curs.next()
            self.assertEqual(index, len(expected),
                             "not enough values returned from cursor")
        finally:
            curs.close()
+
class BtreeKeyCompareTestCase(AbstractBtreeKeyCompareTestCase):
    """Positive tests: records come back in the comparator's order."""

    def runCompareTest(self, comparator, data):
        self.startTest()
        self.createDB(comparator)
        self.addDataToDB(data)
        self.finishTest(data)

    def test_lexical_ordering(self):
        self.runCompareTest(lexical_cmp, _expected_lexical_test_data)

    def test_reverse_lexical_ordering(self):
        self.runCompareTest(make_reverse_comparator(lexical_cmp),
                            list(reversed(_expected_lexical_test_data)))

    def test_compare_function_useless(self):
        self.startTest()
        def socialist_comparator(l, r):
            return 0
        self.createDB(socialist_comparator)
        self.addDataToDB(['b', 'a', 'd'])
        # all things being equal the first key will be the only key
        # in the database... (with the last key's value fwiw)
        self.finishTest(['b'])
+
+
class BtreeExceptionsTestCase(AbstractBtreeKeyCompareTestCase) :
    """Negative tests for set_bt_compare(): bad comparators, comparators
    that raise or return non-ints, and double assignment."""

    def test_raises_non_callable(self) :
        self.startTest()
        self.assertRaises(TypeError, self.createDB, 'abc')
        self.assertRaises(TypeError, self.createDB, None)
        self.finishTest()

    def test_set_bt_compare_with_function(self) :
        self.startTest()
        self.createDB(lexical_cmp)
        self.finishTest()

    # Ordering is irrelevant for these tests; disable the base-class check.
    def check_results(self, results) :
        pass

    def test_compare_function_incorrect(self) :
        self.startTest()
        def bad_comparator(l, r) :
            return 1
        # verify that set_bt_compare checks that comparator('', '') == 0
        self.assertRaises(TypeError, self.createDB, bad_comparator)
        self.finishTest()

    def verifyStderr(self, method, successRe) :
        """
        Call method() while capturing sys.stderr output internally and
        call self.fail() if successRe.search() does not match the stderr
        output. This is used to test for uncatchable exceptions.
        """
        stdErr = sys.stderr
        sys.stderr = StringIO()
        try:
            method()
        finally:
            # Restore stderr before asserting so failures are visible.
            temp = sys.stderr
            sys.stderr = stdErr
            errorOut = temp.getvalue()
            if not successRe.search(errorOut) :
                self.fail("unexpected stderr output:\n"+errorOut)
        if sys.version_info < (3, 0) : # XXX: How to do this in Py3k ???
            sys.exc_traceback = sys.last_traceback = None

    def _test_compare_function_exception(self) :
        self.startTest()
        def bad_comparator(l, r) :
            if l == r:
                # pass the set_bt_compare test
                return 0
            raise RuntimeError, "i'm a naughty comparison function"
        self.createDB(bad_comparator)
        #print "\n*** test should print 2 uncatchable tracebacks ***"
        self.addDataToDB(['a', 'b', 'c']) # this should raise, but...
        self.finishTest()

    def test_compare_function_exception(self) :
        # The comparator raises inside the BDB callback, where Python
        # exceptions cannot propagate; they go to stderr instead.
        self.verifyStderr(
            self._test_compare_function_exception,
            re.compile('(^RuntimeError:.* naughty.*){2}', re.M|re.S)
        )

    def _test_compare_function_bad_return(self) :
        self.startTest()
        def bad_comparator(l, r) :
            if l == r:
                # pass the set_bt_compare test
                return 0
            return l
        self.createDB(bad_comparator)
        #print "\n*** test should print 2 errors about returning an int ***"
        self.addDataToDB(['a', 'b', 'c']) # this should raise, but...
        self.finishTest()

    def test_compare_function_bad_return(self) :
        self.verifyStderr(
            self._test_compare_function_bad_return,
            re.compile('(^TypeError:.* return an int.*){2}', re.M|re.S)
        )


    def test_cannot_assign_twice(self) :
        # set_bt_compare may only be configured once per DB handle.
        def my_compare(a, b) :
            return 0

        self.startTest()
        self.createDB(my_compare)
        self.assertRaises(RuntimeError, self.db.set_bt_compare, my_compare)
+
class AbstractDuplicateCompareTestCase(unittest.TestCase):
    """Shared fixture for set_dup_compare() tests.

    All records are stored under one key ("key"); the comparator under
    test orders the duplicate data items, and check_results() walks a
    cursor to verify that ordering.
    """
    env = None
    db = None

    # unittest grew assertLess in 2.7 / 3.2; supply it for older versions.
    if (sys.version_info < (2, 7)) or ((sys.version_info >= (3, 0)) and
            (sys.version_info < (3, 2))):
        def assertLess(self, a, b, msg=None):
            return self.assertTrue(a < b, msg=msg)

    def setUp(self):
        self.filename = self.__class__.__name__ + '.db'
        self.homeDir = get_new_environment_path()
        env = db.DBEnv()
        env.open(self.homeDir,
                 db.DB_CREATE | db.DB_INIT_MPOOL
                 | db.DB_INIT_LOCK | db.DB_THREAD)
        self.env = env

    def tearDown(self):
        self.closeDB()
        if self.env is not None:
            self.env.close()
            self.env = None
        test_support.rmtree(self.homeDir)

    def addDataToDB(self, data):
        # All items share one key, so they become sorted duplicates.
        for item in data:
            self.db.put("key", item)

    def createDB(self, dup_comparator):
        self.db = db.DB(self.env)
        self.setupDB(dup_comparator)
        self.db.open(self.filename, "test", db.DB_BTREE, db.DB_CREATE)

    def setupDB(self, dup_comparator):
        # DB_DUPSORT is required for a custom duplicate comparator.
        self.db.set_flags(db.DB_DUPSORT)
        self.db.set_dup_compare(dup_comparator)

    def closeDB(self):
        if self.db is not None:
            self.db.close()
            self.db = None

    def startTest(self):
        pass

    def finishTest(self, expected=None):
        if expected is not None:
            self.check_results(expected)
        self.closeDB()

    def check_results(self, expected):
        """Assert the cursor yields exactly the data items in *expected* order."""
        curs = self.db.cursor()
        try:
            index = 0
            rec = curs.first()
            while rec:
                ignore, data = rec
                # Fixed message typo: was "to many".
                self.assertLess(index, len(expected),
                                "too many values returned from cursor")
                self.assertEqual(expected[index], data,
                                 "expected value `%s' at %d but got `%s'"
                                 % (expected[index], index, data))
                index = index + 1
                rec = curs.next()
            self.assertEqual(index, len(expected),
                             "not enough values returned from cursor")
        finally:
            curs.close()
+
class DuplicateCompareTestCase(AbstractDuplicateCompareTestCase):
    """Positive tests: duplicates come back in the comparator's order."""

    def runCompareTest(self, comparator, data):
        self.startTest()
        self.createDB(comparator)
        self.addDataToDB(data)
        self.finishTest(data)

    def test_lexical_ordering(self):
        self.runCompareTest(lexical_cmp, _expected_lexical_test_data)

    def test_reverse_lexical_ordering(self):
        self.runCompareTest(make_reverse_comparator(lexical_cmp),
                            list(reversed(_expected_lexical_test_data)))
+
class DuplicateExceptionsTestCase(AbstractDuplicateCompareTestCase) :
    """Negative tests for set_dup_compare(): bad comparators, comparators
    that raise or return non-ints, and double assignment."""

    def test_raises_non_callable(self) :
        self.startTest()
        self.assertRaises(TypeError, self.createDB, 'abc')
        self.assertRaises(TypeError, self.createDB, None)
        self.finishTest()

    def test_set_dup_compare_with_function(self) :
        self.startTest()
        self.createDB(lexical_cmp)
        self.finishTest()

    # Ordering is irrelevant for these tests; disable the base-class check.
    def check_results(self, results) :
        pass

    def test_compare_function_incorrect(self) :
        self.startTest()
        def bad_comparator(l, r) :
            return 1
        # verify that set_dup_compare checks that comparator('', '') == 0
        self.assertRaises(TypeError, self.createDB, bad_comparator)
        self.finishTest()

    def test_compare_function_useless(self) :
        self.startTest()
        def socialist_comparator(l, r) :
            return 0
        self.createDB(socialist_comparator)
        # DUPSORT does not allow "duplicate duplicates"
        self.assertRaises(db.DBKeyExistError, self.addDataToDB, ['b', 'a', 'd'])
        self.finishTest()

    def verifyStderr(self, method, successRe) :
        """
        Call method() while capturing sys.stderr output internally and
        call self.fail() if successRe.search() does not match the stderr
        output. This is used to test for uncatchable exceptions.
        """
        stdErr = sys.stderr
        sys.stderr = StringIO()
        try:
            method()
        finally:
            # Restore stderr before asserting so failures are visible.
            temp = sys.stderr
            sys.stderr = stdErr
            errorOut = temp.getvalue()
            if not successRe.search(errorOut) :
                self.fail("unexpected stderr output:\n"+errorOut)
        if sys.version_info < (3, 0) : # XXX: How to do this in Py3k ???
            sys.exc_traceback = sys.last_traceback = None

    def _test_compare_function_exception(self) :
        self.startTest()
        def bad_comparator(l, r) :
            if l == r:
                # pass the set_dup_compare test
                return 0
            raise RuntimeError, "i'm a naughty comparison function"
        self.createDB(bad_comparator)
        #print "\n*** test should print 2 uncatchable tracebacks ***"
        self.addDataToDB(['a', 'b', 'c']) # this should raise, but...
        self.finishTest()

    def test_compare_function_exception(self) :
        # The comparator raises inside the BDB callback, where Python
        # exceptions cannot propagate; they go to stderr instead.
        self.verifyStderr(
            self._test_compare_function_exception,
            re.compile('(^RuntimeError:.* naughty.*){2}', re.M|re.S)
        )

    def _test_compare_function_bad_return(self) :
        self.startTest()
        def bad_comparator(l, r) :
            if l == r:
                # pass the set_dup_compare test
                return 0
            return l
        self.createDB(bad_comparator)
        #print "\n*** test should print 2 errors about returning an int ***"
        self.addDataToDB(['a', 'b', 'c']) # this should raise, but...
        self.finishTest()

    def test_compare_function_bad_return(self) :
        self.verifyStderr(
            self._test_compare_function_bad_return,
            re.compile('(^TypeError:.* return an int.*){2}', re.M|re.S)
        )


    def test_cannot_assign_twice(self) :
        # set_dup_compare may only be configured once per DB handle.
        def my_compare(a, b) :
            return 0

        self.startTest()
        self.createDB(my_compare)
        self.assertRaises(RuntimeError, self.db.set_dup_compare, my_compare)
+
def test_suite():
    """Assemble every comparator-related TestCase into a single suite."""
    res = unittest.TestSuite()
    for case in (ComparatorTests,
                 BtreeExceptionsTestCase,
                 BtreeKeyCompareTestCase,
                 DuplicateExceptionsTestCase,
                 DuplicateCompareTestCase):
        res.addTest(unittest.makeSuite(case))
    return res
+
if __name__ == '__main__':
    # Bug fix: the suite factory above is named "test_suite", not "suite";
    # defaultTest='suite' made direct execution of this module fail with
    # AttributeError.  This also matches every sibling test module.
    unittest.main(defaultTest='test_suite')
diff --git a/lib/python2.7/bsddb/test/test_compat.py b/lib/python2.7/bsddb/test/test_compat.py
new file mode 100644
index 0000000..7679f19
--- /dev/null
+++ b/lib/python2.7/bsddb/test/test_compat.py
@@ -0,0 +1,184 @@
+"""
+Test cases adapted from the test_bsddb.py module in Python's
+regression test suite.
+"""
+
+import os, string
+import unittest
+
+from test_all import db, hashopen, btopen, rnopen, verbose, \
+ get_new_database_path
+
+
class CompatibilityTestCase(unittest.TestCase):
    """Exercise the legacy bsddb-style interfaces (btopen/hashopen/rnopen)
    adapted from Python's old test_bsddb.py regression test."""

    def setUp(self):
        self.filename = get_new_database_path()

    def tearDown(self):
        # Best-effort cleanup; the file may not exist if a test failed early.
        try:
            os.remove(self.filename)
        except os.error:
            pass


    def test01_btopen(self):
        self.do_bthash_test(btopen, 'btopen')

    def test02_hashopen(self):
        self.do_bthash_test(hashopen, 'hashopen')

    def test03_rnopen(self):
        # Recno databases: integer keys, set_location, deletion, iteration.
        data = "The quick brown fox jumped over the lazy dog.".split()
        if verbose:
            print "\nTesting: rnopen"

        f = rnopen(self.filename, 'c')
        for x in range(len(data)):
            f[x+1] = data[x]

        getTest = (f[1], f[2], f[3])
        if verbose:
            print '%s %s %s' % getTest

        self.assertEqual(getTest[1], 'quick', 'data mismatch!')

        rv = f.set_location(3)
        if rv != (3, 'brown'):
            self.fail('recno database set_location failed: '+repr(rv))

        f[25] = 'twenty-five'
        f.close()
        del f

        # Reopen read-write and check key errors / type errors.
        f = rnopen(self.filename, 'w')
        f[20] = 'twenty'

        def noRec(f):
            rec = f[15]
        self.assertRaises(KeyError, noRec, f)

        def badKey(f):
            rec = f['a string']
        self.assertRaises(TypeError, badKey, f)

        del f[3]

        rec = f.first()
        while rec:
            if verbose:
                print rec
            try:
                rec = f.next()
            except KeyError:
                break

        f.close()


    def test04_n_flag(self):
        # 'n' truncates/creates a fresh database.
        f = hashopen(self.filename, 'n')
        f.close()


    def do_bthash_test(self, factory, what):
        """Common scenario shared by the btopen and hashopen tests."""
        if verbose:
            print '\nTesting: ', what

        f = factory(self.filename, 'c')
        if verbose:
            print 'creation...'

        # truth test
        if f:
            if verbose: print "truth test: true"
        else:
            if verbose: print "truth test: false"

        f['0'] = ''
        f['a'] = 'Guido'
        f['b'] = 'van'
        f['c'] = 'Rossum'
        f['d'] = 'invented'
        # 'e' intentionally left out
        f['f'] = 'Python'
        if verbose:
            print '%s %s %s' % (f['a'], f['b'], f['c'])

        if verbose:
            print 'key ordering...'
        start = f.set_location(f.first()[0])
        if start != ('0', ''):
            self.fail("incorrect first() result: "+repr(start))
        while 1:
            try:
                rec = f.next()
            except KeyError:
                self.assertEqual(rec, f.last(), 'Error, last <> last!')
                f.previous()
                break
            if verbose:
                print rec

        self.assertTrue(f.has_key('f'), 'Error, missing key!')

        # test that set_location() returns the next nearest key, value
        # on btree databases and raises KeyError on others.
        if factory == btopen:
            e = f.set_location('e')
            if e != ('f', 'Python'):
                self.fail('wrong key,value returned: '+repr(e))
        else:
            try:
                e = f.set_location('e')
            except KeyError:
                pass
            else:
                self.fail("set_location on non-existent key did not raise KeyError")

        f.sync()
        f.close()
        # truth test: a closed handle must raise instead of answering.
        try:
            if f:
                if verbose: print "truth test: true"
            else:
                if verbose: print "truth test: false"
        except db.DBError:
            pass
        else:
            self.fail("Exception expected")

        del f

        if verbose:
            print 'modification...'
        f = factory(self.filename, 'w')
        f['d'] = 'discovered'

        if verbose:
            print 'access...'
        for key in f.keys():
            word = f[key]
            if verbose:
                print word

        def noRec(f):
            rec = f['no such key']
        self.assertRaises(KeyError, noRec, f)

        def badKey(f):
            rec = f[15]
        self.assertRaises(TypeError, badKey, f)

        f.close()
+
+
+#----------------------------------------------------------------------
+
+
def test_suite():
    """Suite containing only the bsddb compatibility tests."""
    suite = unittest.makeSuite(CompatibilityTestCase)
    return suite


if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
diff --git a/lib/python2.7/bsddb/test/test_cursor_pget_bug.py b/lib/python2.7/bsddb/test/test_cursor_pget_bug.py
new file mode 100644
index 0000000..22d3dc1
--- /dev/null
+++ b/lib/python2.7/bsddb/test/test_cursor_pget_bug.py
@@ -0,0 +1,54 @@
+import unittest
+import os, glob
+
+from test_all import db, test_support, get_new_environment_path, \
+ get_new_database_path
+
+#----------------------------------------------------------------------
+
class pget_bugTestCase(unittest.TestCase):
    """Verify that cursor.pget works properly"""
    # Single on-disk file holding both the primary and the secondary index.
    db_name = 'test-cursor_pget.db'

    def setUp(self):
        self.homeDir = get_new_environment_path()
        self.env = db.DBEnv()
        self.env.open(self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL)
        self.primary_db = db.DB(self.env)
        self.primary_db.open(self.db_name, 'primary', db.DB_BTREE, db.DB_CREATE)
        self.secondary_db = db.DB(self.env)
        self.secondary_db.set_flags(db.DB_DUP)
        self.secondary_db.open(self.db_name, 'secondary', db.DB_BTREE, db.DB_CREATE)
        # The secondary key is simply the primary value, so 'salad' and
        # 'omelet' both index under 'eggs'.
        self.primary_db.associate(self.secondary_db, lambda key, data: data)
        self.primary_db.put('salad', 'eggs')
        self.primary_db.put('spam', 'ham')
        self.primary_db.put('omelet', 'eggs')


    def tearDown(self):
        self.secondary_db.close()
        self.primary_db.close()
        self.env.close()
        del self.secondary_db
        del self.primary_db
        del self.env
        test_support.rmtree(self.homeDir)

    def test_pget(self):
        cursor = self.secondary_db.cursor()

        # pget returns (secondary_key, primary_key, primary_data) triples.
        self.assertEqual(('eggs', 'salad', 'eggs'), cursor.pget(key='eggs', flags=db.DB_SET))
        self.assertEqual(('eggs', 'omelet', 'eggs'), cursor.pget(db.DB_NEXT_DUP))
        self.assertEqual(None, cursor.pget(db.DB_NEXT_DUP))

        self.assertEqual(('ham', 'spam', 'ham'), cursor.pget('ham', 'spam', flags=db.DB_SET))
        self.assertEqual(None, cursor.pget(db.DB_NEXT_DUP))

        cursor.close()
+
+
def test_suite():
    """Suite containing the cursor.pget regression test."""
    return unittest.makeSuite(pget_bugTestCase)


if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
diff --git a/lib/python2.7/bsddb/test/test_db.py b/lib/python2.7/bsddb/test/test_db.py
new file mode 100644
index 0000000..2bc109f
--- /dev/null
+++ b/lib/python2.7/bsddb/test/test_db.py
@@ -0,0 +1,163 @@
+import unittest
+import os, glob
+
+from test_all import db, test_support, get_new_environment_path, \
+ get_new_database_path
+
+#----------------------------------------------------------------------
+
class DB(unittest.TestCase):
    """Base fixture: a fresh, environment-less DB handle per test."""

    def setUp(self):
        self.path = get_new_database_path()
        self.db = db.DB()

    def tearDown(self):
        self.db.close()
        del self.db
        test_support.unlink(self.path)
+
class DB_general(DB) :
    """get/set round-trips for options common to every access method."""

    def test_get_open_flags(self) :
        self.db.open(self.path, dbtype=db.DB_HASH, flags = db.DB_CREATE)
        self.assertEqual(db.DB_CREATE, self.db.get_open_flags())

    def test_get_open_flags2(self) :
        self.db.open(self.path, dbtype=db.DB_HASH, flags = db.DB_CREATE |
                db.DB_THREAD)
        self.assertEqual(db.DB_CREATE | db.DB_THREAD, self.db.get_open_flags())

    def test_get_dbname_filename(self) :
        self.db.open(self.path, dbtype=db.DB_HASH, flags = db.DB_CREATE)
        # No dbname given, so the second element is None.
        self.assertEqual((self.path, None), self.db.get_dbname())

    def test_get_dbname_filename_database(self) :
        name = "jcea-random-name"
        self.db.open(self.path, dbname=name, dbtype=db.DB_HASH,
                flags = db.DB_CREATE)
        self.assertEqual((self.path, name), self.db.get_dbname())

    def test_bt_minkey(self) :
        for i in [17, 108, 1030] :
            self.db.set_bt_minkey(i)
            self.assertEqual(i, self.db.get_bt_minkey())

    def test_lorder(self) :
        # Only 1234 (little-endian) and 4321 (big-endian) are legal.
        self.db.set_lorder(1234)
        self.assertEqual(1234, self.db.get_lorder())
        self.db.set_lorder(4321)
        self.assertEqual(4321, self.db.get_lorder())
        self.assertRaises(db.DBInvalidArgError, self.db.set_lorder, 9182)

    if db.version() >= (4, 6) :
        def test_priority(self) :
            flags = [db.DB_PRIORITY_VERY_LOW, db.DB_PRIORITY_LOW,
                    db.DB_PRIORITY_DEFAULT, db.DB_PRIORITY_HIGH,
                    db.DB_PRIORITY_VERY_HIGH]
            for flag in flags :
                self.db.set_priority(flag)
                self.assertEqual(flag, self.db.get_priority())

    def test_get_transactional(self) :
        # Without a transactional environment the handle is never
        # transactional, open or not.
        self.assertFalse(self.db.get_transactional())
        self.db.open(self.path, dbtype=db.DB_HASH, flags = db.DB_CREATE)
        self.assertFalse(self.db.get_transactional())
+
class DB_hash(DB) :
    """get/set round-trips for hash-specific parameters."""

    def test_h_ffactor(self) :
        for ffactor in [4, 16, 256] :
            self.db.set_h_ffactor(ffactor)
            self.assertEqual(ffactor, self.db.get_h_ffactor())

    def test_h_nelem(self) :
        for nelem in [1, 2, 4] :
            nelem = nelem*1024*1024 # Millions
            self.db.set_h_nelem(nelem)
            self.assertEqual(nelem, self.db.get_h_nelem())

    def test_pagesize(self) :
        for i in xrange(9, 17) : # From 512 to 65536
            i = 1<<i
            self.db.set_pagesize(i)
            self.assertEqual(i, self.db.get_pagesize())

        # The valid values goes from 512 to 65536
        # Test 131072 bytes...
        self.assertRaises(db.DBInvalidArgError, self.db.set_pagesize, 1<<17)
        # Test 256 bytes...
        self.assertRaises(db.DBInvalidArgError, self.db.set_pagesize, 1<<8)
+
class DB_txn(DB) :
    """DB tests that need a transactional environment."""

    def setUp(self) :
        # Overrides DB.setUp: the handle is bound to a TXN-enabled DBEnv.
        self.homeDir = get_new_environment_path()
        self.env = db.DBEnv()
        self.env.open(self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL |
                db.DB_INIT_LOG | db.DB_INIT_TXN)
        self.db = db.DB(self.env)

    def tearDown(self) :
        self.db.close()
        del self.db
        self.env.close()
        del self.env
        test_support.rmtree(self.homeDir)

    def test_flags(self) :
        # Flags accumulate: setting a second one does not clear the first.
        self.db.set_flags(db.DB_CHKSUM)
        self.assertEqual(db.DB_CHKSUM, self.db.get_flags())
        self.db.set_flags(db.DB_TXN_NOT_DURABLE)
        self.assertEqual(db.DB_TXN_NOT_DURABLE | db.DB_CHKSUM,
                self.db.get_flags())

    def test_get_transactional(self) :
        self.assertFalse(self.db.get_transactional())
        # DB_AUTO_COMMIT = Implicit transaction
        self.db.open("XXX", dbtype=db.DB_HASH,
                flags = db.DB_CREATE | db.DB_AUTO_COMMIT)
        self.assertTrue(self.db.get_transactional())
+
class DB_recno(DB) :
    """get/set round-trips for recno-specific parameters."""

    def test_re_pad(self) :
        # Both single characters and their ordinal values are accepted;
        # the getter always reports the integer ordinal.
        for i in [' ', '*'] : # Check chars
            self.db.set_re_pad(i)
            self.assertEqual(ord(i), self.db.get_re_pad())
        for i in [97, 65] : # Check integers
            self.db.set_re_pad(i)
            self.assertEqual(i, self.db.get_re_pad())

    def test_re_delim(self) :
        for i in [' ', '*'] : # Check chars
            self.db.set_re_delim(i)
            self.assertEqual(ord(i), self.db.get_re_delim())
        for i in [97, 65] : # Check integers
            self.db.set_re_delim(i)
            self.assertEqual(i, self.db.get_re_delim())

    def test_re_source(self) :
        for i in ["test", "test2", "test3"] :
            self.db.set_re_source(i)
            self.assertEqual(i, self.db.get_re_source())
+
class DB_queue(DB) :
    """get/set round-trips for queue-specific parameters."""

    def test_re_len(self) :
        for i in [33, 65, 300, 2000] :
            self.db.set_re_len(i)
            self.assertEqual(i, self.db.get_re_len())

    def test_q_extentsize(self) :
        for i in [1, 60, 100] :
            self.db.set_q_extentsize(i)
            self.assertEqual(i, self.db.get_q_extentsize())
+
def test_suite():
    """Collect every DB parameter get/set test case."""
    suite = unittest.TestSuite()
    for case in (DB_general, DB_txn, DB_hash, DB_recno, DB_queue):
        suite.addTest(unittest.makeSuite(case))
    return suite

if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
diff --git a/lib/python2.7/bsddb/test/test_dbenv.py b/lib/python2.7/bsddb/test/test_dbenv.py
new file mode 100644
index 0000000..76ef7db
--- /dev/null
+++ b/lib/python2.7/bsddb/test/test_dbenv.py
@@ -0,0 +1,525 @@
+import unittest
+import os, glob
+
+from test_all import db, test_support, get_new_environment_path, \
+ get_new_database_path
+
+#----------------------------------------------------------------------
+
class DBEnv(unittest.TestCase):
    """Base fixture: a fresh, unopened DBEnv per test.

    Subclasses open the environment themselves, since several tests
    exercise configuration that is only legal before open().
    """

    def setUp(self):
        self.homeDir = get_new_environment_path()
        self.env = db.DBEnv()

    def tearDown(self):
        self.env.close()
        del self.env
        test_support.rmtree(self.homeDir)
+
class DBEnv_general(DBEnv) :
    """get/set round-trips for general DBEnv configuration.

    Several tests also verify that reconfiguration after open() raises
    DBInvalidArgError instead of crashing.
    """

    def test_get_open_flags(self) :
        flags = db.DB_CREATE | db.DB_INIT_MPOOL
        self.env.open(self.homeDir, flags)
        self.assertEqual(flags, self.env.get_open_flags())

    def test_get_open_flags2(self) :
        flags = db.DB_CREATE | db.DB_INIT_MPOOL | \
                db.DB_INIT_LOCK | db.DB_THREAD
        self.env.open(self.homeDir, flags)
        self.assertEqual(flags, self.env.get_open_flags())

    # APIs only available since Berkeley DB 4.7.
    if db.version() >= (4, 7) :
        def test_lk_partitions(self) :
            for i in [10, 20, 40] :
                self.env.set_lk_partitions(i)
                self.assertEqual(i, self.env.get_lk_partitions())

        def test_getset_intermediate_dir_mode(self) :
            self.assertEqual(None, self.env.get_intermediate_dir_mode())
            for mode in ["rwx------", "rw-rw-rw-", "rw-r--r--"] :
                self.env.set_intermediate_dir_mode(mode)
                self.assertEqual(mode, self.env.get_intermediate_dir_mode())
            self.assertRaises(db.DBInvalidArgError,
                    self.env.set_intermediate_dir_mode, "abcde")

    # APIs only available since Berkeley DB 4.6.
    if db.version() >= (4, 6) :
        def test_thread(self) :
            for i in [16, 100, 1000] :
                self.env.set_thread_count(i)
                self.assertEqual(i, self.env.get_thread_count())

        def test_cache_max(self) :
            for size in [64, 128] :
                size = size*1024*1024 # Megabytes
                self.env.set_cache_max(0, size)
                size2 = self.env.get_cache_max()
                self.assertEqual(0, size2[0])
                # BDB may round the value up, but not by more than 2x.
                self.assertTrue(size <= size2[1])
                self.assertTrue(2*size > size2[1])

    # APIs only available since Berkeley DB 4.4.
    if db.version() >= (4, 4) :
        def test_mutex_stat(self) :
            self.env.open(self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL |
                    db.DB_INIT_LOCK)
            stat = self.env.mutex_stat()
            self.assertTrue("mutex_inuse_max" in stat)

        def test_lg_filemode(self) :
            for i in [0600, 0660, 0666] :
                self.env.set_lg_filemode(i)
                self.assertEqual(i, self.env.get_lg_filemode())

        def test_mp_max_openfd(self) :
            for i in [17, 31, 42] :
                self.env.set_mp_max_openfd(i)
                self.assertEqual(i, self.env.get_mp_max_openfd())

        def test_mp_max_write(self) :
            for i in [100, 200, 300] :
                for j in [1, 2, 3] :
                    j *= 1000000
                    self.env.set_mp_max_write(i, j)
                    v=self.env.get_mp_max_write()
                    self.assertEqual((i, j), v)

    def test_invalid_txn(self) :
        # This environment doesn't support transactions
        self.assertRaises(db.DBInvalidArgError, self.env.txn_begin)

    def test_mp_mmapsize(self) :
        for i in [16, 32, 64] :
            i *= 1024*1024
            self.env.set_mp_mmapsize(i)
            self.assertEqual(i, self.env.get_mp_mmapsize())

    def test_tmp_dir(self) :
        for i in ["a", "bb", "ccc"] :
            self.env.set_tmp_dir(i)
            self.assertEqual(i, self.env.get_tmp_dir())

    def test_flags(self) :
        # set_flags(flag, 1) sets, set_flags(flag, 0) clears.
        self.env.set_flags(db.DB_AUTO_COMMIT, 1)
        self.assertEqual(db.DB_AUTO_COMMIT, self.env.get_flags())
        self.env.set_flags(db.DB_TXN_NOSYNC, 1)
        self.assertEqual(db.DB_AUTO_COMMIT | db.DB_TXN_NOSYNC,
                self.env.get_flags())
        self.env.set_flags(db.DB_AUTO_COMMIT, 0)
        self.assertEqual(db.DB_TXN_NOSYNC, self.env.get_flags())
        self.env.set_flags(db.DB_TXN_NOSYNC, 0)
        self.assertEqual(0, self.env.get_flags())

    def test_lk_max_objects(self) :
        for i in [1000, 2000, 3000] :
            self.env.set_lk_max_objects(i)
            self.assertEqual(i, self.env.get_lk_max_objects())

    def test_lk_max_locks(self) :
        for i in [1000, 2000, 3000] :
            self.env.set_lk_max_locks(i)
            self.assertEqual(i, self.env.get_lk_max_locks())

    def test_lk_max_lockers(self) :
        for i in [1000, 2000, 3000] :
            self.env.set_lk_max_lockers(i)
            self.assertEqual(i, self.env.get_lk_max_lockers())

    def test_lg_regionmax(self) :
        for i in [128, 256, 1000] :
            i = i*1024*1024
            self.env.set_lg_regionmax(i)
            j = self.env.get_lg_regionmax()
            # BDB may round the value up, but not by more than 2x.
            self.assertTrue(i <= j)
            self.assertTrue(2*i > j)

    def test_lk_detect(self) :
        flags= [db.DB_LOCK_DEFAULT, db.DB_LOCK_EXPIRE, db.DB_LOCK_MAXLOCKS,
                db.DB_LOCK_MINLOCKS, db.DB_LOCK_MINWRITE,
                db.DB_LOCK_OLDEST, db.DB_LOCK_RANDOM, db.DB_LOCK_YOUNGEST]

        flags.append(db.DB_LOCK_MAXWRITE)

        for i in flags :
            self.env.set_lk_detect(i)
            self.assertEqual(i, self.env.get_lk_detect())

    def test_lg_dir(self) :
        for i in ["a", "bb", "ccc", "dddd"] :
            self.env.set_lg_dir(i)
            self.assertEqual(i, self.env.get_lg_dir())

    def test_lg_bsize(self) :
        log_size = 70*1024
        self.env.set_lg_bsize(log_size)
        self.assertTrue(self.env.get_lg_bsize() >= log_size)
        self.assertTrue(self.env.get_lg_bsize() < 4*log_size)
        self.env.set_lg_bsize(4*log_size)
        self.assertTrue(self.env.get_lg_bsize() >= 4*log_size)

    def test_setget_data_dirs(self) :
        dirs = ("a", "b", "c", "d")
        for i in dirs :
            self.env.set_data_dir(i)
        self.assertEqual(dirs, self.env.get_data_dirs())

    def test_setget_cachesize(self) :
        cachesize = (0, 512*1024*1024, 3)
        self.env.set_cachesize(*cachesize)
        self.assertEqual(cachesize, self.env.get_cachesize())

        cachesize = (0, 1*1024*1024, 5)
        self.env.set_cachesize(*cachesize)
        cachesize2 = self.env.get_cachesize()
        self.assertEqual(cachesize[0], cachesize2[0])
        self.assertEqual(cachesize[2], cachesize2[2])
        # Berkeley DB expands the cache 25% accounting overhead,
        # if the cache is small.
        self.assertEqual(125, int(100.0*cachesize2[1]/cachesize[1]))

        # You can not change configuration after opening
        # the environment.
        self.env.open(self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL)
        cachesize = (0, 2*1024*1024, 1)
        self.assertRaises(db.DBInvalidArgError,
            self.env.set_cachesize, *cachesize)
        cachesize3 = self.env.get_cachesize()
        self.assertEqual(cachesize2[0], cachesize3[0])
        self.assertEqual(cachesize2[2], cachesize3[2])
        # In Berkeley DB 5.1, the cachesize can change when opening the Env
        self.assertTrue(cachesize2[1] <= cachesize3[1])

    def test_set_cachesize_dbenv_db(self) :
        # You can not configure the cachesize using
        # the database handle, if you are using an environment.
        d = db.DB(self.env)
        self.assertRaises(db.DBInvalidArgError,
            d.set_cachesize, 0, 1024*1024, 1)

    def test_setget_shm_key(self) :
        shm_key=137
        self.env.set_shm_key(shm_key)
        self.assertEqual(shm_key, self.env.get_shm_key())
        self.env.set_shm_key(shm_key+1)
        self.assertEqual(shm_key+1, self.env.get_shm_key())

        # You can not change configuration after opening
        # the environment.
        self.env.open(self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL)
        # If we try to reconfigure cache after opening the
        # environment, core dump.
        self.assertRaises(db.DBInvalidArgError,
            self.env.set_shm_key, shm_key)
        self.assertEqual(shm_key+1, self.env.get_shm_key())

    # Mutex APIs only available since Berkeley DB 4.4.
    if db.version() >= (4, 4) :
        def test_mutex_setget_max(self) :
            v = self.env.mutex_get_max()
            v2 = v*2+1

            self.env.mutex_set_max(v2)
            self.assertEqual(v2, self.env.mutex_get_max())

            self.env.mutex_set_max(v)
            self.assertEqual(v, self.env.mutex_get_max())

            # You can not change configuration after opening
            # the environment.
            self.env.open(self.homeDir, db.DB_CREATE)
            self.assertRaises(db.DBInvalidArgError,
                    self.env.mutex_set_max, v2)

        def test_mutex_setget_increment(self) :
            v = self.env.mutex_get_increment()
            v2 = 127

            self.env.mutex_set_increment(v2)
            self.assertEqual(v2, self.env.mutex_get_increment())

            self.env.mutex_set_increment(v)
            self.assertEqual(v, self.env.mutex_get_increment())

            # You can not change configuration after opening
            # the environment.
            self.env.open(self.homeDir, db.DB_CREATE)
            self.assertRaises(db.DBInvalidArgError,
                    self.env.mutex_set_increment, v2)

        def test_mutex_setget_tas_spins(self) :
            self.env.mutex_set_tas_spins(0) # Default = BDB decides
            v = self.env.mutex_get_tas_spins()
            v2 = v*2+1

            self.env.mutex_set_tas_spins(v2)
            self.assertEqual(v2, self.env.mutex_get_tas_spins())

            self.env.mutex_set_tas_spins(v)
            self.assertEqual(v, self.env.mutex_get_tas_spins())

            # In this case, you can change configuration
            # after opening the environment.
            self.env.open(self.homeDir, db.DB_CREATE)
            self.env.mutex_set_tas_spins(v2)

        def test_mutex_setget_align(self) :
            v = self.env.mutex_get_align()
            v2 = 64
            if v == 64 :
                v2 = 128

            self.env.mutex_set_align(v2)
            self.assertEqual(v2, self.env.mutex_get_align())

            # Requires a nonzero power of two
            self.assertRaises(db.DBInvalidArgError,
                    self.env.mutex_set_align, 0)
            self.assertRaises(db.DBInvalidArgError,
                    self.env.mutex_set_align, 17)

            self.env.mutex_set_align(2*v2)
            self.assertEqual(2*v2, self.env.mutex_get_align())

            # You can not change configuration after opening
            # the environment.
            self.env.open(self.homeDir, db.DB_CREATE)
            self.assertRaises(db.DBInvalidArgError,
                    self.env.mutex_set_align, v2)
+
+
class DBEnv_log(DBEnv) :
    """Logging-subsystem tests on an environment WITHOUT transactions."""
    def setUp(self):
        DBEnv.setUp(self)
        self.env.open(self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL | db.DB_INIT_LOG)

    def test_log_file(self) :
        # LSN (1, 1) lives in the first log file; names are "log." plus a
        # zero-padded 10-digit file number.
        log_file = self.env.log_file((1, 1))
        self.assertEqual("log.0000000001", log_file[-14:])

    if db.version() >= (4, 4) :
        # The version with transactions is checked in other test object
        def test_log_printf(self) :
            """A message written with log_printf is visible at the log tail."""
            msg = "This is a test..."
            self.env.log_printf(msg)
            logc = self.env.log_cursor()
            self.assertTrue(msg in (logc.last()[1]))

    if db.version() >= (4, 7) :
        def test_log_config(self) :
            """log_set_config flags can be set and cleared independently."""
            self.env.log_set_config(db.DB_LOG_DSYNC | db.DB_LOG_ZERO, 1)
            self.assertTrue(self.env.log_get_config(db.DB_LOG_DSYNC))
            self.assertTrue(self.env.log_get_config(db.DB_LOG_ZERO))
            # Clearing one flag must leave the other untouched.
            self.env.log_set_config(db.DB_LOG_ZERO, 0)
            self.assertTrue(self.env.log_get_config(db.DB_LOG_DSYNC))
            self.assertFalse(self.env.log_get_config(db.DB_LOG_ZERO))
+
+
class DBEnv_log_txn(DBEnv) :
    """Logging-subsystem tests on a transactional environment."""
    def setUp(self):
        DBEnv.setUp(self)
        self.env.open(self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL |
                db.DB_INIT_LOG | db.DB_INIT_TXN)

    if (db.version() >= (4, 5)) and (db.version() < (5, 2)) :
        def test_tx_max(self) :
            """Opening more transactions than get_tx_max() raises MemoryError."""
            txns=[]
            def tx() :
                for i in xrange(self.env.get_tx_max()) :
                    txns.append(self.env.txn_begin())

            tx()
            self.assertRaises(MemoryError, tx)

            # Abort the transactions before garbage collection,
            # to avoid "warnings".
            for i in txns :
                i.abort()

    if db.version() >= (4, 4) :
        # The version without transactions is checked in other test object
        def test_log_printf(self) :
            """log_printf inside a txn is stored on commit, dropped on abort."""
            msg = "This is a test..."
            txn = self.env.txn_begin()
            self.env.log_printf(msg, txn=txn)
            txn.commit()
            logc = self.env.log_cursor()
            logc.last() # Skip the commit
            self.assertTrue(msg in (logc.prev()[1]))

            msg = "This is another test..."
            txn = self.env.txn_begin()
            self.env.log_printf(msg, txn=txn)
            txn.abort() # Do not store the new message
            logc.last() # Skip the abort
            self.assertTrue(msg not in (logc.prev()[1]))

            msg = "This is a third test..."
            txn = self.env.txn_begin()
            self.env.log_printf(msg, txn=txn)
            txn.commit() # This time the message IS stored
            logc.last() # Skip the commit
            self.assertTrue(msg in (logc.prev()[1]))
+
+
+class DBEnv_memp(DBEnv):
+ def setUp(self):
+ DBEnv.setUp(self)
+ self.env.open(self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL | db.DB_INIT_LOG)
+ self.db = db.DB(self.env)
+ self.db.open("test", db.DB_HASH, db.DB_CREATE, 0660)
+
+ def tearDown(self):
+ self.db.close()
+ del self.db
+ DBEnv.tearDown(self)
+
+ def test_memp_1_trickle(self) :
+ self.db.put("hi", "bye")
+ self.assertTrue(self.env.memp_trickle(100) > 0)
+
+# Preserve the order, do "memp_trickle" test first
+ def test_memp_2_sync(self) :
+ self.db.put("hi", "bye")
+ self.env.memp_sync() # Full flush
+ # Nothing to do...
+ self.assertTrue(self.env.memp_trickle(100) == 0)
+
+ self.db.put("hi", "bye2")
+ self.env.memp_sync((1, 0)) # NOP, probably
+ # Something to do... or not
+ self.assertTrue(self.env.memp_trickle(100) >= 0)
+
+ self.db.put("hi", "bye3")
+ self.env.memp_sync((123, 99)) # Full flush
+ # Nothing to do...
+ self.assertTrue(self.env.memp_trickle(100) == 0)
+
+ def test_memp_stat_1(self) :
+ stats = self.env.memp_stat() # No param
+ self.assertTrue(len(stats)==2)
+ self.assertTrue("cache_miss" in stats[0])
+ stats = self.env.memp_stat(db.DB_STAT_CLEAR) # Positional param
+ self.assertTrue("cache_miss" in stats[0])
+ stats = self.env.memp_stat(flags=0) # Keyword param
+ self.assertTrue("cache_miss" in stats[0])
+
+ def test_memp_stat_2(self) :
+ stats=self.env.memp_stat()[1]
+ self.assertTrue(len(stats))==1
+ self.assertTrue("test" in stats)
+ self.assertTrue("page_in" in stats["test"])
+
class DBEnv_logcursor(DBEnv):
    """Tests for log cursors (DBEnv.log_cursor) over a transactional log."""
    def setUp(self):
        DBEnv.setUp(self)
        self.env.open(self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL |
                db.DB_INIT_LOG | db.DB_INIT_TXN)
        txn = self.env.txn_begin()
        self.db = db.DB(self.env)
        self.db.open("test", db.DB_HASH, db.DB_CREATE, 0660, txn=txn)
        txn.commit()
        # A few committed writes, so the log holds several records.
        for i in ["2", "8", "20"] :
            txn = self.env.txn_begin()
            self.db.put(key = i, data = i*int(i), txn=txn)
            txn.commit()

    def tearDown(self):
        self.db.close()
        del self.db
        DBEnv.tearDown(self)

    def _check_return(self, value) :
        """A log record is ((file, offset) LSN tuple, data string)."""
        self.assertTrue(isinstance(value, tuple))
        self.assertEqual(len(value), 2)
        self.assertTrue(isinstance(value[0], tuple))
        self.assertEqual(len(value[0]), 2)
        self.assertTrue(isinstance(value[0][0], int))
        self.assertTrue(isinstance(value[0][1], int))
        self.assertTrue(isinstance(value[1], str))

    # Preserve test order
    def test_1_first(self) :
        logc = self.env.log_cursor()
        v = logc.first()
        self._check_return(v)
        self.assertTrue((1, 1) < v[0])
        self.assertTrue(len(v[1])>0)

    def test_2_last(self) :
        logc = self.env.log_cursor()
        lsn_first = logc.first()[0]
        v = logc.last()
        self._check_return(v)
        self.assertTrue(lsn_first < v[0])

    def test_3_next(self) :
        logc = self.env.log_cursor()
        # next() past the end returns None.
        lsn_last = logc.last()[0]
        self.assertEqual(logc.next(), None)
        # LSNs must be strictly increasing while walking forward.
        lsn_first = logc.first()[0]
        v = logc.next()
        self._check_return(v)
        self.assertTrue(lsn_first < v[0])
        self.assertTrue(lsn_last > v[0])

        v2 = logc.next()
        self.assertTrue(v2[0] > v[0])
        self.assertTrue(lsn_last > v2[0])

        v3 = logc.next()
        self.assertTrue(v3[0] > v2[0])
        self.assertTrue(lsn_last > v3[0])

    def test_4_prev(self) :
        logc = self.env.log_cursor()
        # prev() before the start returns None.
        lsn_first = logc.first()[0]
        self.assertEqual(logc.prev(), None)
        # LSNs must be strictly decreasing while walking backwards.
        lsn_last = logc.last()[0]
        v = logc.prev()
        self._check_return(v)
        self.assertTrue(lsn_first < v[0])
        self.assertTrue(lsn_last > v[0])

        v2 = logc.prev()
        self.assertTrue(v2[0] < v[0])
        self.assertTrue(lsn_first < v2[0])

        v3 = logc.prev()
        self.assertTrue(v3[0] < v2[0])
        self.assertTrue(lsn_first < v3[0])

    def test_5_current(self) :
        logc = self.env.log_cursor()
        logc.first()
        v = logc.next()
        self.assertEqual(v, logc.current())

    def test_6_set(self) :
        logc = self.env.log_cursor()
        logc.first()
        v = logc.next()
        # Move away, then jump back by LSN with set().
        self.assertNotEqual(v, logc.next())
        self.assertNotEqual(v, logc.next())
        self.assertEqual(v, logc.set(v[0]))

    def test_explicit_close(self) :
        logc = self.env.log_cursor()
        logc.close()
        self.assertRaises(db.DBCursorClosedError, logc.next)

    def test_implicit_close(self) :
        logc = [self.env.log_cursor() for i in xrange(10)]
        # Closing the environment must also close every cursor created
        # from it, leaving each one unusable.
        self.env.close()
        for i in logc :
            self.assertRaises(db.DBCursorClosedError, i.next)
+
def test_suite():
    """Build the DBEnv suite, keeping the historical case order."""
    cases = (
        DBEnv_general,
        DBEnv_memp,
        DBEnv_logcursor,
        DBEnv_log,
        DBEnv_log_txn,
    )
    suite = unittest.TestSuite()
    for case in cases:
        suite.addTest(unittest.makeSuite(case))
    return suite


if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
diff --git a/lib/python2.7/bsddb/test/test_dbobj.py b/lib/python2.7/bsddb/test/test_dbobj.py
new file mode 100644
index 0000000..e301a5a
--- /dev/null
+++ b/lib/python2.7/bsddb/test/test_dbobj.py
@@ -0,0 +1,70 @@
+
+import os, string
+import unittest
+
+from test_all import db, dbobj, test_support, get_new_environment_path, \
+ get_new_database_path
+
+#----------------------------------------------------------------------
+
class dbobjTestCase(unittest.TestCase):
    """Verify that dbobj.DB and dbobj.DBEnv work properly"""
    db_name = 'test-dbobj.db'

    def setUp(self):
        self.homeDir = get_new_environment_path()

    def tearDown(self):
        # Drop references before removing the directory so the underlying
        # handles are released first.
        if hasattr(self, 'db'):
            del self.db
        if hasattr(self, 'env'):
            del self.env
        test_support.rmtree(self.homeDir)

    def test01_both(self):
        """Subclasses of the dbobj wrappers must have methods honoured."""
        class TestDBEnv(dbobj.DBEnv): pass
        class TestDB(dbobj.DB):
            def put(self, key, *args, **kwargs):
                key = key.upper()
                # call our parent classes put method with an upper case key
                return dbobj.DB.put(self, key, *args, **kwargs)
        self.env = TestDBEnv()
        self.env.open(self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL)
        self.db = TestDB(self.env)
        self.db.open(self.db_name, db.DB_HASH, db.DB_CREATE)
        self.db.put('spam', 'eggs')
        # The overridden put() upper-cased the key, so only 'SPAM' exists.
        self.assertEqual(self.db.get('spam'), None,
                "overridden dbobj.DB.put() method failed [1]")
        self.assertEqual(self.db.get('SPAM'), 'eggs',
                "overridden dbobj.DB.put() method failed [2]")
        self.db.close()
        self.env.close()

    def test02_dbobj_dict_interface(self):
        """dbobj.DB must support the mapping protocol."""
        self.env = dbobj.DBEnv()
        self.env.open(self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL)
        self.db = dbobj.DB(self.env)
        self.db.open(self.db_name+'02', db.DB_HASH, db.DB_CREATE)
        # __setitem__
        self.db['spam'] = 'eggs'
        # __len__
        self.assertEqual(len(self.db), 1)
        # __getitem__
        self.assertEqual(self.db['spam'], 'eggs')
        # __del__
        del self.db['spam']
        self.assertEqual(self.db.get('spam'), None, "dbobj __del__ failed")
        self.db.close()
        self.env.close()

    def test03_dbobj_type_before_open(self):
        # Ensure this doesn't cause a segfault.
        self.assertRaises(db.DBInvalidArgError, db.DB().type)
+
+#----------------------------------------------------------------------
+
def test_suite():
    """All dbobj tests live in a single TestCase."""
    suite = unittest.makeSuite(dbobjTestCase)
    return suite


if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
diff --git a/lib/python2.7/bsddb/test/test_dbshelve.py b/lib/python2.7/bsddb/test/test_dbshelve.py
new file mode 100644
index 0000000..e5609c5
--- /dev/null
+++ b/lib/python2.7/bsddb/test/test_dbshelve.py
@@ -0,0 +1,398 @@
+"""
+TestCases for checking dbShelve objects.
+"""
+
+import os, string, sys
+import random
+import unittest
+
+
+from test_all import db, dbshelve, test_support, verbose, \
+ get_new_environment_path, get_new_database_path
+
+
+
+
+
+#----------------------------------------------------------------------
+
+# We want the objects to be comparable so we can test dbshelve.values
+# later on.
# We want the objects to be comparable so we can test dbshelve.values
# later on.
class DataClass:
    """Randomly-valued object stored in shelves; comparable for sorting."""
    def __init__(self):
        # value drives both repr() and comparisons.
        self.value = random.random()

    def __repr__(self) : # For Python 3.0 comparison
        return "DataClass %f" %self.value

    def __cmp__(self, other): # For Python 2.x comparison
        # NOTE(review): compares self.value against the *other object*
        # itself, not other.value -- relies on Python 2 mixed-type cmp
        # ordering; confirm this asymmetry is intended.
        return cmp(self.value, other)
+
+
class DBShelveTestCase(unittest.TestCase):
    """Base behaviour tests for dbshelve: a dict-like API over pickled values."""

    if (sys.version_info < (2, 7)) or ((sys.version_info >= (3, 0)) and
            (sys.version_info < (3, 2))) :
        # Backfill assertIn for unittest versions that lack it.
        def assertIn(self, a, b, msg=None) :
            return self.assertTrue(a in b, msg=msg)


    def setUp(self):
        if sys.version_info[0] >= 3 :
            from test_all import do_proxy_db_py3k
            self._flag_proxy_db_py3k = do_proxy_db_py3k(False)
        self.filename = get_new_database_path()
        self.do_open()

    def tearDown(self):
        if sys.version_info[0] >= 3 :
            from test_all import do_proxy_db_py3k
            do_proxy_db_py3k(self._flag_proxy_db_py3k)
        self.do_close()
        test_support.unlink(self.filename)

    def mk(self, key):
        """Turn key into an appropriate key type for this db"""
        # override in child class for RECNO
        if sys.version_info[0] < 3 :
            return key
        else :
            return bytes(key, "iso8859-1") # 8 bits

    def populateDB(self, d):
        # One string, int, list and instance entry per ASCII letter.
        for x in string.ascii_letters:
            d[self.mk('S' + x)] = 10 * x          # add a string
            d[self.mk('I' + x)] = ord(x)          # add an integer
            d[self.mk('L' + x)] = [x] * 10        # add a list

            inst = DataClass()            # add an instance
            inst.S = 10 * x
            inst.I = ord(x)
            inst.L = [x] * 10
            d[self.mk('O' + x)] = inst


    # overridable in derived classes to affect how the shelf is created/opened
    def do_open(self):
        self.d = dbshelve.open(self.filename)

    # and closed...
    def do_close(self):
        self.d.close()



    def test01_basics(self):
        """Populate, reopen, and exercise the whole mapping interface."""
        if verbose:
            print '\n', '-=' * 30
            print "Running %s.test01_basics..." % self.__class__.__name__

        self.populateDB(self.d)
        self.d.sync()
        # Round-trip through close/open so data must come from disk.
        self.do_close()
        self.do_open()
        d = self.d

        l = len(d)
        k = d.keys()
        s = d.stat()
        f = d.fd()

        if verbose:
            print "length:", l
            print "keys:", k
            print "stats:", s

        self.assertEqual(0, d.has_key(self.mk('bad key')))
        self.assertEqual(1, d.has_key(self.mk('IA')))
        self.assertEqual(1, d.has_key(self.mk('OA')))

        # Deletion via both delete() and the del statement.
        d.delete(self.mk('IA'))
        del d[self.mk('OA')]
        self.assertEqual(0, d.has_key(self.mk('IA')))
        self.assertEqual(0, d.has_key(self.mk('OA')))
        self.assertEqual(len(d), l-2)

        values = []
        for key in d.keys():
            value = d[key]
            values.append(value)
            if verbose:
                print "%s: %s" % (key, value)
            self.checkrec(key, value)

        dbvalues = d.values()
        self.assertEqual(len(dbvalues), len(d.keys()))
        if sys.version_info < (2, 6) :
            values.sort()
            dbvalues.sort()
            self.assertEqual(values, dbvalues)
        else : # XXX: Convert all to strings. Please, improve
            values.sort(key=lambda x : str(x))
            dbvalues.sort(key=lambda x : str(x))
            self.assertEqual(repr(values), repr(dbvalues))

        items = d.items()
        self.assertEqual(len(items), len(values))

        for key, value in items:
            self.checkrec(key, value)

        # get() with and without a default value.
        self.assertEqual(d.get(self.mk('bad key')), None)
        self.assertEqual(d.get(self.mk('bad key'), None), None)
        self.assertEqual(d.get(self.mk('bad key'), 'a string'), 'a string')
        self.assertEqual(d.get(self.mk('bad key'), [1, 2, 3]), [1, 2, 3])

        # With get_returns_none disabled, misses raise instead.
        d.set_get_returns_none(0)
        self.assertRaises(db.DBNotFoundError, d.get, self.mk('bad key'))
        d.set_get_returns_none(1)

        d.put(self.mk('new key'), 'new data')
        self.assertEqual(d.get(self.mk('new key')), 'new data')
        self.assertEqual(d[self.mk('new key')], 'new data')



    def test02_cursors(self):
        """Walk the shelf forward and backward with a cursor."""
        if verbose:
            print '\n', '-=' * 30
            print "Running %s.test02_cursors..." % self.__class__.__name__

        self.populateDB(self.d)
        d = self.d

        # Forward traversal must visit every record exactly once.
        count = 0
        c = d.cursor()
        rec = c.first()
        while rec is not None:
            count = count + 1
            if verbose:
                print rec
            key, value = rec
            self.checkrec(key, value)
            # Hack to avoid conversion by 2to3 tool
            rec = getattr(c, "next")()
        del c

        self.assertEqual(count, len(d))

        # Same for the reverse direction.
        count = 0
        c = d.cursor()
        rec = c.last()
        while rec is not None:
            count = count + 1
            if verbose:
                print rec
            key, value = rec
            self.checkrec(key, value)
            rec = c.prev()

        self.assertEqual(count, len(d))

        c.set(self.mk('SS'))
        key, value = c.current()
        self.checkrec(key, value)
        del c


    def test03_append(self):
        # NOTE: this is overridden in RECNO subclass, don't change its name.
        if verbose:
            print '\n', '-=' * 30
            print "Running %s.test03_append..." % self.__class__.__name__

        # append() only makes sense for RECNO shelves; others must refuse.
        self.assertRaises(dbshelve.DBShelveError,
                self.d.append, 'unit test was here')


    def test04_iterable(self) :
        """Iterating the shelf yields each key exactly once."""
        self.populateDB(self.d)
        d = self.d
        keys = d.keys()
        keyset = set(keys)
        self.assertEqual(len(keyset), len(keys))

        for key in d :
            self.assertIn(key, keyset)
            keyset.remove(key)
        self.assertEqual(len(keyset), 0)

    def checkrec(self, key, value):
        """Validate a record against the pattern written by populateDB."""
        # override this in a subclass if the key type is different

        if sys.version_info[0] >= 3 :
            if isinstance(key, bytes) :
                key = key.decode("iso8859-1") # 8 bits

        # key[0] encodes the value type; key[1] is the source letter.
        x = key[1]
        if key[0] == 'S':
            self.assertEqual(type(value), str)
            self.assertEqual(value, 10 * x)

        elif key[0] == 'I':
            self.assertEqual(type(value), int)
            self.assertEqual(value, ord(x))

        elif key[0] == 'L':
            self.assertEqual(type(value), list)
            self.assertEqual(value, [x] * 10)

        elif key[0] == 'O':
            if sys.version_info[0] < 3 :
                from types import InstanceType
                self.assertEqual(type(value), InstanceType)
            else :
                self.assertEqual(type(value), DataClass)

            self.assertEqual(value.S, 10 * x)
            self.assertEqual(value.I, ord(x))
            self.assertEqual(value.L, [x] * 10)

        else:
            # assertTrue(0, ...) is an unconditional failure here.
            self.assertTrue(0, 'Unknown key type, fix the test')
+
+#----------------------------------------------------------------------
+
class BasicShelveTestCase(DBShelveTestCase):
    """Shelf built from an explicit DBShelf() instead of dbshelve.open().

    Subclasses supply ``dbtype`` and ``dbflags``.
    """

    def do_open(self):
        shelf = dbshelve.DBShelf()
        shelf.open(self.filename, self.dbtype, self.dbflags)
        self.d = shelf

    def do_close(self):
        self.d.close()
+
+
class BTreeShelveTestCase(BasicShelveTestCase):
    # B-tree backed shelf.
    dbtype = db.DB_BTREE
    dbflags = db.DB_CREATE
+
+
class HashShelveTestCase(BasicShelveTestCase):
    # Hash backed shelf.
    dbtype = db.DB_HASH
    dbflags = db.DB_CREATE
+
+
class ThreadBTreeShelveTestCase(BasicShelveTestCase):
    # B-tree shelf opened with DB_THREAD (free-threaded handle).
    dbtype = db.DB_BTREE
    dbflags = db.DB_CREATE | db.DB_THREAD
+
+
class ThreadHashShelveTestCase(BasicShelveTestCase):
    # Hash shelf opened with DB_THREAD (free-threaded handle).
    dbtype = db.DB_HASH
    dbflags = db.DB_CREATE | db.DB_THREAD
+
+
+#----------------------------------------------------------------------
+
class BasicEnvShelveTestCase(DBShelveTestCase):
    """Shelf living inside an explicit DBEnv environment."""
    def do_open(self):
        self.env = db.DBEnv()
        self.env.open(self.homeDir,
                self.envflags | db.DB_INIT_MPOOL | db.DB_CREATE)

        # Keep only the base name: the environment supplies the directory.
        self.filename = os.path.split(self.filename)[1]
        self.d = dbshelve.DBShelf(self.env)
        self.d.open(self.filename, self.dbtype, self.dbflags)


    def do_close(self):
        # Close the shelf before its environment.
        self.d.close()
        self.env.close()


    def setUp(self) :
        self.homeDir = get_new_environment_path()
        DBShelveTestCase.setUp(self)

    def tearDown(self):
        if sys.version_info[0] >= 3 :
            from test_all import do_proxy_db_py3k
            do_proxy_db_py3k(self._flag_proxy_db_py3k)
        self.do_close()
        test_support.rmtree(self.homeDir)
+
+
class EnvBTreeShelveTestCase(BasicEnvShelveTestCase):
    # B-tree shelf inside a plain environment.
    envflags = 0
    dbtype = db.DB_BTREE
    dbflags = db.DB_CREATE
+
+
class EnvHashShelveTestCase(BasicEnvShelveTestCase):
    # Hash shelf inside a plain environment.
    envflags = 0
    dbtype = db.DB_HASH
    dbflags = db.DB_CREATE
+
+
class EnvThreadBTreeShelveTestCase(BasicEnvShelveTestCase):
    # B-tree shelf inside a DB_THREAD environment.
    envflags = db.DB_THREAD
    dbtype = db.DB_BTREE
    dbflags = db.DB_CREATE | db.DB_THREAD
+
+
class EnvThreadHashShelveTestCase(BasicEnvShelveTestCase):
    # Hash shelf inside a DB_THREAD environment.
    envflags = db.DB_THREAD
    dbtype = db.DB_HASH
    dbflags = db.DB_CREATE | db.DB_THREAD
+
+
+#----------------------------------------------------------------------
+# test cases for a DBShelf in a RECNO DB.
+
class RecNoShelveTestCase(BasicShelveTestCase):
    """Shelf on a RECNO database: keys are integers, mapped via key_map."""
    dbtype = db.DB_RECNO
    dbflags = db.DB_CREATE

    def setUp(self):
        BasicShelveTestCase.setUp(self)

        # pool to assign integer key values out of
        self.key_pool = list(range(1, 5000))
        self.key_map = {}     # map string keys to the number we gave them
        self.intkey_map = {}  # reverse map of above

    def mk(self, key):
        """Map a string key to a stable integer key (RECNO requires ints)."""
        if key not in self.key_map:
            self.key_map[key] = self.key_pool.pop(0)
            self.intkey_map[self.key_map[key]] = key
        return self.key_map[key]

    def checkrec(self, intkey, value):
        # Translate back to the original string key, then delegate.
        key = self.intkey_map[intkey]
        BasicShelveTestCase.checkrec(self, key, value)

    def test03_append(self):
        # On RECNO, append() IS legal: records get the next record numbers.
        if verbose:
            print '\n', '-=' * 30
            print "Running %s.test03_append..." % self.__class__.__name__

        self.d[1] = 'spam'
        self.d[5] = 'eggs'
        self.assertEqual(6, self.d.append('spam'))
        self.assertEqual(7, self.d.append('baked beans'))
        self.assertEqual('spam', self.d.get(6))
        self.assertEqual('spam', self.d.get(1))
        self.assertEqual('baked beans', self.d.get(7))
        self.assertEqual('eggs', self.d.get(5))
+
+
+#----------------------------------------------------------------------
+
def test_suite():
    """Collect every shelf variant, preserving the historical order."""
    cases = (
        DBShelveTestCase,
        BTreeShelveTestCase,
        HashShelveTestCase,
        ThreadBTreeShelveTestCase,
        ThreadHashShelveTestCase,
        EnvBTreeShelveTestCase,
        EnvHashShelveTestCase,
        EnvThreadBTreeShelveTestCase,
        EnvThreadHashShelveTestCase,
        RecNoShelveTestCase,
    )
    suite = unittest.TestSuite()
    for case in cases:
        suite.addTest(unittest.makeSuite(case))
    return suite


if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
diff --git a/lib/python2.7/bsddb/test/test_dbtables.py b/lib/python2.7/bsddb/test/test_dbtables.py
new file mode 100644
index 0000000..250c492
--- /dev/null
+++ b/lib/python2.7/bsddb/test/test_dbtables.py
@@ -0,0 +1,407 @@
+#-----------------------------------------------------------------------
+# A test suite for the table interface built on bsddb.db
+#-----------------------------------------------------------------------
+#
+# Copyright (C) 2000, 2001 by Autonomous Zone Industries
+# Copyright (C) 2002 Gregory P. Smith
+#
+# March 20, 2000
+#
+# License: This is free software. You may use this software for any
+# purpose including modification/redistribution, so long as
+# this header remains intact and that you do not claim any
+# rights of ownership or authorship of this software. This
+# software has been tested, but no warranty is expressed or
+# implied.
+#
+# -- Gregory P. Smith <greg@krypto.org>
+#
+# $Id$
+
+import os, re, sys
+
+if sys.version_info[0] < 3 :
+ try:
+ import cPickle
+ pickle = cPickle
+ except ImportError:
+ import pickle
+else :
+ import pickle
+
+import unittest
+from test_all import db, dbtables, test_support, verbose, \
+ get_new_environment_path, get_new_database_path
+
+#----------------------------------------------------------------------
+
class TableDBTestCase(unittest.TestCase):
    """Exercise bsdTableDB: the simple table layer built on bsddb.db."""
    db_name = 'test-table.db'

    def setUp(self):
        import sys
        if sys.version_info[0] >= 3 :
            from test_all import do_proxy_db_py3k
            self._flag_proxy_db_py3k = do_proxy_db_py3k(False)

        self.testHomeDir = get_new_environment_path()
        self.tdb = dbtables.bsdTableDB(
            filename='tabletest.db', dbhome=self.testHomeDir, create=1)

    def tearDown(self):
        self.tdb.close()
        import sys
        if sys.version_info[0] >= 3 :
            from test_all import do_proxy_db_py3k
            do_proxy_db_py3k(self._flag_proxy_db_py3k)
        test_support.rmtree(self.testHomeDir)

    def test01(self):
        """Insert a pickled float and read it back through Select."""
        tabname = "test01"
        colname = 'cool numbers'
        try:
            self.tdb.Drop(tabname)
        except dbtables.TableDBError:
            pass
        self.tdb.CreateTable(tabname, [colname])
        import sys
        if sys.version_info[0] < 3 :
            self.tdb.Insert(tabname, {colname: pickle.dumps(3.14159, 1)})
        else :
            self.tdb.Insert(tabname, {colname: pickle.dumps(3.14159,
                1).decode("iso8859-1")}) # 8 bits

        if verbose:
            self.tdb._db_print()

        # A None condition matches any value in that column.
        values = self.tdb.Select(
            tabname, [colname], conditions={colname: None})

        import sys
        if sys.version_info[0] < 3 :
            colval = pickle.loads(values[0][colname])
        else :
            colval = pickle.loads(bytes(values[0][colname], "iso8859-1"))
        self.assertTrue(colval > 3.141)
        self.assertTrue(colval < 3.142)


    def test02(self):
        """Select rows with a callable condition over pickled ints."""
        tabname = "test02"
        col0 = 'coolness factor'
        col1 = 'but can it fly?'
        col2 = 'Species'

        import sys
        if sys.version_info[0] < 3 :
            testinfo = [
                {col0: pickle.dumps(8, 1), col1: 'no', col2: 'Penguin'},
                {col0: pickle.dumps(-1, 1), col1: 'no', col2: 'Turkey'},
                {col0: pickle.dumps(9, 1), col1: 'yes', col2: 'SR-71A Blackbird'}
            ]
        else :
            testinfo = [
                {col0: pickle.dumps(8, 1).decode("iso8859-1"),
                    col1: 'no', col2: 'Penguin'},
                {col0: pickle.dumps(-1, 1).decode("iso8859-1"),
                    col1: 'no', col2: 'Turkey'},
                {col0: pickle.dumps(9, 1).decode("iso8859-1"),
                    col1: 'yes', col2: 'SR-71A Blackbird'}
            ]

        try:
            self.tdb.Drop(tabname)
        except dbtables.TableDBError:
            pass
        self.tdb.CreateTable(tabname, [col0, col1, col2])
        for row in testinfo :
            self.tdb.Insert(tabname, row)

        import sys
        if sys.version_info[0] < 3 :
            values = self.tdb.Select(tabname, [col2],
                conditions={col0: lambda x: pickle.loads(x) >= 8})
        else :
            values = self.tdb.Select(tabname, [col2],
                conditions={col0: lambda x:
                    pickle.loads(bytes(x, "iso8859-1")) >= 8})

        self.assertEqual(len(values), 2)
        # Row order is undefined, so accept either permutation.
        if values[0]['Species'] == 'Penguin' :
            self.assertEqual(values[1]['Species'], 'SR-71A Blackbird')
        elif values[0]['Species'] == 'SR-71A Blackbird' :
            self.assertEqual(values[1]['Species'], 'Penguin')
        else :
            if verbose:
                print "values= %r" % (values,)
            raise RuntimeError("Wrong values returned!")

    def test03(self):
        """Create/Drop tables, Insert validation, regex and Cond selects."""
        tabname = "test03"
        try:
            self.tdb.Drop(tabname)
        except dbtables.TableDBError:
            pass
        if verbose:
            print '...before CreateTable...'
            self.tdb._db_print()
        self.tdb.CreateTable(tabname, ['a', 'b', 'c', 'd', 'e'])
        if verbose:
            print '...after CreateTable...'
            self.tdb._db_print()
        self.tdb.Drop(tabname)
        if verbose:
            print '...after Drop...'
            self.tdb._db_print()
        self.tdb.CreateTable(tabname, ['a', 'b', 'c', 'd', 'e'])

        try:
            # Inserting into an unknown column ('f') must fail.
            self.tdb.Insert(tabname,
                {'a': "",
                    'e': pickle.dumps([{4:5, 6:7}, 'foo'], 1),
                    'f': "Zero"})
            self.fail('Expected an exception')
        except dbtables.TableDBError:
            pass

        try:
            # Selecting on an unknown column must fail too.
            self.tdb.Select(tabname, [], conditions={'foo': '123'})
            self.fail('Expected an exception')
        except dbtables.TableDBError:
            pass

        self.tdb.Insert(tabname,
            {'a': '42',
                'b': "bad",
                'c': "meep",
                'e': 'Fuzzy wuzzy was a bear'})
        self.tdb.Insert(tabname,
            {'a': '581750',
                'b': "good",
                'd': "bla",
                'c': "black",
                'e': 'fuzzy was here'})
        self.tdb.Insert(tabname,
            {'a': '800000',
                'b': "good",
                'd': "bla",
                'c': "black",
                'e': 'Fuzzy wuzzy is a bear'})

        if verbose:
            self.tdb._db_print()

        # this should return two rows
        values = self.tdb.Select(tabname, ['b', 'a', 'd'],
            conditions={'e': re.compile('wuzzy').search,
                'a': re.compile('^[0-9]+$').match})
        self.assertEqual(len(values), 2)

        # now lets delete one of them and try again
        self.tdb.Delete(tabname, conditions={'b': dbtables.ExactCond('good')})
        values = self.tdb.Select(
            tabname, ['a', 'd', 'b'],
            conditions={'e': dbtables.PrefixCond('Fuzzy')})
        self.assertEqual(len(values), 1)
        # Columns with no stored value come back as None.
        self.assertEqual(values[0]['d'], None)

        values = self.tdb.Select(tabname, ['b'],
            conditions={'c': lambda c: c == 'meep'})
        self.assertEqual(len(values), 1)
        self.assertEqual(values[0]['b'], "bad")


    def test04_MultiCondSelect(self):
        """Select with several conditions must AND them per-row."""
        tabname = "test04_MultiCondSelect"
        try:
            self.tdb.Drop(tabname)
        except dbtables.TableDBError:
            pass
        self.tdb.CreateTable(tabname, ['a', 'b', 'c', 'd', 'e'])

        try:
            self.tdb.Insert(tabname,
                {'a': "",
                    'e': pickle.dumps([{4:5, 6:7}, 'foo'], 1),
                    'f': "Zero"})
            self.fail('Expected an exception')
        except dbtables.TableDBError:
            pass

        self.tdb.Insert(tabname, {'a': "A", 'b': "B", 'c': "C", 'd': "D",
            'e': "E"})
        self.tdb.Insert(tabname, {'a': "-A", 'b': "-B", 'c': "-C", 'd': "-D",
            'e': "-E"})
        self.tdb.Insert(tabname, {'a': "A-", 'b': "B-", 'c': "C-", 'd': "D-",
            'e': "E-"})

        if verbose:
            self.tdb._db_print()

        # This select should return 0 rows. it is designed to test
        # the bug identified and fixed in sourceforge bug # 590449
        # (Big Thanks to "Rob Tillotson (n9mtb)" for tracking this down
        # and supplying a fix!! This one caused many headaches to say
        # the least...)
        values = self.tdb.Select(tabname, ['b', 'a', 'd'],
            conditions={'e': dbtables.ExactCond('E'),
                'a': dbtables.ExactCond('A'),
                'd': dbtables.PrefixCond('-')
            } )
        self.assertEqual(len(values), 0, values)


    def test_CreateOrExtend(self):
        """CreateOrExtendTable must merge the column sets of both calls."""
        tabname = "test_CreateOrExtend"

        self.tdb.CreateOrExtendTable(
            tabname, ['name', 'taste', 'filling', 'alcohol content', 'price'])
        try:
            self.tdb.Insert(tabname,
                {'taste': 'crap',
                    'filling': 'no',
                    'is it Guinness?': 'no'})
            self.fail("Insert should've failed due to bad column name")
        except:
            # NOTE(review): bare except is broad; a TableDBError is what is
            # expected here.
            pass
        self.tdb.CreateOrExtendTable(tabname,
            ['name', 'taste', 'is it Guinness?'])

        # these should both succeed as the table should contain the union of both sets of columns.
        self.tdb.Insert(tabname, {'taste': 'crap', 'filling': 'no',
            'is it Guinness?': 'no'})
        self.tdb.Insert(tabname, {'taste': 'great', 'filling': 'yes',
            'is it Guinness?': 'yes',
            'name': 'Guinness'})


    def test_CondObjs(self):
        """Exercise the Cond/Prefix/Like/Exact condition helper objects."""
        tabname = "test_CondObjs"

        self.tdb.CreateTable(tabname, ['a', 'b', 'c', 'd', 'e', 'p'])

        self.tdb.Insert(tabname, {'a': "the letter A",
            'b': "the letter B",
            'c': "is for cookie"})
        self.tdb.Insert(tabname, {'a': "is for aardvark",
            'e': "the letter E",
            'c': "is for cookie",
            'd': "is for dog"})
        self.tdb.Insert(tabname, {'a': "the letter A",
            'e': "the letter E",
            'c': "is for cookie",
            'p': "is for Python"})

        values = self.tdb.Select(
            tabname, ['p', 'e'],
            conditions={'e': dbtables.PrefixCond('the l')})
        self.assertEqual(len(values), 2, values)
        self.assertEqual(values[0]['e'], values[1]['e'], values)
        self.assertNotEqual(values[0]['p'], values[1]['p'], values)

        values = self.tdb.Select(
            tabname, ['d', 'a'],
            conditions={'a': dbtables.LikeCond('%aardvark%')})
        self.assertEqual(len(values), 1, values)
        self.assertEqual(values[0]['d'], "is for dog", values)
        self.assertEqual(values[0]['a'], "is for aardvark", values)

        # Mix Cond objects and a plain callable in one Select.
        values = self.tdb.Select(tabname, None,
            {'b': dbtables.Cond(),
                'e':dbtables.LikeCond('%letter%'),
                'a':dbtables.PrefixCond('is'),
                'd':dbtables.ExactCond('is for dog'),
                'c':dbtables.PrefixCond('is for'),
                'p':lambda s: not s})
        self.assertEqual(len(values), 1, values)
        self.assertEqual(values[0]['d'], "is for dog", values)
        self.assertEqual(values[0]['a'], "is for aardvark", values)

    def test_Delete(self):
        tabname = "test_Delete"
        self.tdb.CreateTable(tabname, ['x', 'y', 'z'])

        # prior to 2001-05-09 there was a bug where Delete() would
        # fail if it encountered any rows that did not have values in
        # every column.
        # Hunted and Squashed by <Donwulff> (Jukka Santala - donwulff@nic.fi)
        self.tdb.Insert(tabname, {'x': 'X1', 'y':'Y1'})
        self.tdb.Insert(tabname, {'x': 'X2', 'y':'Y2', 'z': 'Z2'})

        self.tdb.Delete(tabname, conditions={'x': dbtables.PrefixCond('X')})
        values = self.tdb.Select(tabname, ['y'],
            conditions={'x': dbtables.PrefixCond('X')})
        self.assertEqual(len(values), 0)

    def test_Modify(self):
        """Modify() applies per-column mapping callables to matching rows."""
        tabname = "test_Modify"
        self.tdb.CreateTable(tabname, ['Name', 'Type', 'Access'])

        self.tdb.Insert(tabname, {'Name': 'Index to MP3 files.doc',
            'Type': 'Word', 'Access': '8'})
        self.tdb.Insert(tabname, {'Name': 'Nifty.MP3', 'Access': '1'})
        self.tdb.Insert(tabname, {'Type': 'Unknown', 'Access': '0'})

        def set_type(type):
            # Fill in missing types with a default.
            if type is None:
                return 'MP3'
            return type

        def increment_access(count):
            return str(int(count)+1)

        def remove_value(value):
            # Returning None deletes the stored value.
            return None

        self.tdb.Modify(tabname,
            conditions={'Access': dbtables.ExactCond('0')},
            mappings={'Access': remove_value})
        self.tdb.Modify(tabname,
            conditions={'Name': dbtables.LikeCond('%MP3%')},
            mappings={'Type': set_type})
        self.tdb.Modify(tabname,
            conditions={'Name': dbtables.LikeCond('%')},
            mappings={'Access': increment_access})

        try:
            # Mappings must be callables, not plain values.
            self.tdb.Modify(tabname,
                conditions={'Name': dbtables.LikeCond('%')},
                mappings={'Access': 'What is your quest?'})
        except TypeError:
            # success, the string value in mappings isn't callable
            pass
        else:
            raise RuntimeError, "why was TypeError not raised for bad callable?"

        # Delete key in select conditions
        values = self.tdb.Select(
            tabname, None,
            conditions={'Type': dbtables.ExactCond('Unknown')})
        self.assertEqual(len(values), 1, values)
        self.assertEqual(values[0]['Name'], None, values)
        self.assertEqual(values[0]['Access'], None, values)

        # Modify value by select conditions
        values = self.tdb.Select(
            tabname, None,
            conditions={'Name': dbtables.ExactCond('Nifty.MP3')})
        self.assertEqual(len(values), 1, values)
        self.assertEqual(values[0]['Type'], "MP3", values)
        self.assertEqual(values[0]['Access'], "2", values)

        # Make sure change applied only to select conditions
        values = self.tdb.Select(
            tabname, None, conditions={'Name': dbtables.LikeCond('%doc%')})
        self.assertEqual(len(values), 1, values)
        self.assertEqual(values[0]['Type'], "Word", values)
        self.assertEqual(values[0]['Access'], "9", values)
+
+
def test_suite():
    """The table interface is covered by a single TestCase."""
    return unittest.TestSuite([unittest.makeSuite(TableDBTestCase)])


if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
diff --git a/lib/python2.7/bsddb/test/test_distributed_transactions.py b/lib/python2.7/bsddb/test/test_distributed_transactions.py
new file mode 100644
index 0000000..9058575
--- /dev/null
+++ b/lib/python2.7/bsddb/test/test_distributed_transactions.py
@@ -0,0 +1,152 @@
+"""TestCases for distributed transactions.
+"""
+
+import os
+import unittest
+
+from test_all import db, test_support, get_new_environment_path, \
+ get_new_database_path
+
+from test_all import verbose
+
+#----------------------------------------------------------------------
+
+class DBTxn_distributed(unittest.TestCase):
+ num_txns=1234
+ nosync=True
+ must_open_db=False
+ def _create_env(self, must_open_db) :
+ self.dbenv = db.DBEnv()
+ self.dbenv.set_tx_max(self.num_txns)
+ self.dbenv.set_lk_max_lockers(self.num_txns*2)
+ self.dbenv.set_lk_max_locks(self.num_txns*2)
+ self.dbenv.set_lk_max_objects(self.num_txns*2)
+ if self.nosync :
+ self.dbenv.set_flags(db.DB_TXN_NOSYNC,True)
+ self.dbenv.open(self.homeDir, db.DB_CREATE | db.DB_THREAD |
+ db.DB_RECOVER |
+ db.DB_INIT_TXN | db.DB_INIT_LOG | db.DB_INIT_MPOOL |
+ db.DB_INIT_LOCK, 0666)
+ self.db = db.DB(self.dbenv)
+ self.db.set_re_len(db.DB_GID_SIZE)
+ if must_open_db :
+ txn=self.dbenv.txn_begin()
+ self.db.open(self.filename,
+ db.DB_QUEUE, db.DB_CREATE | db.DB_THREAD, 0666,
+ txn=txn)
+ txn.commit()
+
+ def setUp(self) :
+ self.homeDir = get_new_environment_path()
+ self.filename = "test"
+ return self._create_env(must_open_db=True)
+
+ def _destroy_env(self):
+ if self.nosync or (db.version()[:2] == (4,6)): # Known bug
+ self.dbenv.log_flush()
+ self.db.close()
+ self.dbenv.close()
+
+ def tearDown(self):
+ self._destroy_env()
+ test_support.rmtree(self.homeDir)
+
+ def _recreate_env(self,must_open_db) :
+ self._destroy_env()
+ self._create_env(must_open_db)
+
+ def test01_distributed_transactions(self) :
+ txns=set()
+ adapt = lambda x : x
+ import sys
+ if sys.version_info[0] >= 3 :
+ adapt = lambda x : bytes(x, "ascii")
+ # Create transactions, "prepare" them, and
+ # let them be garbage collected.
+ for i in xrange(self.num_txns) :
+ txn = self.dbenv.txn_begin()
+ gid = "%%%dd" %db.DB_GID_SIZE
+ gid = adapt(gid %i)
+ self.db.put(i, gid, txn=txn, flags=db.DB_APPEND)
+ txns.add(gid)
+ txn.prepare(gid)
+ del txn
+
+ self._recreate_env(self.must_open_db)
+
+ # Get "to be recovered" transactions but
+ # let them be garbage collected.
+ recovered_txns=self.dbenv.txn_recover()
+ self.assertEqual(self.num_txns,len(recovered_txns))
+ for gid,txn in recovered_txns :
+ self.assertTrue(gid in txns)
+ del txn
+ del recovered_txns
+
+ self._recreate_env(self.must_open_db)
+
+ # Get "to be recovered" transactions. Commit, abort and
+ # discard them.
+ recovered_txns=self.dbenv.txn_recover()
+ self.assertEqual(self.num_txns,len(recovered_txns))
+ discard_txns=set()
+ committed_txns=set()
+ state=0
+ for gid,txn in recovered_txns :
+ if state==0 or state==1:
+ committed_txns.add(gid)
+ txn.commit()
+ elif state==2 :
+ txn.abort()
+ elif state==3 :
+ txn.discard()
+ discard_txns.add(gid)
+ state=-1
+ state+=1
+ del txn
+ del recovered_txns
+
+ self._recreate_env(self.must_open_db)
+
+ # Verify the discarded transactions are still
+ # around, and dispose them.
+ recovered_txns=self.dbenv.txn_recover()
+ self.assertEqual(len(discard_txns),len(recovered_txns))
+ for gid,txn in recovered_txns :
+ txn.abort()
+ del txn
+ del recovered_txns
+
+ self._recreate_env(must_open_db=True)
+
+        # Be sure there are no pending transactions.
+ # Check also database size.
+ recovered_txns=self.dbenv.txn_recover()
+ self.assertTrue(len(recovered_txns)==0)
+ self.assertEqual(len(committed_txns),self.db.stat()["nkeys"])
+
+class DBTxn_distributedSYNC(DBTxn_distributed):
+ nosync=False
+
+class DBTxn_distributed_must_open_db(DBTxn_distributed):
+ must_open_db=True
+
+class DBTxn_distributedSYNC_must_open_db(DBTxn_distributed):
+ nosync=False
+ must_open_db=True
+
+#----------------------------------------------------------------------
+
+def test_suite():
+ suite = unittest.TestSuite()
+ if db.version() >= (4,5) :
+ suite.addTest(unittest.makeSuite(DBTxn_distributed))
+ suite.addTest(unittest.makeSuite(DBTxn_distributedSYNC))
+ if db.version() >= (4,6) :
+ suite.addTest(unittest.makeSuite(DBTxn_distributed_must_open_db))
+ suite.addTest(unittest.makeSuite(DBTxn_distributedSYNC_must_open_db))
+ return suite
+
+
+if __name__ == '__main__':
+ unittest.main(defaultTest='test_suite')
diff --git a/lib/python2.7/bsddb/test/test_early_close.py b/lib/python2.7/bsddb/test/test_early_close.py
new file mode 100644
index 0000000..e925279
--- /dev/null
+++ b/lib/python2.7/bsddb/test/test_early_close.py
@@ -0,0 +1,215 @@
+"""TestCases for checking that it does not segfault when a DBEnv object
+is closed before its DB objects.
+"""
+
+import os, sys
+import unittest
+
+from test_all import db, test_support, verbose, get_new_environment_path, get_new_database_path
+
+# We're going to get warnings in this module about trying to close the db when
+# its env is already closed. Let's just ignore those.
+try:
+ import warnings
+except ImportError:
+ pass
+else:
+ warnings.filterwarnings('ignore',
+ message='DB could not be closed in',
+ category=RuntimeWarning)
+
+
+#----------------------------------------------------------------------
+
+class DBEnvClosedEarlyCrash(unittest.TestCase):
+ def setUp(self):
+ self.homeDir = get_new_environment_path()
+ self.filename = "test"
+
+ def tearDown(self):
+ test_support.rmtree(self.homeDir)
+
+ def test01_close_dbenv_before_db(self):
+ dbenv = db.DBEnv()
+ dbenv.open(self.homeDir,
+ db.DB_INIT_CDB| db.DB_CREATE |db.DB_THREAD|db.DB_INIT_MPOOL,
+ 0666)
+
+ d = db.DB(dbenv)
+ d2 = db.DB(dbenv)
+ d.open(self.filename, db.DB_BTREE, db.DB_CREATE | db.DB_THREAD, 0666)
+
+ self.assertRaises(db.DBNoSuchFileError, d2.open,
+ self.filename+"2", db.DB_BTREE, db.DB_THREAD, 0666)
+
+ d.put("test","this is a test")
+ self.assertEqual(d.get("test"), "this is a test", "put!=get")
+ dbenv.close() # This "close" should close the child db handle also
+ self.assertRaises(db.DBError, d.get, "test")
+
+ def test02_close_dbenv_before_dbcursor(self):
+ dbenv = db.DBEnv()
+ dbenv.open(self.homeDir,
+ db.DB_INIT_CDB| db.DB_CREATE |db.DB_THREAD|db.DB_INIT_MPOOL,
+ 0666)
+
+ d = db.DB(dbenv)
+ d.open(self.filename, db.DB_BTREE, db.DB_CREATE | db.DB_THREAD, 0666)
+
+ d.put("test","this is a test")
+ d.put("test2","another test")
+ d.put("test3","another one")
+ self.assertEqual(d.get("test"), "this is a test", "put!=get")
+ c=d.cursor()
+ c.first()
+ c.next()
+ d.close() # This "close" should close the child db handle also
+ # db.close should close the child cursor
+ self.assertRaises(db.DBError,c.next)
+
+ d = db.DB(dbenv)
+ d.open(self.filename, db.DB_BTREE, db.DB_CREATE | db.DB_THREAD, 0666)
+ c=d.cursor()
+ c.first()
+ c.next()
+ dbenv.close()
+ # The "close" should close the child db handle also, with cursors
+ self.assertRaises(db.DBError, c.next)
+
+ def test03_close_db_before_dbcursor_without_env(self):
+ import os.path
+ path=os.path.join(self.homeDir,self.filename)
+ d = db.DB()
+ d.open(path, db.DB_BTREE, db.DB_CREATE | db.DB_THREAD, 0666)
+
+ d.put("test","this is a test")
+ d.put("test2","another test")
+ d.put("test3","another one")
+ self.assertEqual(d.get("test"), "this is a test", "put!=get")
+ c=d.cursor()
+ c.first()
+ c.next()
+ d.close()
+ # The "close" should close the child db handle also
+ self.assertRaises(db.DBError, c.next)
+
+ def test04_close_massive(self):
+ dbenv = db.DBEnv()
+ dbenv.open(self.homeDir,
+ db.DB_INIT_CDB| db.DB_CREATE |db.DB_THREAD|db.DB_INIT_MPOOL,
+ 0666)
+
+ dbs=[db.DB(dbenv) for i in xrange(16)]
+ cursors=[]
+ for i in dbs :
+ i.open(self.filename, db.DB_BTREE, db.DB_CREATE | db.DB_THREAD, 0666)
+
+ dbs[10].put("test","this is a test")
+ dbs[10].put("test2","another test")
+ dbs[10].put("test3","another one")
+ self.assertEqual(dbs[4].get("test"), "this is a test", "put!=get")
+
+ for i in dbs :
+ cursors.extend([i.cursor() for j in xrange(32)])
+
+ for i in dbs[::3] :
+ i.close()
+ for i in cursors[::3] :
+ i.close()
+
+ # Check for missing exception in DB! (after DB close)
+ self.assertRaises(db.DBError, dbs[9].get, "test")
+
+ # Check for missing exception in DBCursor! (after DB close)
+ self.assertRaises(db.DBError, cursors[101].first)
+
+ cursors[80].first()
+ cursors[80].next()
+ dbenv.close() # This "close" should close the child db handle also
+ # Check for missing exception! (after DBEnv close)
+ self.assertRaises(db.DBError, cursors[80].next)
+
+ def test05_close_dbenv_delete_db_success(self):
+ dbenv = db.DBEnv()
+ dbenv.open(self.homeDir,
+ db.DB_INIT_CDB| db.DB_CREATE |db.DB_THREAD|db.DB_INIT_MPOOL,
+ 0666)
+
+ d = db.DB(dbenv)
+ d.open(self.filename, db.DB_BTREE, db.DB_CREATE | db.DB_THREAD, 0666)
+
+ dbenv.close() # This "close" should close the child db handle also
+
+ del d
+ try:
+ import gc
+ except ImportError:
+ gc = None
+ if gc:
+ # force d.__del__ [DB_dealloc] to be called
+ gc.collect()
+
+ def test06_close_txn_before_dup_cursor(self) :
+ dbenv = db.DBEnv()
+ dbenv.open(self.homeDir,db.DB_INIT_TXN | db.DB_INIT_MPOOL |
+ db.DB_INIT_LOG | db.DB_CREATE)
+ d = db.DB(dbenv)
+ txn = dbenv.txn_begin()
+ d.open(self.filename, dbtype = db.DB_HASH, flags = db.DB_CREATE,
+ txn=txn)
+ d.put("XXX", "yyy", txn=txn)
+ txn.commit()
+ txn = dbenv.txn_begin()
+ c1 = d.cursor(txn)
+ c2 = c1.dup()
+ self.assertEqual(("XXX", "yyy"), c1.first())
+
+ # Not interested in warnings about implicit close.
+ import warnings
+ if sys.version_info < (2, 6) :
+ # Completely resetting the warning state is
+ # problematic with python >=2.6 with -3 (py3k warning),
+ # because some stdlib modules selectively ignores warnings.
+ warnings.simplefilter("ignore")
+ txn.commit()
+ warnings.resetwarnings()
+ else :
+ # When we drop support for python 2.4
+ # we could use: (in 2.5 we need a __future__ statement)
+ #
+ # with warnings.catch_warnings():
+ # warnings.simplefilter("ignore")
+ # txn.commit()
+ #
+ # We can not use "with" as is, because it would be invalid syntax
+ # in python 2.4 and (with no __future__) 2.5.
+ # Here we simulate "with" following PEP 343 :
+ w = warnings.catch_warnings()
+ w.__enter__()
+ try :
+ warnings.simplefilter("ignore")
+ txn.commit()
+ finally :
+ w.__exit__()
+
+ self.assertRaises(db.DBCursorClosedError, c2.first)
+
+ def test07_close_db_before_sequence(self):
+ import os.path
+ path=os.path.join(self.homeDir,self.filename)
+ d = db.DB()
+ d.open(path, db.DB_BTREE, db.DB_CREATE | db.DB_THREAD, 0666)
+ dbs=db.DBSequence(d)
+ d.close() # This "close" should close the child DBSequence also
+ dbs.close() # If not closed, core dump (in Berkeley DB 4.6.*)
+
+#----------------------------------------------------------------------
+
+def test_suite():
+ suite = unittest.TestSuite()
+ suite.addTest(unittest.makeSuite(DBEnvClosedEarlyCrash))
+ return suite
+
+
+if __name__ == '__main__':
+ unittest.main(defaultTest='test_suite')
diff --git a/lib/python2.7/bsddb/test/test_fileid.py b/lib/python2.7/bsddb/test/test_fileid.py
new file mode 100644
index 0000000..095ec83
--- /dev/null
+++ b/lib/python2.7/bsddb/test/test_fileid.py
@@ -0,0 +1,61 @@
+"""TestCase for resetting File ID.
+"""
+
+import os
+import shutil
+import unittest
+
+from test_all import db, test_support, get_new_environment_path, get_new_database_path
+
+class FileidResetTestCase(unittest.TestCase):
+ def setUp(self):
+ self.db_path_1 = get_new_database_path()
+ self.db_path_2 = get_new_database_path()
+ self.db_env_path = get_new_environment_path()
+
+ def test_fileid_reset(self):
+ # create DB 1
+ self.db1 = db.DB()
+ self.db1.open(self.db_path_1, dbtype=db.DB_HASH, flags=(db.DB_CREATE|db.DB_EXCL))
+ self.db1.put('spam', 'eggs')
+ self.db1.close()
+
+ shutil.copy(self.db_path_1, self.db_path_2)
+
+ self.db2 = db.DB()
+ self.db2.open(self.db_path_2, dbtype=db.DB_HASH)
+ self.db2.put('spam', 'spam')
+ self.db2.close()
+
+ self.db_env = db.DBEnv()
+ self.db_env.open(self.db_env_path, db.DB_CREATE|db.DB_INIT_MPOOL)
+
+ # use fileid_reset() here
+ self.db_env.fileid_reset(self.db_path_2)
+
+ self.db1 = db.DB(self.db_env)
+ self.db1.open(self.db_path_1, dbtype=db.DB_HASH, flags=db.DB_RDONLY)
+ self.assertEqual(self.db1.get('spam'), 'eggs')
+
+ self.db2 = db.DB(self.db_env)
+ self.db2.open(self.db_path_2, dbtype=db.DB_HASH, flags=db.DB_RDONLY)
+ self.assertEqual(self.db2.get('spam'), 'spam')
+
+ self.db1.close()
+ self.db2.close()
+
+ self.db_env.close()
+
+ def tearDown(self):
+ test_support.unlink(self.db_path_1)
+ test_support.unlink(self.db_path_2)
+ test_support.rmtree(self.db_env_path)
+
+def test_suite():
+ suite = unittest.TestSuite()
+ if db.version() >= (4, 4):
+ suite.addTest(unittest.makeSuite(FileidResetTestCase))
+ return suite
+
+if __name__ == '__main__':
+ unittest.main(defaultTest='test_suite')
diff --git a/lib/python2.7/bsddb/test/test_get_none.py b/lib/python2.7/bsddb/test/test_get_none.py
new file mode 100644
index 0000000..541044c
--- /dev/null
+++ b/lib/python2.7/bsddb/test/test_get_none.py
@@ -0,0 +1,92 @@
+"""
+TestCases for checking set_get_returns_none.
+"""
+
+import os, string
+import unittest
+
+from test_all import db, verbose, get_new_database_path
+
+
+#----------------------------------------------------------------------
+
+class GetReturnsNoneTestCase(unittest.TestCase):
+ def setUp(self):
+ self.filename = get_new_database_path()
+
+ def tearDown(self):
+ try:
+ os.remove(self.filename)
+ except os.error:
+ pass
+
+
+ def test01_get_returns_none(self):
+ d = db.DB()
+ d.open(self.filename, db.DB_BTREE, db.DB_CREATE)
+ d.set_get_returns_none(1)
+
+ for x in string.ascii_letters:
+ d.put(x, x * 40)
+
+ data = d.get('bad key')
+ self.assertEqual(data, None)
+
+ data = d.get(string.ascii_letters[0])
+ self.assertEqual(data, string.ascii_letters[0]*40)
+
+ count = 0
+ c = d.cursor()
+ rec = c.first()
+ while rec:
+ count = count + 1
+ rec = c.next()
+
+ self.assertEqual(rec, None)
+ self.assertEqual(count, len(string.ascii_letters))
+
+ c.close()
+ d.close()
+
+
+ def test02_get_raises_exception(self):
+ d = db.DB()
+ d.open(self.filename, db.DB_BTREE, db.DB_CREATE)
+ d.set_get_returns_none(0)
+
+ for x in string.ascii_letters:
+ d.put(x, x * 40)
+
+ self.assertRaises(db.DBNotFoundError, d.get, 'bad key')
+ self.assertRaises(KeyError, d.get, 'bad key')
+
+ data = d.get(string.ascii_letters[0])
+ self.assertEqual(data, string.ascii_letters[0]*40)
+
+ count = 0
+ exceptionHappened = 0
+ c = d.cursor()
+ rec = c.first()
+ while rec:
+ count = count + 1
+ try:
+ rec = c.next()
+ except db.DBNotFoundError: # end of the records
+ exceptionHappened = 1
+ break
+
+ self.assertNotEqual(rec, None)
+ self.assertTrue(exceptionHappened)
+ self.assertEqual(count, len(string.ascii_letters))
+
+ c.close()
+ d.close()
+
+#----------------------------------------------------------------------
+
+def test_suite():
+ return unittest.makeSuite(GetReturnsNoneTestCase)
+
+
+if __name__ == '__main__':
+ unittest.main(defaultTest='test_suite')
diff --git a/lib/python2.7/bsddb/test/test_join.py b/lib/python2.7/bsddb/test/test_join.py
new file mode 100644
index 0000000..1f0dfff
--- /dev/null
+++ b/lib/python2.7/bsddb/test/test_join.py
@@ -0,0 +1,99 @@
+"""TestCases for using the DB.join and DBCursor.join_item methods.
+"""
+
+import os
+
+import unittest
+
+from test_all import db, dbshelve, test_support, verbose, \
+ get_new_environment_path, get_new_database_path
+
+#----------------------------------------------------------------------
+
+ProductIndex = [
+ ('apple', "Convenience Store"),
+ ('blueberry', "Farmer's Market"),
+ ('shotgun', "S-Mart"), # Aisle 12
+ ('pear', "Farmer's Market"),
+ ('chainsaw', "S-Mart"), # "Shop smart. Shop S-Mart!"
+ ('strawberry', "Farmer's Market"),
+]
+
+ColorIndex = [
+ ('blue', "blueberry"),
+ ('red', "apple"),
+ ('red', "chainsaw"),
+ ('red', "strawberry"),
+ ('yellow', "peach"),
+ ('yellow', "pear"),
+ ('black', "shotgun"),
+]
+
+class JoinTestCase(unittest.TestCase):
+ keytype = ''
+
+ def setUp(self):
+ self.filename = self.__class__.__name__ + '.db'
+ self.homeDir = get_new_environment_path()
+ self.env = db.DBEnv()
+ self.env.open(self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL | db.DB_INIT_LOCK )
+
+ def tearDown(self):
+ self.env.close()
+ test_support.rmtree(self.homeDir)
+
+ def test01_join(self):
+ if verbose:
+ print '\n', '-=' * 30
+ print "Running %s.test01_join..." % \
+ self.__class__.__name__
+
+ # create and populate primary index
+ priDB = db.DB(self.env)
+ priDB.open(self.filename, "primary", db.DB_BTREE, db.DB_CREATE)
+ map(lambda t, priDB=priDB: priDB.put(*t), ProductIndex)
+
+ # create and populate secondary index
+ secDB = db.DB(self.env)
+ secDB.set_flags(db.DB_DUP | db.DB_DUPSORT)
+ secDB.open(self.filename, "secondary", db.DB_BTREE, db.DB_CREATE)
+ map(lambda t, secDB=secDB: secDB.put(*t), ColorIndex)
+
+ sCursor = None
+ jCursor = None
+ try:
+ # lets look up all of the red Products
+ sCursor = secDB.cursor()
+ # Don't do the .set() in an assert, or you can get a bogus failure
+ # when running python -O
+ tmp = sCursor.set('red')
+ self.assertTrue(tmp)
+
+            # FIXME: jCursor doesn't properly hold a reference to its
+            # cursors; if they are closed before jCursor is used, it
+            # can cause a crash.
+ jCursor = priDB.join([sCursor])
+
+ if jCursor.get(0) != ('apple', "Convenience Store"):
+ self.fail("join cursor positioned wrong")
+ if jCursor.join_item() != 'chainsaw':
+ self.fail("DBCursor.join_item returned wrong item")
+ if jCursor.get(0)[0] != 'strawberry':
+ self.fail("join cursor returned wrong thing")
+ if jCursor.get(0): # there were only three red items to return
+ self.fail("join cursor returned too many items")
+ finally:
+ if jCursor:
+ jCursor.close()
+ if sCursor:
+ sCursor.close()
+ priDB.close()
+ secDB.close()
+
+
+def test_suite():
+ suite = unittest.TestSuite()
+
+ suite.addTest(unittest.makeSuite(JoinTestCase))
+
+ return suite
diff --git a/lib/python2.7/bsddb/test/test_lock.py b/lib/python2.7/bsddb/test/test_lock.py
new file mode 100644
index 0000000..fd87ea2
--- /dev/null
+++ b/lib/python2.7/bsddb/test/test_lock.py
@@ -0,0 +1,184 @@
+"""
+TestCases for testing the locking sub-system.
+"""
+
+import time
+
+import unittest
+from test_all import db, test_support, verbose, have_threads, \
+ get_new_environment_path, get_new_database_path
+
+if have_threads :
+ from threading import Thread
+ import sys
+ if sys.version_info[0] < 3 :
+ from threading import currentThread
+ else :
+ from threading import current_thread as currentThread
+
+#----------------------------------------------------------------------
+
+class LockingTestCase(unittest.TestCase):
+ def setUp(self):
+ self.homeDir = get_new_environment_path()
+ self.env = db.DBEnv()
+ self.env.open(self.homeDir, db.DB_THREAD | db.DB_INIT_MPOOL |
+ db.DB_INIT_LOCK | db.DB_CREATE)
+
+
+ def tearDown(self):
+ self.env.close()
+ test_support.rmtree(self.homeDir)
+
+
+ def test01_simple(self):
+ if verbose:
+ print '\n', '-=' * 30
+ print "Running %s.test01_simple..." % self.__class__.__name__
+
+ anID = self.env.lock_id()
+ if verbose:
+ print "locker ID: %s" % anID
+ lock = self.env.lock_get(anID, "some locked thing", db.DB_LOCK_WRITE)
+ if verbose:
+ print "Acquired lock: %s" % lock
+ self.env.lock_put(lock)
+ if verbose:
+ print "Released lock: %s" % lock
+ self.env.lock_id_free(anID)
+
+
+ def test02_threaded(self):
+ if verbose:
+ print '\n', '-=' * 30
+ print "Running %s.test02_threaded..." % self.__class__.__name__
+
+ threads = []
+ threads.append(Thread(target = self.theThread,
+ args=(db.DB_LOCK_WRITE,)))
+ threads.append(Thread(target = self.theThread,
+ args=(db.DB_LOCK_READ,)))
+ threads.append(Thread(target = self.theThread,
+ args=(db.DB_LOCK_READ,)))
+ threads.append(Thread(target = self.theThread,
+ args=(db.DB_LOCK_WRITE,)))
+ threads.append(Thread(target = self.theThread,
+ args=(db.DB_LOCK_READ,)))
+ threads.append(Thread(target = self.theThread,
+ args=(db.DB_LOCK_READ,)))
+ threads.append(Thread(target = self.theThread,
+ args=(db.DB_LOCK_WRITE,)))
+ threads.append(Thread(target = self.theThread,
+ args=(db.DB_LOCK_WRITE,)))
+ threads.append(Thread(target = self.theThread,
+ args=(db.DB_LOCK_WRITE,)))
+
+ for t in threads:
+ import sys
+ if sys.version_info[0] < 3 :
+ t.setDaemon(True)
+ else :
+ t.daemon = True
+ t.start()
+ for t in threads:
+ t.join()
+
+ def test03_lock_timeout(self):
+ self.env.set_timeout(0, db.DB_SET_LOCK_TIMEOUT)
+ self.assertEqual(self.env.get_timeout(db.DB_SET_LOCK_TIMEOUT), 0)
+ self.env.set_timeout(0, db.DB_SET_TXN_TIMEOUT)
+ self.assertEqual(self.env.get_timeout(db.DB_SET_TXN_TIMEOUT), 0)
+ self.env.set_timeout(123456, db.DB_SET_LOCK_TIMEOUT)
+ self.assertEqual(self.env.get_timeout(db.DB_SET_LOCK_TIMEOUT), 123456)
+ self.env.set_timeout(7890123, db.DB_SET_TXN_TIMEOUT)
+ self.assertEqual(self.env.get_timeout(db.DB_SET_TXN_TIMEOUT), 7890123)
+
+ def test04_lock_timeout2(self):
+ self.env.set_timeout(0, db.DB_SET_LOCK_TIMEOUT)
+ self.env.set_timeout(0, db.DB_SET_TXN_TIMEOUT)
+ self.env.set_timeout(123456, db.DB_SET_LOCK_TIMEOUT)
+ self.env.set_timeout(7890123, db.DB_SET_TXN_TIMEOUT)
+
+ def deadlock_detection() :
+ while not deadlock_detection.end :
+ deadlock_detection.count = \
+ self.env.lock_detect(db.DB_LOCK_EXPIRE)
+ if deadlock_detection.count :
+ while not deadlock_detection.end :
+ pass
+ break
+ time.sleep(0.01)
+
+ deadlock_detection.end=False
+ deadlock_detection.count=0
+ t=Thread(target=deadlock_detection)
+ import sys
+ if sys.version_info[0] < 3 :
+ t.setDaemon(True)
+ else :
+ t.daemon = True
+ t.start()
+ self.env.set_timeout(100000, db.DB_SET_LOCK_TIMEOUT)
+ anID = self.env.lock_id()
+ anID2 = self.env.lock_id()
+ self.assertNotEqual(anID, anID2)
+ lock = self.env.lock_get(anID, "shared lock", db.DB_LOCK_WRITE)
+ start_time=time.time()
+ self.assertRaises(db.DBLockNotGrantedError,
+ self.env.lock_get,anID2, "shared lock", db.DB_LOCK_READ)
+ end_time=time.time()
+ deadlock_detection.end=True
+ # Floating point rounding
+ self.assertTrue((end_time-start_time) >= 0.0999)
+ self.env.lock_put(lock)
+ t.join()
+
+ self.env.lock_id_free(anID)
+ self.env.lock_id_free(anID2)
+
+ if db.version() >= (4,6):
+ self.assertTrue(deadlock_detection.count>0)
+
+ def theThread(self, lockType):
+ import sys
+ if sys.version_info[0] < 3 :
+ name = currentThread().getName()
+ else :
+ name = currentThread().name
+
+ if lockType == db.DB_LOCK_WRITE:
+ lt = "write"
+ else:
+ lt = "read"
+
+ anID = self.env.lock_id()
+ if verbose:
+ print "%s: locker ID: %s" % (name, anID)
+
+ for i in xrange(1000) :
+ lock = self.env.lock_get(anID, "some locked thing", lockType)
+ if verbose:
+ print "%s: Acquired %s lock: %s" % (name, lt, lock)
+
+ self.env.lock_put(lock)
+ if verbose:
+ print "%s: Released %s lock: %s" % (name, lt, lock)
+
+ self.env.lock_id_free(anID)
+
+
+#----------------------------------------------------------------------
+
+def test_suite():
+ suite = unittest.TestSuite()
+
+ if have_threads:
+ suite.addTest(unittest.makeSuite(LockingTestCase))
+ else:
+ suite.addTest(unittest.makeSuite(LockingTestCase, 'test01'))
+
+ return suite
+
+
+if __name__ == '__main__':
+ unittest.main(defaultTest='test_suite')
diff --git a/lib/python2.7/bsddb/test/test_misc.py b/lib/python2.7/bsddb/test/test_misc.py
new file mode 100644
index 0000000..b1e928f
--- /dev/null
+++ b/lib/python2.7/bsddb/test/test_misc.py
@@ -0,0 +1,138 @@
+"""Miscellaneous bsddb module test cases
+"""
+
+import os, sys
+import unittest
+
+from test_all import db, dbshelve, hashopen, test_support, get_new_environment_path, get_new_database_path
+
+#----------------------------------------------------------------------
+
+class MiscTestCase(unittest.TestCase):
+ def setUp(self):
+ self.filename = get_new_database_path()
+ self.homeDir = get_new_environment_path()
+
+ def tearDown(self):
+ test_support.unlink(self.filename)
+ test_support.rmtree(self.homeDir)
+
+ def test01_badpointer(self):
+ dbs = dbshelve.open(self.filename)
+ dbs.close()
+ self.assertRaises(db.DBError, dbs.get, "foo")
+
+ def test02_db_home(self):
+ env = db.DBEnv()
+ # check for crash fixed when db_home is used before open()
+ self.assertTrue(env.db_home is None)
+ env.open(self.homeDir, db.DB_CREATE)
+ if sys.version_info[0] < 3 :
+ self.assertEqual(self.homeDir, env.db_home)
+ else :
+ self.assertEqual(bytes(self.homeDir, "ascii"), env.db_home)
+
+ def test03_repr_closed_db(self):
+ db = hashopen(self.filename)
+ db.close()
+ rp = repr(db)
+ self.assertEqual(rp, "{}")
+
+ def test04_repr_db(self) :
+ db = hashopen(self.filename)
+ d = {}
+ for i in xrange(100) :
+ db[repr(i)] = repr(100*i)
+ d[repr(i)] = repr(100*i)
+ db.close()
+ db = hashopen(self.filename)
+ rp = repr(db)
+ self.assertEqual(rp, repr(d))
+ db.close()
+
+ # http://sourceforge.net/tracker/index.php?func=detail&aid=1708868&group_id=13900&atid=313900
+ #
+ # See the bug report for details.
+ #
+ # The problem was that make_key_dbt() was not allocating a copy of
+ # string keys but FREE_DBT() was always being told to free it when the
+ # database was opened with DB_THREAD.
+ def test05_double_free_make_key_dbt(self):
+ try:
+ db1 = db.DB()
+ db1.open(self.filename, None, db.DB_BTREE,
+ db.DB_CREATE | db.DB_THREAD)
+
+ curs = db1.cursor()
+ t = curs.get("/foo", db.DB_SET)
+ # double free happened during exit from DBC_get
+ finally:
+ db1.close()
+ test_support.unlink(self.filename)
+
+ def test06_key_with_null_bytes(self):
+ try:
+ db1 = db.DB()
+ db1.open(self.filename, None, db.DB_HASH, db.DB_CREATE)
+ db1['a'] = 'eh?'
+ db1['a\x00'] = 'eh zed.'
+ db1['a\x00a'] = 'eh zed eh?'
+ db1['aaa'] = 'eh eh eh!'
+ keys = db1.keys()
+ keys.sort()
+ self.assertEqual(['a', 'a\x00', 'a\x00a', 'aaa'], keys)
+ self.assertEqual(db1['a'], 'eh?')
+ self.assertEqual(db1['a\x00'], 'eh zed.')
+ self.assertEqual(db1['a\x00a'], 'eh zed eh?')
+ self.assertEqual(db1['aaa'], 'eh eh eh!')
+ finally:
+ db1.close()
+ test_support.unlink(self.filename)
+
+ def test07_DB_set_flags_persists(self):
+ try:
+ db1 = db.DB()
+ db1.set_flags(db.DB_DUPSORT)
+ db1.open(self.filename, db.DB_HASH, db.DB_CREATE)
+ db1['a'] = 'eh'
+ db1['a'] = 'A'
+ self.assertEqual([('a', 'A')], db1.items())
+ db1.put('a', 'Aa')
+ self.assertEqual([('a', 'A'), ('a', 'Aa')], db1.items())
+ db1.close()
+ db1 = db.DB()
+ # no set_flags call, we're testing that it reads and obeys
+ # the flags on open.
+ db1.open(self.filename, db.DB_HASH)
+ self.assertEqual([('a', 'A'), ('a', 'Aa')], db1.items())
+ # if it read the flags right this will replace all values
+ # for key 'a' instead of adding a new one. (as a dict should)
+ db1['a'] = 'new A'
+ self.assertEqual([('a', 'new A')], db1.items())
+ finally:
+ db1.close()
+ test_support.unlink(self.filename)
+
+
+ def test08_ExceptionTypes(self) :
+ self.assertTrue(issubclass(db.DBError, Exception))
+ for i, j in db.__dict__.items() :
+ if i.startswith("DB") and i.endswith("Error") :
+ self.assertTrue(issubclass(j, db.DBError), msg=i)
+ if i not in ("DBKeyEmptyError", "DBNotFoundError") :
+ self.assertFalse(issubclass(j, KeyError), msg=i)
+
+        # These two exceptions have two bases
+ self.assertTrue(issubclass(db.DBKeyEmptyError, KeyError))
+ self.assertTrue(issubclass(db.DBNotFoundError, KeyError))
+
+
+#----------------------------------------------------------------------
+
+
+def test_suite():
+ return unittest.makeSuite(MiscTestCase)
+
+
+if __name__ == '__main__':
+ unittest.main(defaultTest='test_suite')
diff --git a/lib/python2.7/bsddb/test/test_pickle.py b/lib/python2.7/bsddb/test/test_pickle.py
new file mode 100644
index 0000000..6a8478d
--- /dev/null
+++ b/lib/python2.7/bsddb/test/test_pickle.py
@@ -0,0 +1,68 @@
+
+import os
+import pickle
+import sys
+
+if sys.version_info[0] < 3 :
+ try:
+ import cPickle
+ except ImportError:
+ cPickle = None
+else :
+ cPickle = None
+
+import unittest
+
+from test_all import db, test_support, get_new_environment_path, get_new_database_path
+
+#----------------------------------------------------------------------
+
+class pickleTestCase(unittest.TestCase):
+ """Verify that DBError can be pickled and unpickled"""
+ db_name = 'test-dbobj.db'
+
+ def setUp(self):
+ self.homeDir = get_new_environment_path()
+
+ def tearDown(self):
+ if hasattr(self, 'db'):
+ del self.db
+ if hasattr(self, 'env'):
+ del self.env
+ test_support.rmtree(self.homeDir)
+
+ def _base_test_pickle_DBError(self, pickle):
+ self.env = db.DBEnv()
+ self.env.open(self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL)
+ self.db = db.DB(self.env)
+ self.db.open(self.db_name, db.DB_HASH, db.DB_CREATE)
+ self.db.put('spam', 'eggs')
+ self.assertEqual(self.db['spam'], 'eggs')
+ try:
+ self.db.put('spam', 'ham', flags=db.DB_NOOVERWRITE)
+ except db.DBError, egg:
+ pickledEgg = pickle.dumps(egg)
+ #print repr(pickledEgg)
+ rottenEgg = pickle.loads(pickledEgg)
+ if rottenEgg.args != egg.args or type(rottenEgg) != type(egg):
+ raise Exception, (rottenEgg, '!=', egg)
+ else:
+ raise Exception, "where's my DBError exception?!?"
+
+ self.db.close()
+ self.env.close()
+
+ def test01_pickle_DBError(self):
+ self._base_test_pickle_DBError(pickle=pickle)
+
+ if cPickle:
+ def test02_cPickle_DBError(self):
+ self._base_test_pickle_DBError(pickle=cPickle)
+
+#----------------------------------------------------------------------
+
+def test_suite():
+ return unittest.makeSuite(pickleTestCase)
+
+if __name__ == '__main__':
+ unittest.main(defaultTest='test_suite')
diff --git a/lib/python2.7/bsddb/test/test_queue.py b/lib/python2.7/bsddb/test/test_queue.py
new file mode 100644
index 0000000..5fa22ee
--- /dev/null
+++ b/lib/python2.7/bsddb/test/test_queue.py
@@ -0,0 +1,163 @@
+"""
+TestCases for exercising a Queue DB.
+"""
+
+import os, string
+from pprint import pprint
+import unittest
+
+from test_all import db, verbose, get_new_database_path
+
+#----------------------------------------------------------------------
+
+class SimpleQueueTestCase(unittest.TestCase):
+ def setUp(self):
+ self.filename = get_new_database_path()
+
+ def tearDown(self):
+ try:
+ os.remove(self.filename)
+ except os.error:
+ pass
+
+
+ def test01_basic(self):
+ # Basic Queue tests using the deprecated DBCursor.consume method.
+
+ if verbose:
+ print '\n', '-=' * 30
+ print "Running %s.test01_basic..." % self.__class__.__name__
+
+ d = db.DB()
+ d.set_re_len(40) # Queues must be fixed length
+ d.open(self.filename, db.DB_QUEUE, db.DB_CREATE)
+
+ if verbose:
+ print "before appends" + '-' * 30
+ pprint(d.stat())
+
+ for x in string.ascii_letters:
+ d.append(x * 40)
+
+ self.assertEqual(len(d), len(string.ascii_letters))
+
+ d.put(100, "some more data")
+ d.put(101, "and some more ")
+ d.put(75, "out of order")
+ d.put(1, "replacement data")
+
+ self.assertEqual(len(d), len(string.ascii_letters)+3)
+
+ if verbose:
+ print "before close" + '-' * 30
+ pprint(d.stat())
+
+ d.close()
+ del d
+ d = db.DB()
+ d.open(self.filename)
+
+ if verbose:
+ print "after open" + '-' * 30
+ pprint(d.stat())
+
+ # Test "txn" as a positional parameter
+ d.append("one more", None)
+ # Test "txn" as a keyword parameter
+ d.append("another one", txn=None)
+
+ c = d.cursor()
+
+ if verbose:
+ print "after append" + '-' * 30
+ pprint(d.stat())
+
+ rec = c.consume()
+ while rec:
+ if verbose:
+ print rec
+ rec = c.consume()
+ c.close()
+
+ if verbose:
+ print "after consume loop" + '-' * 30
+ pprint(d.stat())
+
+ self.assertEqual(len(d), 0, \
+ "if you see this message then you need to rebuild " \
+ "Berkeley DB 3.1.17 with the patch in patches/qam_stat.diff")
+
+ d.close()
+
+
+
+ def test02_basicPost32(self):
+ # Basic Queue tests using the new DB.consume method in DB 3.2+
+ # (No cursor needed)
+
+ if verbose:
+ print '\n', '-=' * 30
+ print "Running %s.test02_basicPost32..." % self.__class__.__name__
+
+ d = db.DB()
+ d.set_re_len(40) # Queues must be fixed length
+ d.open(self.filename, db.DB_QUEUE, db.DB_CREATE)
+
+ if verbose:
+ print "before appends" + '-' * 30
+ pprint(d.stat())
+
+ for x in string.ascii_letters:
+ d.append(x * 40)
+
+ self.assertEqual(len(d), len(string.ascii_letters))
+
+ d.put(100, "some more data")
+ d.put(101, "and some more ")
+ d.put(75, "out of order")
+ d.put(1, "replacement data")
+
+ self.assertEqual(len(d), len(string.ascii_letters)+3)
+
+ if verbose:
+ print "before close" + '-' * 30
+ pprint(d.stat())
+
+ d.close()
+ del d
+ d = db.DB()
+ d.open(self.filename)
+ #d.set_get_returns_none(true)
+
+ if verbose:
+ print "after open" + '-' * 30
+ pprint(d.stat())
+
+ d.append("one more")
+
+ if verbose:
+ print "after append" + '-' * 30
+ pprint(d.stat())
+
+ rec = d.consume()
+ while rec:
+ if verbose:
+ print rec
+ rec = d.consume()
+
+ if verbose:
+ print "after consume loop" + '-' * 30
+ pprint(d.stat())
+
+ d.close()
+
+
+
+#----------------------------------------------------------------------
+
+def test_suite():
+ return unittest.makeSuite(SimpleQueueTestCase)
+
+
+if __name__ == '__main__':
+ unittest.main(defaultTest='test_suite')
diff --git a/lib/python2.7/bsddb/test/test_recno.py b/lib/python2.7/bsddb/test/test_recno.py
new file mode 100644
index 0000000..b0e30de
--- /dev/null
+++ b/lib/python2.7/bsddb/test/test_recno.py
@@ -0,0 +1,319 @@
+"""TestCases for exercising a Recno DB.
+"""
+
+import os, sys
+import errno
+from pprint import pprint
+import string
+import unittest
+
+from test_all import db, test_support, verbose, get_new_environment_path, get_new_database_path
+
+
+#----------------------------------------------------------------------
+
class SimpleRecnoTestCase(unittest.TestCase):
    """Exercise basic Recno (record-number keyed) database behaviour."""

    # Some unittest assertion helpers are missing on Python <2.7 and
    # 3.0/3.1; provide minimal fallbacks so the tests run unchanged.
    if (sys.version_info < (2, 7)) or ((sys.version_info >= (3, 0)) and
        (sys.version_info < (3, 2))) :
        def assertIsInstance(self, obj, datatype, msg=None) :
            return self.assertEqual(type(obj), datatype, msg=msg)
        def assertGreaterEqual(self, a, b, msg=None) :
            return self.assertTrue(a>=b, msg=msg)

    def setUp(self):
        # Fresh database path per test; homeDir is only created by the
        # tests that need a backing-source environment (see test02).
        self.filename = get_new_database_path()
        self.homeDir = None

    def tearDown(self):
        test_support.unlink(self.filename)
        if self.homeDir:
            test_support.rmtree(self.homeDir)

    def test01_basic(self):
        """Append/get/delete/cursor behaviour of a plain recno DB."""
        d = db.DB()

        # Force "return None" mode for missing keys, remembering the
        # previous mode so assertions below can branch on it.
        get_returns_none = d.set_get_returns_none(2)
        d.set_get_returns_none(get_returns_none)

        d.open(self.filename, db.DB_RECNO, db.DB_CREATE)

        for x in string.ascii_letters:
            recno = d.append(x * 60)
            self.assertIsInstance(recno, int)
            self.assertGreaterEqual(recno, 1)
            if verbose:
                print recno,

        if verbose: print

        stat = d.stat()
        if verbose:
            pprint(stat)

        # Recno keys are 1-based.
        for recno in range(1, len(d)+1):
            data = d[recno]
            if verbose:
                print data

            self.assertIsInstance(data, str)
            self.assertEqual(data, d.get(recno))

        try:
            data = d[0]  # This should raise a KeyError!?!?!
        except db.DBInvalidArgError, val:
            # Exception args indexing moved to .args in 2.6.
            if sys.version_info < (2, 6) :
                self.assertEqual(val[0], db.EINVAL)
            else :
                self.assertEqual(val.args[0], db.EINVAL)
            if verbose: print val
        else:
            self.fail("expected exception")

        # test that has_key raises DB exceptions (fixed in pybsddb 4.3.2)
        try:
            d.has_key(0)
        except db.DBError, val:
            pass
        else:
            self.fail("has_key did not raise a proper exception")

        try:
            data = d[100]
        except KeyError:
            pass
        else:
            self.fail("expected exception")

        try:
            data = d.get(100)
        except db.DBNotFoundError, val:
            if get_returns_none:
                self.fail("unexpected exception")
        else:
            self.assertEqual(data, None)

        keys = d.keys()
        if verbose:
            print keys
        self.assertIsInstance(keys, list)
        self.assertIsInstance(keys[0], int)
        self.assertEqual(len(keys), len(d))

        items = d.items()
        if verbose:
            pprint(items)
        self.assertIsInstance(items, list)
        self.assertIsInstance(items[0], tuple)
        self.assertEqual(len(items[0]), 2)
        self.assertIsInstance(items[0][0], int)
        self.assertIsInstance(items[0][1], str)
        self.assertEqual(len(items), len(d))

        self.assertTrue(d.has_key(25))

        # Both deletion spellings must work.
        del d[25]
        self.assertFalse(d.has_key(25))

        d.delete(13)
        self.assertFalse(d.has_key(13))

        data = d.get_both(26, "z" * 60)
        self.assertEqual(data, "z" * 60, 'was %r' % data)
        if verbose:
            print data

        fd = d.fd()
        if verbose:
            print fd

        c = d.cursor()
        rec = c.first()
        while rec:
            if verbose:
                print rec
            rec = c.next()

        c.set(50)
        rec = c.current()
        if verbose:
            print rec

        # DB_CURRENT overwrites the record under the cursor; the key
        # argument (-1) is ignored in that mode.
        c.put(-1, "a replacement record", db.DB_CURRENT)

        c.set(50)
        rec = c.current()
        self.assertEqual(rec, (50, "a replacement record"))
        if verbose:
            print rec

        rec = c.set_range(30)
        if verbose:
            print rec

        # test that non-existent key lookups work (and that
        # DBC_set_range doesn't have a memleak under valgrind)
        rec = c.set_range(999999)
        self.assertEqual(rec, None)
        if verbose:
            print rec

        c.close()
        d.close()

        d = db.DB()
        d.open(self.filename)
        c = d.cursor()

        # put a record beyond the consecutive end of the recno's
        d[100] = "way out there"
        self.assertEqual(d[100], "way out there")

        # Records between the old end and 100 now exist but are "empty".
        try:
            data = d[99]
        except KeyError:
            pass
        else:
            self.fail("expected exception")

        try:
            d.get(99)
        except db.DBKeyEmptyError, val:
            if get_returns_none:
                self.fail("unexpected DBKeyEmptyError exception")
            else:
                if sys.version_info < (2, 6) :
                    self.assertEqual(val[0], db.DB_KEYEMPTY)
                else :
                    self.assertEqual(val.args[0], db.DB_KEYEMPTY)
                if verbose: print val
        else:
            if not get_returns_none:
                self.fail("expected exception")

        rec = c.set(40)
        while rec:
            if verbose:
                print rec
            rec = c.next()

        c.close()
        d.close()

    def test02_WithSource(self):
        """
        A Recno file that is given a "backing source file" is essentially a
        simple ASCII file. Normally each record is delimited by \n and so is
        just a line in the file, but you can set a different record delimiter
        if needed.
        """
        homeDir = get_new_environment_path()
        self.homeDir = homeDir
        source = os.path.join(homeDir, 'test_recno.txt')
        if not os.path.isdir(homeDir):
            os.mkdir(homeDir)
        f = open(source, 'w') # create the file
        f.close()

        d = db.DB()
        # This is the default value, just checking if both int
        d.set_re_delim(0x0A)
        d.set_re_delim('\n') # and char can be used...
        d.set_re_source(source)
        d.open(self.filename, db.DB_RECNO, db.DB_CREATE)

        data = "The quick brown fox jumped over the lazy dog".split()
        for datum in data:
            d.append(datum)
        d.sync()
        d.close()

        # get the text from the backing source
        f = open(source, 'r')
        text = f.read()
        f.close()
        text = text.strip()
        if verbose:
            print text
            print data
            print text.split('\n')

        self.assertEqual(text.split('\n'), data)

        # open as a DB again
        d = db.DB()
        d.set_re_source(source)
        d.open(self.filename, db.DB_RECNO)

        # Replacing records through the DB must be reflected in the
        # backing text file after a sync.
        d[3] = 'reddish-brown'
        d[8] = 'comatose'

        d.sync()
        d.close()

        f = open(source, 'r')
        text = f.read()
        f.close()
        text = text.strip()
        if verbose:
            print text
            print text.split('\n')

        self.assertEqual(text.split('\n'),
           "The quick reddish-brown fox jumped over the comatose dog".split())

    def test03_FixedLength(self):
        """Fixed-length recno: short records are padded, long ones rejected."""
        d = db.DB()
        d.set_re_len(40)  # fixed length records, 40 bytes long
        d.set_re_pad('-') # sets the pad character...
        d.set_re_pad(45)  # ...test both int and char
        d.open(self.filename, db.DB_RECNO, db.DB_CREATE)

        for x in string.ascii_letters:
            d.append(x * 35)    # These will be padded

        d.append('.' * 40)      # this one will be exact

        try:                    # this one will fail
            d.append('bad' * 20)
        except db.DBInvalidArgError, val:
            if sys.version_info < (2, 6) :
                self.assertEqual(val[0], db.EINVAL)
            else :
                self.assertEqual(val.args[0], db.EINVAL)
            if verbose: print val
        else:
            self.fail("expected exception")

        c = d.cursor()
        rec = c.first()
        while rec:
            if verbose:
                print rec
            rec = c.next()

        c.close()
        d.close()

    def test04_get_size_empty(self) :
        """get_size must report 0 for an empty record (not raise)."""
        d = db.DB()
        d.open(self.filename, dbtype=db.DB_RECNO, flags=db.DB_CREATE)

        row_id = d.append(' ')
        self.assertEqual(1, d.get_size(key=row_id))
        row_id = d.append('')
        self.assertEqual(0, d.get_size(key=row_id))
+
+
+
+
+
+#----------------------------------------------------------------------
+
+
def test_suite():
    """Collect every recno test in this module into a suite."""
    suite = unittest.makeSuite(SimpleRecnoTestCase)
    return suite
+
+
# Allow running this module directly as a script.
if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
diff --git a/lib/python2.7/bsddb/test/test_replication.py b/lib/python2.7/bsddb/test/test_replication.py
new file mode 100644
index 0000000..12ab2dd
--- /dev/null
+++ b/lib/python2.7/bsddb/test/test_replication.py
@@ -0,0 +1,543 @@
+"""TestCases for replication and the Replication Manager.
+"""
+
+import os
+import time
+import unittest
+
+from test_all import db, test_support, have_threads, verbose, \
+ get_new_environment_path, get_new_database_path
+
+
+#----------------------------------------------------------------------
+
class DBReplication(unittest.TestCase) :
    """Shared fixture: one master and one client environment wired for
    replication, with event callbacks that record replication progress
    in self.confirmed_master / self.client_startupdone."""

    def setUp(self) :
        self.homeDirMaster = get_new_environment_path()
        self.homeDirClient = get_new_environment_path()

        self.dbenvMaster = db.DBEnv()
        self.dbenvClient = db.DBEnv()

        # Must use "DB_THREAD" because the Replication Manager will
        # be executed in other threads but will use the same environment.
        # http://forums.oracle.com/forums/thread.jspa?threadID=645788&tstart=0
        self.dbenvMaster.open(self.homeDirMaster, db.DB_CREATE | db.DB_INIT_TXN
                | db.DB_INIT_LOG | db.DB_INIT_MPOOL | db.DB_INIT_LOCK |
                db.DB_INIT_REP | db.DB_RECOVER | db.DB_THREAD, 0666)
        self.dbenvClient.open(self.homeDirClient, db.DB_CREATE | db.DB_INIT_TXN
                | db.DB_INIT_LOG | db.DB_INIT_MPOOL | db.DB_INIT_LOCK |
                db.DB_INIT_REP | db.DB_RECOVER | db.DB_THREAD, 0666)

        # NOTE: the local callbacks below deliberately reuse the attribute
        # names; the callbacks set the instance attributes when the
        # corresponding replication event fires.
        self.confirmed_master=self.client_startupdone=False
        def confirmed_master(a,b,c) :
            if b==db.DB_EVENT_REP_MASTER :
                self.confirmed_master=True

        def client_startupdone(a,b,c) :
            if b==db.DB_EVENT_REP_STARTUPDONE :
                self.client_startupdone=True

        self.dbenvMaster.set_event_notify(confirmed_master)
        self.dbenvClient.set_event_notify(client_startupdone)

        #self.dbenvMaster.set_verbose(db.DB_VERB_REPLICATION, True)
        #self.dbenvMaster.set_verbose(db.DB_VERB_FILEOPS_ALL, True)
        #self.dbenvClient.set_verbose(db.DB_VERB_REPLICATION, True)
        #self.dbenvClient.set_verbose(db.DB_VERB_FILEOPS_ALL, True)

        self.dbMaster = self.dbClient = None

    def tearDown(self):
        # Close DB handles before their environments.
        if self.dbClient :
            self.dbClient.close()
        if self.dbMaster :
            self.dbMaster.close()

        # Here we assign dummy event handlers to allow GC of the test object.
        # Since the dummy handler doesn't use any outer scope variable, it
        # doesn't keep any reference to the test object.
        def dummy(*args) :
            pass
        self.dbenvMaster.set_event_notify(dummy)
        self.dbenvClient.set_event_notify(dummy)

        self.dbenvClient.close()
        self.dbenvMaster.close()
        test_support.rmtree(self.homeDirClient)
        test_support.rmtree(self.homeDirMaster)
+
+class DBReplicationManager(DBReplication) :
+ def test01_basic_replication(self) :
+ master_port = test_support.find_unused_port()
+ client_port = test_support.find_unused_port()
+ if db.version() >= (5, 2) :
+ self.site = self.dbenvMaster.repmgr_site("127.0.0.1", master_port)
+ self.site.set_config(db.DB_GROUP_CREATOR, True)
+ self.site.set_config(db.DB_LOCAL_SITE, True)
+ self.site2 = self.dbenvMaster.repmgr_site("127.0.0.1", client_port)
+
+ self.site3 = self.dbenvClient.repmgr_site("127.0.0.1", master_port)
+ self.site3.set_config(db.DB_BOOTSTRAP_HELPER, True)
+ self.site4 = self.dbenvClient.repmgr_site("127.0.0.1", client_port)
+ self.site4.set_config(db.DB_LOCAL_SITE, True)
+
+ d = {
+ db.DB_BOOTSTRAP_HELPER: [False, False, True, False],
+ db.DB_GROUP_CREATOR: [True, False, False, False],
+ db.DB_LEGACY: [False, False, False, False],
+ db.DB_LOCAL_SITE: [True, False, False, True],
+ db.DB_REPMGR_PEER: [False, False, False, False ],
+ }
+
+ for i, j in d.items() :
+ for k, v in \
+ zip([self.site, self.site2, self.site3, self.site4], j) :
+ if v :
+ self.assertTrue(k.get_config(i))
+ else :
+ self.assertFalse(k.get_config(i))
+
+ self.assertNotEqual(self.site.get_eid(), self.site2.get_eid())
+ self.assertNotEqual(self.site3.get_eid(), self.site4.get_eid())
+
+ for i, j in zip([self.site, self.site2, self.site3, self.site4], \
+ [master_port, client_port, master_port, client_port]) :
+ addr = i.get_address()
+ self.assertEqual(addr, ("127.0.0.1", j))
+
+ for i in [self.site, self.site2] :
+ self.assertEqual(i.get_address(),
+ self.dbenvMaster.repmgr_site_by_eid(i.get_eid()).get_address())
+ for i in [self.site3, self.site4] :
+ self.assertEqual(i.get_address(),
+ self.dbenvClient.repmgr_site_by_eid(i.get_eid()).get_address())
+ else :
+ self.dbenvMaster.repmgr_set_local_site("127.0.0.1", master_port)
+ self.dbenvClient.repmgr_set_local_site("127.0.0.1", client_port)
+ self.dbenvMaster.repmgr_add_remote_site("127.0.0.1", client_port)
+ self.dbenvClient.repmgr_add_remote_site("127.0.0.1", master_port)
+
+ self.dbenvMaster.rep_set_nsites(2)
+ self.dbenvClient.rep_set_nsites(2)
+
+ self.dbenvMaster.rep_set_priority(10)
+ self.dbenvClient.rep_set_priority(0)
+
+ self.dbenvMaster.rep_set_timeout(db.DB_REP_CONNECTION_RETRY,100123)
+ self.dbenvClient.rep_set_timeout(db.DB_REP_CONNECTION_RETRY,100321)
+ self.assertEqual(self.dbenvMaster.rep_get_timeout(
+ db.DB_REP_CONNECTION_RETRY), 100123)
+ self.assertEqual(self.dbenvClient.rep_get_timeout(
+ db.DB_REP_CONNECTION_RETRY), 100321)
+
+ self.dbenvMaster.rep_set_timeout(db.DB_REP_ELECTION_TIMEOUT, 100234)
+ self.dbenvClient.rep_set_timeout(db.DB_REP_ELECTION_TIMEOUT, 100432)
+ self.assertEqual(self.dbenvMaster.rep_get_timeout(
+ db.DB_REP_ELECTION_TIMEOUT), 100234)
+ self.assertEqual(self.dbenvClient.rep_get_timeout(
+ db.DB_REP_ELECTION_TIMEOUT), 100432)
+
+ self.dbenvMaster.rep_set_timeout(db.DB_REP_ELECTION_RETRY, 100345)
+ self.dbenvClient.rep_set_timeout(db.DB_REP_ELECTION_RETRY, 100543)
+ self.assertEqual(self.dbenvMaster.rep_get_timeout(
+ db.DB_REP_ELECTION_RETRY), 100345)
+ self.assertEqual(self.dbenvClient.rep_get_timeout(
+ db.DB_REP_ELECTION_RETRY), 100543)
+
+ self.dbenvMaster.repmgr_set_ack_policy(db.DB_REPMGR_ACKS_ALL)
+ self.dbenvClient.repmgr_set_ack_policy(db.DB_REPMGR_ACKS_ALL)
+
+ self.dbenvMaster.repmgr_start(1, db.DB_REP_MASTER);
+ self.dbenvClient.repmgr_start(1, db.DB_REP_CLIENT);
+
+ self.assertEqual(self.dbenvMaster.rep_get_nsites(),2)
+ self.assertEqual(self.dbenvClient.rep_get_nsites(),2)
+ self.assertEqual(self.dbenvMaster.rep_get_priority(),10)
+ self.assertEqual(self.dbenvClient.rep_get_priority(),0)
+ self.assertEqual(self.dbenvMaster.repmgr_get_ack_policy(),
+ db.DB_REPMGR_ACKS_ALL)
+ self.assertEqual(self.dbenvClient.repmgr_get_ack_policy(),
+ db.DB_REPMGR_ACKS_ALL)
+
+ # The timeout is necessary in BDB 4.5, since DB_EVENT_REP_STARTUPDONE
+ # is not generated if the master has no new transactions.
+ # This is solved in BDB 4.6 (#15542).
+ import time
+ timeout = time.time()+60
+ while (time.time()<timeout) and not (self.confirmed_master and self.client_startupdone) :
+ time.sleep(0.02)
+ # self.client_startupdone does not always get set to True within
+ # the timeout. On windows this may be a deep issue, on other
+ # platforms it is likely just a timing issue, especially on slow
+ # virthost buildbots (see issue 3892 for more). Even though
+ # the timeout triggers, the rest of this test method usually passes
+ # (but not all of it always, see below). So we just note the
+ # timeout on stderr and keep soldering on.
+ if time.time()>timeout:
+ import sys
+ print >> sys.stderr, ("XXX: timeout happened before"
+ "startup was confirmed - see issue 3892")
+ startup_timeout = True
+
+ d = self.dbenvMaster.repmgr_site_list()
+ self.assertEqual(len(d), 1)
+ d = d.values()[0] # There is only one
+ self.assertEqual(d[0], "127.0.0.1")
+ self.assertEqual(d[1], client_port)
+ self.assertTrue((d[2]==db.DB_REPMGR_CONNECTED) or \
+ (d[2]==db.DB_REPMGR_DISCONNECTED))
+
+ d = self.dbenvClient.repmgr_site_list()
+ self.assertEqual(len(d), 1)
+ d = d.values()[0] # There is only one
+ self.assertEqual(d[0], "127.0.0.1")
+ self.assertEqual(d[1], master_port)
+ self.assertTrue((d[2]==db.DB_REPMGR_CONNECTED) or \
+ (d[2]==db.DB_REPMGR_DISCONNECTED))
+
+ if db.version() >= (4,6) :
+ d = self.dbenvMaster.repmgr_stat(flags=db.DB_STAT_CLEAR);
+ self.assertTrue("msgs_queued" in d)
+
+ self.dbMaster=db.DB(self.dbenvMaster)
+ txn=self.dbenvMaster.txn_begin()
+ self.dbMaster.open("test", db.DB_HASH, db.DB_CREATE, 0666, txn=txn)
+ txn.commit()
+
+ import time,os.path
+ timeout=time.time()+10
+ while (time.time()<timeout) and \
+ not (os.path.exists(os.path.join(self.homeDirClient,"test"))) :
+ time.sleep(0.01)
+
+ self.dbClient=db.DB(self.dbenvClient)
+ while True :
+ txn=self.dbenvClient.txn_begin()
+ try :
+ self.dbClient.open("test", db.DB_HASH, flags=db.DB_RDONLY,
+ mode=0666, txn=txn)
+ except db.DBRepHandleDeadError :
+ txn.abort()
+ self.dbClient.close()
+ self.dbClient=db.DB(self.dbenvClient)
+ continue
+
+ txn.commit()
+ break
+
+ txn=self.dbenvMaster.txn_begin()
+ self.dbMaster.put("ABC", "123", txn=txn)
+ txn.commit()
+ import time
+ timeout=time.time()+10
+ v=None
+ while (time.time()<timeout) and (v is None) :
+ txn=self.dbenvClient.txn_begin()
+ v=self.dbClient.get("ABC", txn=txn)
+ txn.commit()
+ if v is None :
+ time.sleep(0.02)
+ # If startup did not happen before the timeout above, then this test
+ # sometimes fails. This happens randomly, which causes buildbot
+ # instability, but all the other bsddb tests pass. Since bsddb3 in the
+ # stdlib is currently not getting active maintenance, and is gone in
+ # py3k, we just skip the end of the test in that case.
+ if time.time()>=timeout and startup_timeout:
+ self.skipTest("replication test skipped due to random failure, "
+ "see issue 3892")
+ self.assertTrue(time.time()<timeout)
+ self.assertEqual("123", v)
+
+ txn=self.dbenvMaster.txn_begin()
+ self.dbMaster.delete("ABC", txn=txn)
+ txn.commit()
+ timeout=time.time()+10
+ while (time.time()<timeout) and (v is not None) :
+ txn=self.dbenvClient.txn_begin()
+ v=self.dbClient.get("ABC", txn=txn)
+ txn.commit()
+ if v is None :
+ time.sleep(0.02)
+ self.assertTrue(time.time()<timeout)
+ self.assertEqual(None, v)
+
class DBBaseReplication(DBReplication) :
    """Base-API replication: messages are pumped between master and client
    by two daemon threads over a pair of Queues instead of the
    Replication Manager's own network layer."""

    def setUp(self) :
        DBReplication.setUp(self)
        # Override the event callbacks: with the base API an election win
        # (DB_EVENT_REP_ELECTED) also confirms mastership.
        def confirmed_master(a,b,c) :
            if (b == db.DB_EVENT_REP_MASTER) or (b == db.DB_EVENT_REP_ELECTED) :
                self.confirmed_master = True

        def client_startupdone(a,b,c) :
            if b == db.DB_EVENT_REP_STARTUPDONE :
                self.client_startupdone = True

        self.dbenvMaster.set_event_notify(confirmed_master)
        self.dbenvClient.set_event_notify(client_startupdone)

        # Message channels: master-to-client and client-to-master.
        import Queue
        self.m2c = Queue.Queue()
        self.c2m = Queue.Queue()

        # There are only two nodes, so we don't need to
        # do any routing decision
        def m2c(dbenv, control, rec, lsnp, envid, flags) :
            self.m2c.put((control, rec))

        def c2m(dbenv, control, rec, lsnp, envid, flags) :
            self.c2m.put((control, rec))

        # Arbitrary but consistent envids: master=13, client=3.
        self.dbenvMaster.rep_set_transport(13,m2c)
        self.dbenvMaster.rep_set_priority(10)
        self.dbenvClient.rep_set_transport(3,c2m)
        self.dbenvClient.rep_set_priority(0)

        self.assertEqual(self.dbenvMaster.rep_get_priority(),10)
        self.assertEqual(self.dbenvClient.rep_get_priority(),0)

        #self.dbenvMaster.set_verbose(db.DB_VERB_REPLICATION, True)
        #self.dbenvMaster.set_verbose(db.DB_VERB_FILEOPS_ALL, True)
        #self.dbenvClient.set_verbose(db.DB_VERB_REPLICATION, True)
        #self.dbenvClient.set_verbose(db.DB_VERB_FILEOPS_ALL, True)

        # Each pump thread drains the *incoming* queue of its node; the
        # actual loop body (self.thread_do) is installed later by the test.
        def thread_master() :
            return self.thread_do(self.dbenvMaster, self.c2m, 3,
                    self.master_doing_election, True)

        def thread_client() :
            return self.thread_do(self.dbenvClient, self.m2c, 13,
                    self.client_doing_election, False)

        from threading import Thread
        t_m=Thread(target=thread_master)
        t_c=Thread(target=thread_client)
        import sys
        if sys.version_info[0] < 3 :
            t_m.setDaemon(True)
            t_c.setDaemon(True)
        else :
            t_m.daemon = True
            t_c.daemon = True

        self.t_m = t_m
        self.t_c = t_c

        self.dbMaster = self.dbClient = None

        # One-element lists so nested functions can mutate the flag.
        self.master_doing_election=[False]
        self.client_doing_election=[False]

    def tearDown(self):
        if self.dbClient :
            self.dbClient.close()
        if self.dbMaster :
            self.dbMaster.close()
        # A None message tells each pump thread to exit.
        self.m2c.put(None)
        self.c2m.put(None)
        self.t_m.join()
        self.t_c.join()

        # Here we assign dummy event handlers to allow GC of the test object.
        # Since the dummy handler doesn't use any outer scope variable, it
        # doesn't keep any reference to the test object.
        def dummy(*args) :
            pass
        self.dbenvMaster.set_event_notify(dummy)
        self.dbenvClient.set_event_notify(dummy)
        self.dbenvMaster.rep_set_transport(13,dummy)
        self.dbenvClient.rep_set_transport(3,dummy)

        self.dbenvClient.close()
        self.dbenvMaster.close()
        test_support.rmtree(self.homeDirClient)
        test_support.rmtree(self.homeDirMaster)

    def basic_rep_threading(self) :
        # Start both nodes and install a minimal message pump that simply
        # forwards every queued message into rep_process_message.
        self.dbenvMaster.rep_start(flags=db.DB_REP_MASTER)
        self.dbenvClient.rep_start(flags=db.DB_REP_CLIENT)

        def thread_do(env, q, envid, election_status, must_be_master) :
            while True :
                v=q.get()
                if v is None : return
                env.rep_process_message(v[0], v[1], envid)

        self.thread_do = thread_do

        self.t_m.start()
        self.t_c.start()

    def test01_basic_replication(self) :
        self.basic_rep_threading()

        # The timeout is necessary in BDB 4.5, since DB_EVENT_REP_STARTUPDONE
        # is not generated if the master has no new transactions.
        # This is solved in BDB 4.6 (#15542).
        import time
        timeout = time.time()+60
        while (time.time()<timeout) and not (self.confirmed_master and
                self.client_startupdone) :
            time.sleep(0.02)
        self.assertTrue(time.time()<timeout)

        self.dbMaster=db.DB(self.dbenvMaster)
        txn=self.dbenvMaster.txn_begin()
        self.dbMaster.open("test", db.DB_HASH, db.DB_CREATE, 0666, txn=txn)
        txn.commit()

        # Wait for the database file to appear in the client environment.
        import time,os.path
        timeout=time.time()+10
        while (time.time()<timeout) and \
                not (os.path.exists(os.path.join(self.homeDirClient,"test"))) :
            time.sleep(0.01)

        # Retry the client open on DBRepHandleDeadError with a fresh handle.
        self.dbClient=db.DB(self.dbenvClient)
        while True :
            txn=self.dbenvClient.txn_begin()
            try :
                self.dbClient.open("test", db.DB_HASH, flags=db.DB_RDONLY,
                        mode=0666, txn=txn)
            except db.DBRepHandleDeadError :
                txn.abort()
                self.dbClient.close()
                self.dbClient=db.DB(self.dbenvClient)
                continue

            txn.commit()
            break

        d = self.dbenvMaster.rep_stat(flags=db.DB_STAT_CLEAR);
        self.assertTrue("master_changes" in d)

        # Insert on the master, poll the client until it arrives.
        txn=self.dbenvMaster.txn_begin()
        self.dbMaster.put("ABC", "123", txn=txn)
        txn.commit()
        import time
        timeout=time.time()+10
        v=None
        while (time.time()<timeout) and (v is None) :
            txn=self.dbenvClient.txn_begin()
            v=self.dbClient.get("ABC", txn=txn)
            txn.commit()
            if v is None :
                time.sleep(0.02)
        self.assertTrue(time.time()<timeout)
        self.assertEqual("123", v)

        # Delete on the master, poll the client until it is gone.
        txn=self.dbenvMaster.txn_begin()
        self.dbMaster.delete("ABC", txn=txn)
        txn.commit()
        timeout=time.time()+10
        while (time.time()<timeout) and (v is not None) :
            txn=self.dbenvClient.txn_begin()
            v=self.dbClient.get("ABC", txn=txn)
            txn.commit()
            # NOTE(review): this guard looks inverted — it sleeps only once
            # the record is already gone (loop about to exit), busy-spinning
            # meanwhile; presumably it should be "is not None". Confirm.
            if v is None :
                time.sleep(0.02)
        self.assertTrue(time.time()<timeout)
        self.assertEqual(None, v)

    # rep_get_request/rep_set_request only exist from BDB 4.7 on.
    if db.version() >= (4,7) :
        def test02_test_request(self) :
            self.basic_rep_threading()
            (minimum, maximum) = self.dbenvClient.rep_get_request()
            self.dbenvClient.rep_set_request(minimum-1, maximum+1)
            self.assertEqual(self.dbenvClient.rep_get_request(),
                    (minimum-1, maximum+1))

    if db.version() >= (4,6) :
        def test03_master_election(self) :
            # Get ready to hold an election
            #self.dbenvMaster.rep_start(flags=db.DB_REP_MASTER)
            self.dbenvMaster.rep_start(flags=db.DB_REP_CLIENT)
            self.dbenvClient.rep_start(flags=db.DB_REP_CLIENT)

            # Message pump that, in addition to forwarding messages,
            # promotes the master after it wins and spawns an election
            # thread when BDB asks for one (DB_REP_HOLDELECTION).
            def thread_do(env, q, envid, election_status, must_be_master) :
                while True :
                    v=q.get()
                    if v is None : return
                    r = env.rep_process_message(v[0],v[1],envid)
                    if must_be_master and self.confirmed_master :
                        self.dbenvMaster.rep_start(flags = db.DB_REP_MASTER)
                        must_be_master = False

                    if r[0] == db.DB_REP_HOLDELECTION :
                        def elect() :
                            while True :
                                try :
                                    env.rep_elect(2, 1)
                                    election_status[0] = False
                                    break
                                except db.DBRepUnavailError :
                                    pass
                        if not election_status[0] and not self.confirmed_master :
                            from threading import Thread
                            election_status[0] = True
                            t=Thread(target=elect)
                            import sys
                            if sys.version_info[0] < 3 :
                                t.setDaemon(True)
                            else :
                                t.daemon = True
                            t.start()

            self.thread_do = thread_do

            self.t_m.start()
            self.t_c.start()

            self.dbenvMaster.rep_set_timeout(db.DB_REP_ELECTION_TIMEOUT, 50000)
            self.dbenvClient.rep_set_timeout(db.DB_REP_ELECTION_TIMEOUT, 50000)
            self.client_doing_election[0] = True
            while True :
                try :
                    self.dbenvClient.rep_elect(2, 1)
                    self.client_doing_election[0] = False
                    break
                except db.DBRepUnavailError :
                    pass

            self.assertTrue(self.confirmed_master)

            # Race condition showed up after upgrading to Solaris 10 Update 10
            # https://forums.oracle.com/forums/thread.jspa?messageID=9902860
            # jcea@jcea.es: See private email from Paula Bingham (Oracle),
            # in 20110929.
            while not (self.dbenvClient.rep_stat()["startup_complete"]) :
                pass

    if db.version() >= (4,7) :
        def test04_test_clockskew(self) :
            fast, slow = 1234, 1230
            self.dbenvMaster.rep_set_clockskew(fast, slow)
            self.assertEqual((fast, slow),
                    self.dbenvMaster.rep_get_clockskew())
            self.basic_rep_threading()
+
+#----------------------------------------------------------------------
+
def test_suite():
    """Assemble the replication suite, probing a scratch environment for
    Replication Manager support before adding those tests."""
    suite = unittest.TestSuite()
    if db.version() >= (4, 6):
        # repmgr_get_ack_policy only works when Berkeley DB was built
        # with the Replication Manager compiled in.
        probe_env = db.DBEnv()
        try:
            probe_env.repmgr_get_ack_policy()
            repmgr_available = True
        except:
            repmgr_available = False
        probe_env.close()
        del probe_env

        if repmgr_available:
            suite.addTest(unittest.makeSuite(DBReplicationManager))
        if have_threads:
            suite.addTest(unittest.makeSuite(DBBaseReplication))
    return suite
+
+
# Allow running this module directly as a script.
if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
diff --git a/lib/python2.7/bsddb/test/test_sequence.py b/lib/python2.7/bsddb/test/test_sequence.py
new file mode 100644
index 0000000..f0aa12a
--- /dev/null
+++ b/lib/python2.7/bsddb/test/test_sequence.py
@@ -0,0 +1,136 @@
+import unittest
+import os
+
+from test_all import db, test_support, get_new_environment_path, get_new_database_path
+
+
class DBSequenceTest(unittest.TestCase):
    """Exercise the DBSequence API: get, ranges, caching, stats, 64-bit."""

    def setUp(self):
        self.int_32_max = 0x100000000   # 2**32, one past the 32-bit range
        self.homeDir = get_new_environment_path()
        self.filename = "test"

        self.dbenv = db.DBEnv()
        self.dbenv.open(self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL, 0666)
        self.d = db.DB(self.dbenv)
        self.d.open(self.filename, db.DB_BTREE, db.DB_CREATE, 0666)

    def tearDown(self):
        # Close in dependency order: sequence -> database -> environment.
        if hasattr(self, 'seq'):
            self.seq.close()
            del self.seq
        if hasattr(self, 'd'):
            self.d.close()
            del self.d
        if hasattr(self, 'dbenv'):
            self.dbenv.close()
            del self.dbenv

        test_support.rmtree(self.homeDir)

    def test_get(self):
        # Start beyond 32 bits to exercise wide sequence values.
        self.seq = db.DBSequence(self.d, flags=0)
        start_value = 10 * self.int_32_max
        self.assertEqual(0xA00000000, start_value)
        self.assertEqual(None, self.seq.initial_value(start_value))
        self.assertEqual(None, self.seq.open(key='id', txn=None, flags=db.DB_CREATE))
        self.assertEqual(start_value, self.seq.get(5))
        self.assertEqual(start_value + 5, self.seq.get())

    def test_remove(self):
        self.seq = db.DBSequence(self.d, flags=0)
        self.assertEqual(None, self.seq.open(key='foo', txn=None, flags=db.DB_CREATE))
        self.assertEqual(None, self.seq.remove(txn=None, flags=0))
        # Drop the attribute so tearDown doesn't close a removed sequence.
        del self.seq

    def test_get_key(self):
        self.seq = db.DBSequence(self.d, flags=0)
        key = 'foo'
        self.assertEqual(None, self.seq.open(key=key, txn=None, flags=db.DB_CREATE))
        self.assertEqual(key, self.seq.get_key())

    def test_get_dbp(self):
        self.seq = db.DBSequence(self.d, flags=0)
        self.assertEqual(None, self.seq.open(key='foo', txn=None, flags=db.DB_CREATE))
        self.assertEqual(self.d, self.seq.get_dbp())

    def test_cachesize(self):
        self.seq = db.DBSequence(self.d, flags=0)
        cashe_size = 10     # (sic) local name kept as-is
        self.assertEqual(None, self.seq.set_cachesize(cashe_size))
        self.assertEqual(None, self.seq.open(key='foo', txn=None, flags=db.DB_CREATE))
        self.assertEqual(cashe_size, self.seq.get_cachesize())

    def test_flags(self):
        self.seq = db.DBSequence(self.d, flags=0)
        flag = db.DB_SEQ_WRAP;
        self.assertEqual(None, self.seq.set_flags(flag))
        self.assertEqual(None, self.seq.open(key='foo', txn=None, flags=db.DB_CREATE))
        self.assertEqual(flag, self.seq.get_flags() & flag)

    def test_range(self):
        self.seq = db.DBSequence(self.d, flags=0)
        seq_range = (10 * self.int_32_max, 11 * self.int_32_max - 1)
        self.assertEqual(None, self.seq.set_range(seq_range))
        self.seq.initial_value(seq_range[0])
        self.assertEqual(None, self.seq.open(key='foo', txn=None, flags=db.DB_CREATE))
        self.assertEqual(seq_range, self.seq.get_range())

    def test_stat(self):
        self.seq = db.DBSequence(self.d, flags=0)
        self.assertEqual(None, self.seq.open(key='foo', txn=None, flags=db.DB_CREATE))
        stat = self.seq.stat()
        for param in ('nowait', 'min', 'max', 'value', 'current',
                'flags', 'cache_size', 'last_value', 'wait'):
            self.assertTrue(param in stat, "parameter %s isn't in stat info" % param)

    if db.version() >= (4,7) :
        # This code checks a crash solved in Berkeley DB 4.7
        def test_stat_crash(self) :
            d=db.DB()
            d.open(None,dbtype=db.DB_HASH,flags=db.DB_CREATE) # In RAM
            seq = db.DBSequence(d, flags=0)

            self.assertRaises(db.DBNotFoundError, seq.open,
                    key='id', txn=None, flags=0)

            self.assertRaises(db.DBInvalidArgError, seq.stat)

            d.close()

    def test_64bits(self) :
        # We don't use both extremes because they are problematic
        value_plus=(1L<<63)-2
        self.assertEqual(9223372036854775806L,value_plus)
        value_minus=(-1L<<63)+1 # Two complement
        self.assertEqual(-9223372036854775807L,value_minus)
        self.seq = db.DBSequence(self.d, flags=0)
        self.assertEqual(None, self.seq.initial_value(value_plus-1))
        self.assertEqual(None, self.seq.open(key='id', txn=None,
            flags=db.DB_CREATE))
        self.assertEqual(value_plus-1, self.seq.get(1))
        self.assertEqual(value_plus, self.seq.get(1))

        self.seq.remove(txn=None, flags=0)

        # Recreate the sequence, counting from the negative extreme.
        self.seq = db.DBSequence(self.d, flags=0)
        self.assertEqual(None, self.seq.initial_value(value_minus))
        self.assertEqual(None, self.seq.open(key='id', txn=None,
            flags=db.DB_CREATE))
        self.assertEqual(value_minus, self.seq.get(1))
        self.assertEqual(value_minus+1, self.seq.get(1))

    def test_multiple_close(self):
        self.seq = db.DBSequence(self.d)
        self.seq.close() # You can close a Sequence multiple times
        self.seq.close()
        self.seq.close()
+
def test_suite():
    """Return all DBSequence tests as a single suite."""
    return unittest.TestSuite((unittest.makeSuite(DBSequenceTest),))
+
+
# Allow running this module directly as a script.
if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
diff --git a/lib/python2.7/bsddb/test/test_thread.py b/lib/python2.7/bsddb/test/test_thread.py
new file mode 100644
index 0000000..42212e9
--- /dev/null
+++ b/lib/python2.7/bsddb/test/test_thread.py
@@ -0,0 +1,517 @@
+"""TestCases for multi-threaded access to a DB.
+"""
+
+import os
+import sys
+import time
+import errno
+from random import random
+
# Separator used by makeData() to build verifiable record payloads.
DASH = '-'

try:
    WindowsError
except NameError:
    # On non-Windows platforms define a stand-in so "except WindowsError"
    # clauses elsewhere remain valid.
    class WindowsError(Exception):
        pass
+
+import unittest
+from test_all import db, dbutils, test_support, verbose, have_threads, \
+ get_new_environment_path, get_new_database_path
+
+if have_threads :
+ from threading import Thread
+ if sys.version_info[0] < 3 :
+ from threading import currentThread
+ else :
+ from threading import current_thread as currentThread
+
+
+#----------------------------------------------------------------------
+
class BaseThreadedTestCase(unittest.TestCase):
    """Common fixture: create a DBEnv and a DB for the threaded tests.

    Derived classes override the class attributes below and, optionally,
    setEnvOpts() to tune the environment before it is opened.
    """
    dbtype = db.DB_UNKNOWN  # must be set in derived class
    dbopenflags = 0  # extra flags for DB.open()
    dbsetflags = 0  # flags for DB.set_flags(); applied only when non-zero
    envflags = 0  # extra flags for DBEnv.open()

    def setUp(self):
        """Create a fresh environment/database pair for one test."""
        if verbose:
            # Route deadlock-retry diagnostics to stdout in verbose runs.
            dbutils._deadlock_VerboseFile = sys.stdout

        self.homeDir = get_new_environment_path()
        self.env = db.DBEnv()
        # Give subclasses a chance to configure the env before opening it.
        self.setEnvOpts()
        self.env.open(self.homeDir, self.envflags | db.DB_CREATE)

        self.filename = self.__class__.__name__ + '.db'
        self.d = db.DB(self.env)
        if self.dbsetflags:
            self.d.set_flags(self.dbsetflags)
        self.d.open(self.filename, self.dbtype, self.dbopenflags|db.DB_CREATE)

    def tearDown(self):
        """Close the database and environment, then remove the home dir."""
        self.d.close()
        self.env.close()
        test_support.rmtree(self.homeDir)

    def setEnvOpts(self):
        """Hook for subclasses; called before DBEnv.open()."""
        pass

    def makeData(self, key):
        """Return a deterministic payload derived from 'key'."""
        return DASH.join([key] * 5)
+
+
+#----------------------------------------------------------------------
+
+
+class ConcurrentDataStoreBase(BaseThreadedTestCase):
+ dbopenflags = db.DB_THREAD
+ envflags = db.DB_THREAD | db.DB_INIT_CDB | db.DB_INIT_MPOOL
+ readers = 0 # derived class should set
+ writers = 0
+ records = 1000
+
+ def test01_1WriterMultiReaders(self):
+ if verbose:
+ print '\n', '-=' * 30
+ print "Running %s.test01_1WriterMultiReaders..." % \
+ self.__class__.__name__
+
+ keys=range(self.records)
+ import random
+ random.shuffle(keys)
+ records_per_writer=self.records//self.writers
+ readers_per_writer=self.readers//self.writers
+ self.assertEqual(self.records,self.writers*records_per_writer)
+ self.assertEqual(self.readers,self.writers*readers_per_writer)
+ self.assertTrue((records_per_writer%readers_per_writer)==0)
+ readers = []
+
+ for x in xrange(self.readers):
+ rt = Thread(target = self.readerThread,
+ args = (self.d, x),
+ name = 'reader %d' % x,
+ )#verbose = verbose)
+ if sys.version_info[0] < 3 :
+ rt.setDaemon(True)
+ else :
+ rt.daemon = True
+ readers.append(rt)
+
+ writers=[]
+ for x in xrange(self.writers):
+ a=keys[records_per_writer*x:records_per_writer*(x+1)]
+ a.sort() # Generate conflicts
+ b=readers[readers_per_writer*x:readers_per_writer*(x+1)]
+ wt = Thread(target = self.writerThread,
+ args = (self.d, a, b),
+ name = 'writer %d' % x,
+ )#verbose = verbose)
+ writers.append(wt)
+
+ for t in writers:
+ if sys.version_info[0] < 3 :
+ t.setDaemon(True)
+ else :
+ t.daemon = True
+ t.start()
+
+ for t in writers:
+ t.join()
+ for t in readers:
+ t.join()
+
+ def writerThread(self, d, keys, readers):
+ if sys.version_info[0] < 3 :
+ name = currentThread().getName()
+ else :
+ name = currentThread().name
+
+ if verbose:
+ print "%s: creating records %d - %d" % (name, start, stop)
+
+ count=len(keys)//len(readers)
+ count2=count
+ for x in keys :
+ key = '%04d' % x
+ dbutils.DeadlockWrap(d.put, key, self.makeData(key),
+ max_retries=12)
+ if verbose and x % 100 == 0:
+ print "%s: records %d - %d finished" % (name, start, x)
+
+ count2-=1
+ if not count2 :
+ readers.pop().start()
+ count2=count
+
+ if verbose:
+ print "%s: finished creating records" % name
+
+ if verbose:
+ print "%s: thread finished" % name
+
+ def readerThread(self, d, readerNum):
+ if sys.version_info[0] < 3 :
+ name = currentThread().getName()
+ else :
+ name = currentThread().name
+
+ for i in xrange(5) :
+ c = d.cursor()
+ count = 0
+ rec = c.first()
+ while rec:
+ count += 1
+ key, data = rec
+ self.assertEqual(self.makeData(key), data)
+ rec = c.next()
+ if verbose:
+ print "%s: found %d records" % (name, count)
+ c.close()
+
+ if verbose:
+ print "%s: thread finished" % name
+
+
# CDB matrix entry: BTree access method, 2 writers, 10 readers.
class BTreeConcurrentDataStore(ConcurrentDataStoreBase):
    dbtype = db.DB_BTREE
    writers = 2
    readers = 10
    records = 1000
+
+
# CDB matrix entry: Hash access method, 2 writers, 10 readers.
class HashConcurrentDataStore(ConcurrentDataStoreBase):
    dbtype = db.DB_HASH
    writers = 2
    readers = 10
    records = 1000
+
+
+#----------------------------------------------------------------------
+
+class SimpleThreadedBase(BaseThreadedTestCase):
+ dbopenflags = db.DB_THREAD
+ envflags = db.DB_THREAD | db.DB_INIT_MPOOL | db.DB_INIT_LOCK
+ readers = 10
+ writers = 2
+ records = 1000
+
+ def setEnvOpts(self):
+ self.env.set_lk_detect(db.DB_LOCK_DEFAULT)
+
+ def test02_SimpleLocks(self):
+ if verbose:
+ print '\n', '-=' * 30
+ print "Running %s.test02_SimpleLocks..." % self.__class__.__name__
+
+
+ keys=range(self.records)
+ import random
+ random.shuffle(keys)
+ records_per_writer=self.records//self.writers
+ readers_per_writer=self.readers//self.writers
+ self.assertEqual(self.records,self.writers*records_per_writer)
+ self.assertEqual(self.readers,self.writers*readers_per_writer)
+ self.assertTrue((records_per_writer%readers_per_writer)==0)
+
+ readers = []
+ for x in xrange(self.readers):
+ rt = Thread(target = self.readerThread,
+ args = (self.d, x),
+ name = 'reader %d' % x,
+ )#verbose = verbose)
+ if sys.version_info[0] < 3 :
+ rt.setDaemon(True)
+ else :
+ rt.daemon = True
+ readers.append(rt)
+
+ writers = []
+ for x in xrange(self.writers):
+ a=keys[records_per_writer*x:records_per_writer*(x+1)]
+ a.sort() # Generate conflicts
+ b=readers[readers_per_writer*x:readers_per_writer*(x+1)]
+ wt = Thread(target = self.writerThread,
+ args = (self.d, a, b),
+ name = 'writer %d' % x,
+ )#verbose = verbose)
+ writers.append(wt)
+
+ for t in writers:
+ if sys.version_info[0] < 3 :
+ t.setDaemon(True)
+ else :
+ t.daemon = True
+ t.start()
+
+ for t in writers:
+ t.join()
+ for t in readers:
+ t.join()
+
+ def writerThread(self, d, keys, readers):
+ if sys.version_info[0] < 3 :
+ name = currentThread().getName()
+ else :
+ name = currentThread().name
+ if verbose:
+ print "%s: creating records %d - %d" % (name, start, stop)
+
+ count=len(keys)//len(readers)
+ count2=count
+ for x in keys :
+ key = '%04d' % x
+ dbutils.DeadlockWrap(d.put, key, self.makeData(key),
+ max_retries=12)
+
+ if verbose and x % 100 == 0:
+ print "%s: records %d - %d finished" % (name, start, x)
+
+ count2-=1
+ if not count2 :
+ readers.pop().start()
+ count2=count
+
+ if verbose:
+ print "%s: thread finished" % name
+
+ def readerThread(self, d, readerNum):
+ if sys.version_info[0] < 3 :
+ name = currentThread().getName()
+ else :
+ name = currentThread().name
+
+ c = d.cursor()
+ count = 0
+ rec = dbutils.DeadlockWrap(c.first, max_retries=10)
+ while rec:
+ count += 1
+ key, data = rec
+ self.assertEqual(self.makeData(key), data)
+ rec = dbutils.DeadlockWrap(c.next, max_retries=10)
+ if verbose:
+ print "%s: found %d records" % (name, count)
+ c.close()
+
+ if verbose:
+ print "%s: thread finished" % name
+
+
# Locking test matrix entry: BTree access method.
class BTreeSimpleThreaded(SimpleThreadedBase):
    dbtype = db.DB_BTREE
+
+
# Locking test matrix entry: Hash access method.
class HashSimpleThreaded(SimpleThreadedBase):
    dbtype = db.DB_HASH
+
+
+#----------------------------------------------------------------------
+
+
+class ThreadedTransactionsBase(BaseThreadedTestCase):
+ dbopenflags = db.DB_THREAD | db.DB_AUTO_COMMIT
+ envflags = (db.DB_THREAD |
+ db.DB_INIT_MPOOL |
+ db.DB_INIT_LOCK |
+ db.DB_INIT_LOG |
+ db.DB_INIT_TXN
+ )
+ readers = 0
+ writers = 0
+ records = 2000
+ txnFlag = 0
+
+ def setEnvOpts(self):
+ #self.env.set_lk_detect(db.DB_LOCK_DEFAULT)
+ pass
+
+ def test03_ThreadedTransactions(self):
+ if verbose:
+ print '\n', '-=' * 30
+ print "Running %s.test03_ThreadedTransactions..." % \
+ self.__class__.__name__
+
+ keys=range(self.records)
+ import random
+ random.shuffle(keys)
+ records_per_writer=self.records//self.writers
+ readers_per_writer=self.readers//self.writers
+ self.assertEqual(self.records,self.writers*records_per_writer)
+ self.assertEqual(self.readers,self.writers*readers_per_writer)
+ self.assertTrue((records_per_writer%readers_per_writer)==0)
+
+ readers=[]
+ for x in xrange(self.readers):
+ rt = Thread(target = self.readerThread,
+ args = (self.d, x),
+ name = 'reader %d' % x,
+ )#verbose = verbose)
+ if sys.version_info[0] < 3 :
+ rt.setDaemon(True)
+ else :
+ rt.daemon = True
+ readers.append(rt)
+
+ writers = []
+ for x in xrange(self.writers):
+ a=keys[records_per_writer*x:records_per_writer*(x+1)]
+ b=readers[readers_per_writer*x:readers_per_writer*(x+1)]
+ wt = Thread(target = self.writerThread,
+ args = (self.d, a, b),
+ name = 'writer %d' % x,
+ )#verbose = verbose)
+ writers.append(wt)
+
+ dt = Thread(target = self.deadlockThread)
+ if sys.version_info[0] < 3 :
+ dt.setDaemon(True)
+ else :
+ dt.daemon = True
+ dt.start()
+
+ for t in writers:
+ if sys.version_info[0] < 3 :
+ t.setDaemon(True)
+ else :
+ t.daemon = True
+ t.start()
+
+ for t in writers:
+ t.join()
+ for t in readers:
+ t.join()
+
+ self.doLockDetect = False
+ dt.join()
+
+ def writerThread(self, d, keys, readers):
+ if sys.version_info[0] < 3 :
+ name = currentThread().getName()
+ else :
+ name = currentThread().name
+
+ count=len(keys)//len(readers)
+ while len(keys):
+ try:
+ txn = self.env.txn_begin(None, self.txnFlag)
+ keys2=keys[:count]
+ for x in keys2 :
+ key = '%04d' % x
+ d.put(key, self.makeData(key), txn)
+ if verbose and x % 100 == 0:
+ print "%s: records %d - %d finished" % (name, start, x)
+ txn.commit()
+ keys=keys[count:]
+ readers.pop().start()
+ except (db.DBLockDeadlockError, db.DBLockNotGrantedError), val:
+ if verbose:
+ if sys.version_info < (2, 6) :
+ print "%s: Aborting transaction (%s)" % (name, val[1])
+ else :
+ print "%s: Aborting transaction (%s)" % (name,
+ val.args[1])
+ txn.abort()
+
+ if verbose:
+ print "%s: thread finished" % name
+
+ def readerThread(self, d, readerNum):
+ if sys.version_info[0] < 3 :
+ name = currentThread().getName()
+ else :
+ name = currentThread().name
+
+ finished = False
+ while not finished:
+ try:
+ txn = self.env.txn_begin(None, self.txnFlag)
+ c = d.cursor(txn)
+ count = 0
+ rec = c.first()
+ while rec:
+ count += 1
+ key, data = rec
+ self.assertEqual(self.makeData(key), data)
+ rec = c.next()
+ if verbose: print "%s: found %d records" % (name, count)
+ c.close()
+ txn.commit()
+ finished = True
+ except (db.DBLockDeadlockError, db.DBLockNotGrantedError), val:
+ if verbose:
+ if sys.version_info < (2, 6) :
+ print "%s: Aborting transaction (%s)" % (name, val[1])
+ else :
+ print "%s: Aborting transaction (%s)" % (name,
+ val.args[1])
+ c.close()
+ txn.abort()
+
+ if verbose:
+ print "%s: thread finished" % name
+
+ def deadlockThread(self):
+ self.doLockDetect = True
+ while self.doLockDetect:
+ time.sleep(0.05)
+ try:
+ aborted = self.env.lock_detect(
+ db.DB_LOCK_RANDOM, db.DB_LOCK_CONFLICT)
+ if verbose and aborted:
+ print "deadlock: Aborted %d deadlocked transaction(s)" \
+ % aborted
+ except db.DBError:
+ pass
+
+
# Transactional test matrix entry: BTree, blocking lock requests.
class BTreeThreadedTransactions(ThreadedTransactionsBase):
    dbtype = db.DB_BTREE
    writers = 2
    readers = 10
    records = 1000
+
# Transactional test matrix entry: Hash, blocking lock requests.
class HashThreadedTransactions(ThreadedTransactionsBase):
    dbtype = db.DB_HASH
    writers = 2
    readers = 10
    records = 1000
+
# Transactional test matrix entry: BTree, non-blocking (DB_TXN_NOWAIT).
class BTreeThreadedNoWaitTransactions(ThreadedTransactionsBase):
    dbtype = db.DB_BTREE
    writers = 2
    readers = 10
    records = 1000
    txnFlag = db.DB_TXN_NOWAIT
+
# Transactional test matrix entry: Hash, non-blocking (DB_TXN_NOWAIT).
class HashThreadedNoWaitTransactions(ThreadedTransactionsBase):
    dbtype = db.DB_HASH
    writers = 2
    readers = 10
    records = 1000
    txnFlag = db.DB_TXN_NOWAIT
+
+
+#----------------------------------------------------------------------
+
+def test_suite():
+ suite = unittest.TestSuite()
+
+ if have_threads:
+ suite.addTest(unittest.makeSuite(BTreeConcurrentDataStore))
+ suite.addTest(unittest.makeSuite(HashConcurrentDataStore))
+ suite.addTest(unittest.makeSuite(BTreeSimpleThreaded))
+ suite.addTest(unittest.makeSuite(HashSimpleThreaded))
+ suite.addTest(unittest.makeSuite(BTreeThreadedTransactions))
+ suite.addTest(unittest.makeSuite(HashThreadedTransactions))
+ suite.addTest(unittest.makeSuite(BTreeThreadedNoWaitTransactions))
+ suite.addTest(unittest.makeSuite(HashThreadedNoWaitTransactions))
+
+ else:
+ print "Threads not available, skipping thread tests."
+
+ return suite
+
+
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')