diff options
| author | Russell Keith-Magee <russell@keith-magee.com> | 2009-12-22 15:18:51 +0000 |
|---|---|---|
| committer | Russell Keith-Magee <russell@keith-magee.com> | 2009-12-22 15:18:51 +0000 |
| commit | ff60c5f9de3e8690d1e86f3e9e3f7248a15397c8 (patch) | |
| tree | a4cb0ebdd55fcaf8c8855231b6ad3e1a7bf45bee /tests/regressiontests | |
| parent | 7ef212af149540aa2da577a960d0d87029fd1514 (diff) | |
Fixed #1142 -- Added multiple database support.
This monster of a patch is the result of Alex Gaynor's 2009 Google Summer of Code project.
Congratulations to Alex for a job well done.
Big thanks also go to:
* Justin Bronn for keeping GIS in line with the changes,
* Karen Tracey and Jani Tiainen for their help testing Oracle support
* Brett Hoerner, Jon Loyens, and Craig Kimmerer for their feedback.
* Malcolm Tredinnick for his guidance during the GSoC submission process.
* Simon Willison for driving the original design process
* Cal Henderson for complaining about ponies he wanted.
... and everyone else too numerous to mention that helped to bring this feature to fruition.
git-svn-id: http://code.djangoproject.com/svn/django/trunk@11952 bcc190cf-cafb-0310-a4f2-bffc1f526a37
Diffstat (limited to 'tests/regressiontests')
23 files changed, 931 insertions, 96 deletions
diff --git a/tests/regressiontests/admin_scripts/tests.py b/tests/regressiontests/admin_scripts/tests.py index e67126e9a7..cdc9556afe 100644 --- a/tests/regressiontests/admin_scripts/tests.py +++ b/tests/regressiontests/admin_scripts/tests.py @@ -23,17 +23,15 @@ class AdminScriptTestCase(unittest.TestCase): settings_file = open(os.path.join(test_dir, filename), 'w') settings_file.write('# Settings file automatically generated by regressiontests.admin_scripts test case\n') exports = [ - 'DATABASE_ENGINE', - 'DATABASE_NAME', - 'DATABASE_USER', - 'DATABASE_PASSWORD', - 'DATABASE_HOST', - 'DATABASE_PORT', + 'DATABASES', 'ROOT_URLCONF' ] for s in exports: - if hasattr(settings,s): - settings_file.write("%s = '%s'\n" % (s, str(getattr(settings,s)))) + if hasattr(settings, s): + o = getattr(settings, s) + if not isinstance(o, dict): + o = "'%s'" % o + settings_file.write("%s = %s\n" % (s, o)) if apps is None: apps = ['django.contrib.auth', 'django.contrib.contenttypes', 'admin_scripts'] @@ -63,23 +61,25 @@ class AdminScriptTestCase(unittest.TestCase): except OSError: pass - def _ext_backend_path(self): + def _ext_backend_paths(self): """ - Returns the path for the external backend package, or None if no - external backend is detected. + Returns the paths for any external backend packages. 
""" + paths = [] first_package_re = re.compile(r'(^[^\.]+)\.') - result = first_package_re.findall(settings.DATABASE_ENGINE) - if result: - backend_pkg = __import__(result[0]) - backend_dir = os.path.dirname(backend_pkg.__file__) - return os.path.dirname(backend_dir) + for backend in settings.DATABASES.values(): + result = first_package_re.findall(backend['ENGINE']) + if result and result != 'django': + backend_pkg = __import__(result[0]) + backend_dir = os.path.dirname(backend_pkg.__file__) + paths.append(os.path.dirname(backend_dir)) + return paths def run_test(self, script, args, settings_file=None, apps=None): test_dir = os.path.dirname(os.path.dirname(__file__)) project_dir = os.path.dirname(test_dir) base_dir = os.path.dirname(project_dir) - ext_backend_base_dir = self._ext_backend_path() + ext_backend_base_dirs = self._ext_backend_paths() # Remember the old environment old_django_settings_module = os.environ.get('DJANGO_SETTINGS_MODULE', None) @@ -97,8 +97,7 @@ class AdminScriptTestCase(unittest.TestCase): elif 'DJANGO_SETTINGS_MODULE' in os.environ: del os.environ['DJANGO_SETTINGS_MODULE'] python_path = [test_dir, base_dir] - if ext_backend_base_dir: - python_path.append(ext_backend_base_dir) + python_path.extend(ext_backend_base_dirs) os.environ[python_path_var_name] = os.pathsep.join(python_path) # Build the command line @@ -523,7 +522,7 @@ class DjangoAdminSettingsDirectory(AdminScriptTestCase): A series of tests for django-admin.py when the settings file is in a directory. (see #9751). 
""" - + def setUp(self): self.write_settings('settings', is_dir=True) diff --git a/tests/regressiontests/aggregation_regress/models.py b/tests/regressiontests/aggregation_regress/models.py index 4476b86d64..7c51cd17a7 100644 --- a/tests/regressiontests/aggregation_regress/models.py +++ b/tests/regressiontests/aggregation_regress/models.py @@ -1,7 +1,7 @@ # coding: utf-8 import pickle -from django.db import connection, models +from django.db import connection, models, DEFAULT_DB_ALIAS from django.conf import settings try: @@ -97,19 +97,19 @@ __test__ = {'API_TESTS': """ {'pages__sum': 3703} # Annotations get combined with extra select clauses ->>> sorted(Book.objects.all().annotate(mean_auth_age=Avg('authors__age')).extra(select={'manufacture_cost' : 'price * .5'}).get(pk=2).__dict__.items()) +>>> sorted((k,v) for k,v in Book.objects.all().annotate(mean_auth_age=Avg('authors__age')).extra(select={'manufacture_cost' : 'price * .5'}).get(pk=2).__dict__.items() if k != '_state') [('contact_id', 3), ('id', 2), ('isbn', u'067232959'), ('manufacture_cost', ...11.545...), ('mean_auth_age', 45.0), ('name', u'Sams Teach Yourself Django in 24 Hours'), ('pages', 528), ('price', Decimal("23.09")), ('pubdate', datetime.date(2008, 3, 3)), ('publisher_id', 2), ('rating', 3.0)] # Order of the annotate/extra in the query doesn't matter ->>> sorted(Book.objects.all().extra(select={'manufacture_cost' : 'price * .5'}).annotate(mean_auth_age=Avg('authors__age')).get(pk=2).__dict__.items()) +>>> sorted((k,v) for k,v in Book.objects.all().extra(select={'manufacture_cost' : 'price * .5'}).annotate(mean_auth_age=Avg('authors__age')).get(pk=2).__dict__.items()if k != '_state') [('contact_id', 3), ('id', 2), ('isbn', u'067232959'), ('manufacture_cost', ...11.545...), ('mean_auth_age', 45.0), ('name', u'Sams Teach Yourself Django in 24 Hours'), ('pages', 528), ('price', Decimal("23.09")), ('pubdate', datetime.date(2008, 3, 3)), ('publisher_id', 2), ('rating', 3.0)] # Values queries can be 
combined with annotate and extra ->>> sorted(Book.objects.all().annotate(mean_auth_age=Avg('authors__age')).extra(select={'manufacture_cost' : 'price * .5'}).values().get(pk=2).items()) +>>> sorted((k,v) for k,v in Book.objects.all().annotate(mean_auth_age=Avg('authors__age')).extra(select={'manufacture_cost' : 'price * .5'}).values().get(pk=2).items()if k != '_state') [('contact_id', 3), ('id', 2), ('isbn', u'067232959'), ('manufacture_cost', ...11.545...), ('mean_auth_age', 45.0), ('name', u'Sams Teach Yourself Django in 24 Hours'), ('pages', 528), ('price', Decimal("23.09")), ('pubdate', datetime.date(2008, 3, 3)), ('publisher_id', 2), ('rating', 3.0)] # The order of the (empty) values, annotate and extra clauses doesn't matter ->>> sorted(Book.objects.all().values().annotate(mean_auth_age=Avg('authors__age')).extra(select={'manufacture_cost' : 'price * .5'}).get(pk=2).items()) +>>> sorted((k,v) for k,v in Book.objects.all().values().annotate(mean_auth_age=Avg('authors__age')).extra(select={'manufacture_cost' : 'price * .5'}).get(pk=2).items()if k != '_state') [('contact_id', 3), ('id', 2), ('isbn', u'067232959'), ('manufacture_cost', ...11.545...), ('mean_auth_age', 45.0), ('name', u'Sams Teach Yourself Django in 24 Hours'), ('pages', 528), ('price', Decimal("23.09")), ('pubdate', datetime.date(2008, 3, 3)), ('publisher_id', 2), ('rating', 3.0)] # If the annotation precedes the values clause, it won't be included @@ -250,10 +250,10 @@ FieldError: Cannot resolve keyword 'foo' into field. Choices are: authors, conta >>> out = pickle.dumps(qs) # Then check that the round trip works. 
->>> query = qs.query.as_sql()[0] +>>> query = qs.query.get_compiler(qs.db).as_sql()[0] >>> select_fields = qs.query.select_fields >>> query2 = pickle.loads(pickle.dumps(qs)) ->>> query2.query.as_sql()[0] == query +>>> query2.query.get_compiler(query2.db).as_sql()[0] == query True >>> query2.query.select_fields = select_fields @@ -327,7 +327,7 @@ def run_stddev_tests(): Stddev and Variance are not guaranteed to be available for SQLite, and are not available for PostgreSQL before 8.2. """ - if settings.DATABASE_ENGINE == 'sqlite3': + if settings.DATABASES[DEFAULT_DB_ALIAS]['ENGINE'] == 'django.db.backends.sqlite3': return False class StdDevPop(object): @@ -380,5 +380,4 @@ if run_stddev_tests(): >>> Book.objects.aggregate(Variance('price', sample=True)) {'price__variance': 700.53...} - """ diff --git a/tests/regressiontests/backends/tests.py b/tests/regressiontests/backends/tests.py index 628fabf04a..e4fa82403a 100644 --- a/tests/regressiontests/backends/tests.py +++ b/tests/regressiontests/backends/tests.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # Unit and doctests for specific database backends. import unittest -from django.db import backend, connection +from django.db import backend, connection, DEFAULT_DB_ALIAS from django.db.backends.signals import connection_created from django.conf import settings @@ -10,7 +10,7 @@ class Callproc(unittest.TestCase): def test_dbms_session(self): # If the backend is Oracle, test that we can call a standard # stored procedure through our cursor wrapper. 
- if settings.DATABASE_ENGINE == 'oracle': + if settings.DATABASES[DEFAULT_DB_ALIAS]['ENGINE'] == 'django.db.backends.oracle': convert_unicode = backend.convert_unicode cursor = connection.cursor() cursor.callproc(convert_unicode('DBMS_SESSION.SET_IDENTIFIER'), @@ -18,13 +18,13 @@ class Callproc(unittest.TestCase): return True else: return True - + class LongString(unittest.TestCase): def test_long_string(self): # If the backend is Oracle, test that we can save a text longer # than 4000 chars and read it properly - if settings.DATABASE_ENGINE == 'oracle': + if settings.DATABASES[DEFAULT_DB_ALIAS]['ENGINE'] == 'django.db.backends.oracle': c = connection.cursor() c.execute('CREATE TABLE ltext ("TEXT" NCLOB)') long_str = ''.join([unicode(x) for x in xrange(4000)]) @@ -64,7 +64,7 @@ __test__ = {'API_TESTS': """ # Unfortunately with sqlite3 the in-memory test database cannot be # closed, and so it cannot be re-opened during testing, and so we # sadly disable this test for now. -if settings.DATABASE_ENGINE != 'sqlite3': +if settings.DATABASES[DEFAULT_DB_ALIAS]['ENGINE'] != 'django.db.backends.sqlite3': __test__['API_TESTS'] += """ >>> connection_created.connect(connection_created_test) >>> connection.close() # Ensure the connection is closed diff --git a/tests/regressiontests/datatypes/models.py b/tests/regressiontests/datatypes/models.py index 0ad809a45d..f6cbc9f0f6 100644 --- a/tests/regressiontests/datatypes/models.py +++ b/tests/regressiontests/datatypes/models.py @@ -3,7 +3,7 @@ This is a basic model to test saving and loading boolean and date-related types, which in the past were problematic for some database backends. """ -from django.db import models +from django.db import models, DEFAULT_DB_ALIAS from django.conf import settings class Donut(models.Model): @@ -93,11 +93,11 @@ u'Outstanding' # Regression test for #8354: the MySQL backend should raise an error if given # a timezone-aware datetime object. 
-if settings.DATABASE_ENGINE == 'mysql': +if settings.DATABASES[DEFAULT_DB_ALIAS]['ENGINE'] == 'django.db.backends.mysql': __test__['API_TESTS'] += """ ->>> from django.utils import tzinfo ->>> dt = datetime.datetime(2008, 8, 31, 16, 20, tzinfo=tzinfo.FixedOffset(0)) ->>> d = Donut(name='Bear claw', consumed_at=dt) +>>> from django.utils import tzinfo +>>> dt = datetime.datetime(2008, 8, 31, 16, 20, tzinfo=tzinfo.FixedOffset(0)) +>>> d = Donut(name='Bear claw', consumed_at=dt) >>> d.save() Traceback (most recent call last): .... diff --git a/tests/regressiontests/defer_regress/models.py b/tests/regressiontests/defer_regress/models.py index d9e7bc6249..c32f5bb493 100644 --- a/tests/regressiontests/defer_regress/models.py +++ b/tests/regressiontests/defer_regress/models.py @@ -31,6 +31,10 @@ class Leaf(models.Model): def __unicode__(self): return self.name +class ResolveThis(models.Model): + num = models.FloatField() + name = models.CharField(max_length=16) + __test__ = {"regression_tests": """ Deferred fields should really be deferred and not accidentally use the field's default value just because they aren't passed to __init__. @@ -134,11 +138,11 @@ False # Regression for #11936 - loading.get_models should not return deferred models by default. 
>>> from django.db.models.loading import get_models ->>> sorted(get_models(models.get_app('defer_regress')), key=lambda obj: obj.__class__.__name__) -[<class 'regressiontests.defer_regress.models.Item'>, <class 'regressiontests.defer_regress.models.RelatedItem'>, <class 'regressiontests.defer_regress.models.Child'>, <class 'regressiontests.defer_regress.models.Leaf'>] +>>> sorted(get_models(models.get_app('defer_regress')), key=lambda obj: obj._meta.object_name) +[<class 'regressiontests.defer_regress.models.Child'>, <class 'regressiontests.defer_regress.models.Item'>, <class 'regressiontests.defer_regress.models.Leaf'>, <class 'regressiontests.defer_regress.models.RelatedItem'>, <class 'regressiontests.defer_regress.models.ResolveThis'>] ->>> sorted(get_models(models.get_app('defer_regress'), include_deferred=True), key=lambda obj: obj.__class__.__name__) -[<class 'regressiontests.defer_regress.models.Item'>, <class 'regressiontests.defer_regress.models.RelatedItem'>, <class 'regressiontests.defer_regress.models.Child'>, <class 'regressiontests.defer_regress.models.Leaf'>, <class 'regressiontests.defer_regress.models.Item_Deferred_text_value'>, <class 'regressiontests.defer_regress.models.Item_Deferred_name_other_value_text'>, <class 'regressiontests.defer_regress.models.RelatedItem_Deferred_item_id'>, <class 'regressiontests.defer_regress.models.Leaf_Deferred_second_child_value'>, <class 'regressiontests.defer_regress.models.Leaf_Deferred_name_value'>, <class 'regressiontests.defer_regress.models.Item_Deferred_name'>, <class 'regressiontests.defer_regress.models.Item_Deferred_other_value_text_value'>, <class 'regressiontests.defer_regress.models.Leaf_Deferred_value'>] +>>> sorted(get_models(models.get_app('defer_regress'), include_deferred=True), key=lambda obj: obj._meta.object_name) +[<class 'regressiontests.defer_regress.models.Child'>, <class 'regressiontests.defer_regress.models.Item'>, <class 'regressiontests.defer_regress.models.Item_Deferred_name'>, 
<class 'regressiontests.defer_regress.models.Item_Deferred_name_other_value_text'>, <class 'regressiontests.defer_regress.models.Item_Deferred_other_value_text_value'>, <class 'regressiontests.defer_regress.models.Item_Deferred_text_value'>, <class 'regressiontests.defer_regress.models.Leaf'>, <class 'regressiontests.defer_regress.models.Leaf_Deferred_name_value'>, <class 'regressiontests.defer_regress.models.Leaf_Deferred_second_child_value'>, <class 'regressiontests.defer_regress.models.Leaf_Deferred_value'>, <class 'regressiontests.defer_regress.models.RelatedItem'>, <class 'regressiontests.defer_regress.models.RelatedItem_Deferred_item_id'>, <class 'regressiontests.defer_regress.models.ResolveThis'>, <class 'regressiontests.defer_regress.models.ResolveThis_Deferred_num'>] """ } diff --git a/tests/regressiontests/defer_regress/tests.py b/tests/regressiontests/defer_regress/tests.py new file mode 100644 index 0000000000..860c7f8e31 --- /dev/null +++ b/tests/regressiontests/defer_regress/tests.py @@ -0,0 +1,9 @@ +from django.test import TestCase +from models import ResolveThis + +class DeferRegressionTest(TestCase): + def test_resolve_columns(self): + rt = ResolveThis.objects.create(num=5.0, name='Foobar') + qs = ResolveThis.objects.defer('num') + self.assertEqual(1, qs.count()) + self.assertEqual('Foobar', qs[0].name) diff --git a/tests/regressiontests/delete_regress/models.py b/tests/regressiontests/delete_regress/models.py index 93cadc58fa..ff561c9d20 100644 --- a/tests/regressiontests/delete_regress/models.py +++ b/tests/regressiontests/delete_regress/models.py @@ -1,5 +1,5 @@ from django.conf import settings -from django.db import models, backend, connection, transaction +from django.db import models, backend, connection, transaction, DEFAULT_DB_ALIAS from django.db.models import sql, query from django.test import TransactionTestCase @@ -8,17 +8,18 @@ class Book(models.Model): # Can't run this test under SQLite, because you can't # get two connections to an 
in-memory database. -if settings.DATABASE_ENGINE != 'sqlite3': +if settings.DATABASES[DEFAULT_DB_ALIAS]['ENGINE'] != 'django.db.backends.sqlite3': class DeleteLockingTest(TransactionTestCase): def setUp(self): # Create a second connection to the database + conn_settings = settings.DATABASES[DEFAULT_DB_ALIAS] self.conn2 = backend.DatabaseWrapper({ - 'DATABASE_HOST': settings.DATABASE_HOST, - 'DATABASE_NAME': settings.DATABASE_NAME, - 'DATABASE_OPTIONS': settings.DATABASE_OPTIONS, - 'DATABASE_PASSWORD': settings.DATABASE_PASSWORD, - 'DATABASE_PORT': settings.DATABASE_PORT, - 'DATABASE_USER': settings.DATABASE_USER, + 'HOST': conn_settings['HOST'], + 'NAME': conn_settings['NAME'], + 'OPTIONS': conn_settings['OPTIONS'], + 'PASSWORD': conn_settings['PASSWORD'], + 'PORT': conn_settings['PORT'], + 'USER': conn_settings['USER'], 'TIME_ZONE': settings.TIME_ZONE, }) diff --git a/tests/regressiontests/expressions_regress/models.py b/tests/regressiontests/expressions_regress/models.py index 7c9ca61ccc..a517780323 100644 --- a/tests/regressiontests/expressions_regress/models.py +++ b/tests/regressiontests/expressions_regress/models.py @@ -2,7 +2,7 @@ Spanning tests for all the operations that F() expressions can perform. """ from django.conf import settings -from django.db import models +from django.db import models, DEFAULT_DB_ALIAS # # Model for testing arithmetic expressions. @@ -121,7 +121,7 @@ Complex expressions of different connection types are possible. """} # Oracle doesn't support the Bitwise OR operator. 
-if settings.DATABASE_ENGINE != 'oracle': +if settings.DATABASES[DEFAULT_DB_ALIAS]['ENGINE'] != 'django.db.backends.oracle': __test__['API_TESTS'] += """ >>> _ = Number.objects.filter(pk=n.pk).update(integer=42, float=15.5) diff --git a/tests/regressiontests/extra_regress/models.py b/tests/regressiontests/extra_regress/models.py index 76eb549f81..d4d7cb86e5 100644 --- a/tests/regressiontests/extra_regress/models.py +++ b/tests/regressiontests/extra_regress/models.py @@ -16,11 +16,13 @@ class RevisionableModel(models.Model): def __unicode__(self): return u"%s (%s, %s)" % (self.title, self.id, self.base.id) - def save(self, force_insert=False, force_update=False): - super(RevisionableModel, self).save(force_insert, force_update) + def save(self, *args, **kwargs): + super(RevisionableModel, self).save(*args, **kwargs) if not self.base: self.base = self - super(RevisionableModel, self).save() + kwargs.pop('force_insert', None) + kwargs.pop('force_update', None) + super(RevisionableModel, self).save(*args, **kwargs) def new_revision(self): new_revision = copy.copy(self) diff --git a/tests/regressiontests/fixtures_regress/models.py b/tests/regressiontests/fixtures_regress/models.py index 7aa66a6f6a..4294ffb293 100644 --- a/tests/regressiontests/fixtures_regress/models.py +++ b/tests/regressiontests/fixtures_regress/models.py @@ -1,4 +1,4 @@ -from django.db import models +from django.db import models, DEFAULT_DB_ALIAS from django.contrib.auth.models import User from django.conf import settings import os @@ -35,7 +35,8 @@ class Stuff(models.Model): # Oracle doesn't distinguish between None and the empty string. # This hack makes the test case pass using Oracle. 
name = self.name - if settings.DATABASE_ENGINE == 'oracle' and name == u'': + if (settings.DATABASES[DEFAULT_DB_ALIAS]['ENGINE'] == 'django.db.backends.oracle' + and name == u''): name = None return unicode(name) + u' is owned by ' + unicode(self.owner) diff --git a/tests/regressiontests/generic_inline_admin/tests.py b/tests/regressiontests/generic_inline_admin/tests.py index 75f3e4aaae..0cf1f4ea69 100644 --- a/tests/regressiontests/generic_inline_admin/tests.py +++ b/tests/regressiontests/generic_inline_admin/tests.py @@ -107,7 +107,6 @@ class GenericAdminViewTest(TestCase): self.assertEquals(formset.forms[0].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-0-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-0-url" type="text" name="generic_inline_admin-media-content_type-object_id-0-url" value="http://example.com/logo.png" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-0-id" value="%s" id="id_generic_inline_admin-media-content_type-object_id-0-id" /></p>' % self.png_media_pk) self.assertEquals(formset.forms[1].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-1-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-1-url" type="text" name="generic_inline_admin-media-content_type-object_id-1-url" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-1-id" id="id_generic_inline_admin-media-content_type-object_id-1-id" /></p>') - def testGenericInlineFormsetFactory(self): # Regression test for #10522. 
inline_formset = generic_inlineformset_factory(Media, diff --git a/tests/regressiontests/introspection/tests.py b/tests/regressiontests/introspection/tests.py index de6381fc29..10250294c5 100644 --- a/tests/regressiontests/introspection/tests.py +++ b/tests/regressiontests/introspection/tests.py @@ -1,5 +1,5 @@ from django.conf import settings -from django.db import connection +from django.db import connection, DEFAULT_DB_ALIAS from django.test import TestCase from django.utils import functional @@ -80,7 +80,7 @@ class IntrospectionTests(TestCase): ['IntegerField', 'CharField', 'CharField', 'CharField', 'BigIntegerField']) # Regression test for #9991 - 'real' types in postgres - if settings.DATABASE_ENGINE.startswith('postgresql'): + if settings.DATABASES[DEFAULT_DB_ALIAS]['ENGINE'].startswith('django.db.backends.postgresql'): def test_postgresql_real_type(self): cursor = connection.cursor() cursor.execute("CREATE TABLE django_ixn_real_test_table (number REAL);") diff --git a/tests/regressiontests/model_fields/tests.py b/tests/regressiontests/model_fields/tests.py index b8d0f7ef0e..f31193e269 100644 --- a/tests/regressiontests/model_fields/tests.py +++ b/tests/regressiontests/model_fields/tests.py @@ -44,8 +44,9 @@ class DecimalFieldTests(django.test.TestCase): self.assertEqual(f._format(None), None) def test_get_db_prep_lookup(self): + from django.db import connection f = models.DecimalField(max_digits=5, decimal_places=1) - self.assertEqual(f.get_db_prep_lookup('exact', None), [None]) + self.assertEqual(f.get_db_prep_lookup('exact', None, connection=connection), [None]) def test_filter_with_strings(self): """ @@ -98,13 +99,14 @@ class DateTimeFieldTests(unittest.TestCase): class BooleanFieldTests(unittest.TestCase): def _test_get_db_prep_lookup(self, f): - self.assertEqual(f.get_db_prep_lookup('exact', True), [True]) - self.assertEqual(f.get_db_prep_lookup('exact', '1'), [True]) - self.assertEqual(f.get_db_prep_lookup('exact', 1), [True]) - 
self.assertEqual(f.get_db_prep_lookup('exact', False), [False]) - self.assertEqual(f.get_db_prep_lookup('exact', '0'), [False]) - self.assertEqual(f.get_db_prep_lookup('exact', 0), [False]) - self.assertEqual(f.get_db_prep_lookup('exact', None), [None]) + from django.db import connection + self.assertEqual(f.get_db_prep_lookup('exact', True, connection=connection), [True]) + self.assertEqual(f.get_db_prep_lookup('exact', '1', connection=connection), [True]) + self.assertEqual(f.get_db_prep_lookup('exact', 1, connection=connection), [True]) + self.assertEqual(f.get_db_prep_lookup('exact', False, connection=connection), [False]) + self.assertEqual(f.get_db_prep_lookup('exact', '0', connection=connection), [False]) + self.assertEqual(f.get_db_prep_lookup('exact', 0, connection=connection), [False]) + self.assertEqual(f.get_db_prep_lookup('exact', None, connection=connection), [None]) def test_booleanfield_get_db_prep_lookup(self): self._test_get_db_prep_lookup(models.BooleanField()) diff --git a/tests/regressiontests/model_inheritance_regress/models.py b/tests/regressiontests/model_inheritance_regress/models.py index 6a804a97c1..c669b2337a 100644 --- a/tests/regressiontests/model_inheritance_regress/models.py +++ b/tests/regressiontests/model_inheritance_regress/models.py @@ -314,7 +314,8 @@ DoesNotExist: ArticleWithAuthor matching query does not exist. # likely to ocurr naturally with model inheritance, so we check it here). # Regression test for #9390. This necessarily pokes at the SQL string for the # query, since the duplicate problems are only apparent at that late stage. 
->>> sql = ArticleWithAuthor.objects.order_by('pub_date', 'pk').query.as_sql()[0] +>>> qs = ArticleWithAuthor.objects.order_by('pub_date', 'pk') +>>> sql = qs.query.get_compiler(qs.db).as_sql()[0] >>> fragment = sql[sql.find('ORDER BY'):] >>> pos = fragment.find('pub_date') >>> fragment.find('pub_date', pos + 1) == -1 diff --git a/tests/regressiontests/model_regress/models.py b/tests/regressiontests/model_regress/models.py index 668acd830a..420f2c262c 100644 --- a/tests/regressiontests/model_regress/models.py +++ b/tests/regressiontests/model_regress/models.py @@ -2,7 +2,7 @@ import datetime from django.conf import settings -from django.db import models +from django.db import models, DEFAULT_DB_ALIAS from django.utils import tzinfo CHOICES = ( @@ -149,7 +149,9 @@ datetime.datetime(2000, 1, 1, 6, 1, 1) """} -if settings.DATABASE_ENGINE not in ("mysql", "oracle"): +if settings.DATABASES[DEFAULT_DB_ALIAS]['ENGINE'] not in ( + "django.db.backends.mysql", + "django.db.backends.oracle"): __test__["timezone-tests"] = """ # Saving an updating with timezone-aware datetime Python objects. Regression # test for #10443. @@ -167,4 +169,3 @@ if settings.DATABASE_ENGINE not in ("mysql", "oracle"): 1 """ - diff --git a/tests/regressiontests/multiple_database/__init__.py b/tests/regressiontests/multiple_database/__init__.py new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/regressiontests/multiple_database/__init__.py diff --git a/tests/regressiontests/multiple_database/fixtures/multidb-common.json b/tests/regressiontests/multiple_database/fixtures/multidb-common.json new file mode 100644 index 0000000000..33134173b9 --- /dev/null +++ b/tests/regressiontests/multiple_database/fixtures/multidb-common.json @@ -0,0 +1,10 @@ +[ + { + "pk": 1, + "model": "multiple_database.book", + "fields": { + "title": "The Definitive Guide to Django", + "published": "2009-7-8" + } + } +]
\ No newline at end of file diff --git a/tests/regressiontests/multiple_database/fixtures/multidb.default.json b/tests/regressiontests/multiple_database/fixtures/multidb.default.json new file mode 100644 index 0000000000..379b18a803 --- /dev/null +++ b/tests/regressiontests/multiple_database/fixtures/multidb.default.json @@ -0,0 +1,26 @@ +[ + { + "pk": 1, + "model": "multiple_database.person", + "fields": { + "name": "Marty Alchin" + } + }, + { + "pk": 2, + "model": "multiple_database.person", + "fields": { + "name": "George Vilches" + } + }, + { + "pk": 2, + "model": "multiple_database.book", + "fields": { + "title": "Pro Django", + "published": "2008-12-16", + "authors": [["Marty Alchin"]], + "editor": ["George Vilches"] + } + } +] diff --git a/tests/regressiontests/multiple_database/fixtures/multidb.other.json b/tests/regressiontests/multiple_database/fixtures/multidb.other.json new file mode 100644 index 0000000000..c64f490201 --- /dev/null +++ b/tests/regressiontests/multiple_database/fixtures/multidb.other.json @@ -0,0 +1,26 @@ +[ + { + "pk": 1, + "model": "multiple_database.person", + "fields": { + "name": "Mark Pilgrim" + } + }, + { + "pk": 2, + "model": "multiple_database.person", + "fields": { + "name": "Chris Mills" + } + }, + { + "pk": 2, + "model": "multiple_database.book", + "fields": { + "title": "Dive into Python", + "published": "2009-5-4", + "authors": [["Mark Pilgrim"]], + "editor": ["Chris Mills"] + } + } +]
\ No newline at end of file diff --git a/tests/regressiontests/multiple_database/models.py b/tests/regressiontests/multiple_database/models.py new file mode 100644 index 0000000000..33cb7fb745 --- /dev/null +++ b/tests/regressiontests/multiple_database/models.py @@ -0,0 +1,49 @@ +from django.conf import settings +from django.contrib.auth.models import User +from django.contrib.contenttypes.models import ContentType +from django.contrib.contenttypes import generic +from django.db import models, DEFAULT_DB_ALIAS + +class Review(models.Model): + source = models.CharField(max_length=100) + content_type = models.ForeignKey(ContentType) + object_id = models.PositiveIntegerField() + content_object = generic.GenericForeignKey() + + def __unicode__(self): + return self.source + + class Meta: + ordering = ('source',) + +class PersonManager(models.Manager): + def get_by_natural_key(self, name): + return self.get(name=name) + +class Person(models.Model): + objects = PersonManager() + name = models.CharField(max_length=100) + + def __unicode__(self): + return self.name + + class Meta: + ordering = ('name',) + +class Book(models.Model): + title = models.CharField(max_length=100) + published = models.DateField() + authors = models.ManyToManyField(Person) + editor = models.ForeignKey(Person, null=True, related_name='edited') + reviews = generic.GenericRelation(Review) + + def __unicode__(self): + return self.title + + class Meta: + ordering = ('title',) + +class UserProfile(models.Model): + user = models.OneToOneField(User) + flavor = models.CharField(max_length=100) + diff --git a/tests/regressiontests/multiple_database/tests.py b/tests/regressiontests/multiple_database/tests.py new file mode 100644 index 0000000000..300ed5e0a6 --- /dev/null +++ b/tests/regressiontests/multiple_database/tests.py @@ -0,0 +1,705 @@ +import datetime +import pickle + +from django.conf import settings +from django.contrib.auth.models import User +from django.db import connections +from django.test 
import TestCase + +from models import Book, Person, Review, UserProfile + +try: + # we only have these models if the user is using multi-db, it's safe the + # run the tests without them though. + from models import Article, article_using +except ImportError: + pass + +class QueryTestCase(TestCase): + multi_db = True + + def test_default_creation(self): + "Objects created on the default database don't leak onto other databases" + # Create a book on the default database using create() + Book.objects.create(title="Pro Django", + published=datetime.date(2008, 12, 16)) + + # Create a book on the default database using a save + dive = Book() + dive.title="Dive into Python" + dive.published = datetime.date(2009, 5, 4) + dive.save() + + # Check that book exists on the default database, but not on other database + try: + Book.objects.get(title="Pro Django") + Book.objects.using('default').get(title="Pro Django") + except Book.DoesNotExist: + self.fail('"Dive Into Python" should exist on default database') + + self.assertRaises(Book.DoesNotExist, + Book.objects.using('other').get, + title="Pro Django" + ) + + try: + Book.objects.get(title="Dive into Python") + Book.objects.using('default').get(title="Dive into Python") + except Book.DoesNotExist: + self.fail('"Dive into Python" should exist on default database') + + self.assertRaises(Book.DoesNotExist, + Book.objects.using('other').get, + title="Dive into Python" + ) + + + def test_other_creation(self): + "Objects created on another database don't leak onto the default database" + # Create a book on the second database + Book.objects.using('other').create(title="Pro Django", + published=datetime.date(2008, 12, 16)) + + # Create a book on the default database using a save + dive = Book() + dive.title="Dive into Python" + dive.published = datetime.date(2009, 5, 4) + dive.save(using='other') + + # Check that book exists on the default database, but not on other database + try: + Book.objects.using('other').get(title="Pro 
Django") + except Book.DoesNotExist: + self.fail('"Dive Into Python" should exist on other database') + + self.assertRaises(Book.DoesNotExist, + Book.objects.get, + title="Pro Django" + ) + self.assertRaises(Book.DoesNotExist, + Book.objects.using('default').get, + title="Pro Django" + ) + + try: + Book.objects.using('other').get(title="Dive into Python") + except Book.DoesNotExist: + self.fail('"Dive into Python" should exist on other database') + + self.assertRaises(Book.DoesNotExist, + Book.objects.get, + title="Dive into Python" + ) + self.assertRaises(Book.DoesNotExist, + Book.objects.using('default').get, + title="Dive into Python" + ) + + def test_basic_queries(self): + "Queries are constrained to a single database" + dive = Book.objects.using('other').create(title="Dive into Python", + published=datetime.date(2009, 5, 4)) + + dive = Book.objects.using('other').get(published=datetime.date(2009, 5, 4)) + self.assertEqual(dive.title, "Dive into Python") + self.assertRaises(Book.DoesNotExist, Book.objects.using('default').get, published=datetime.date(2009, 5, 4)) + + dive = Book.objects.using('other').get(title__icontains="dive") + self.assertEqual(dive.title, "Dive into Python") + self.assertRaises(Book.DoesNotExist, Book.objects.using('default').get, title__icontains="dive") + + dive = Book.objects.using('other').get(title__iexact="dive INTO python") + self.assertEqual(dive.title, "Dive into Python") + self.assertRaises(Book.DoesNotExist, Book.objects.using('default').get, title__iexact="dive INTO python") + + dive = Book.objects.using('other').get(published__year=2009) + self.assertEqual(dive.title, "Dive into Python") + self.assertEqual(dive.published, datetime.date(2009, 5, 4)) + self.assertRaises(Book.DoesNotExist, Book.objects.using('default').get, published__year=2009) + + years = Book.objects.using('other').dates('published', 'year') + self.assertEqual([o.year for o in years], [2009]) + years = Book.objects.using('default').dates('published', 'year') + 
self.assertEqual([o.year for o in years], []) + + months = Book.objects.using('other').dates('published', 'month') + self.assertEqual([o.month for o in months], [5]) + months = Book.objects.using('default').dates('published', 'month') + self.assertEqual([o.month for o in months], []) + + def test_m2m_separation(self): + "M2M fields are constrained to a single database" + # Create a book and author on the default database + pro = Book.objects.create(title="Pro Django", + published=datetime.date(2008, 12, 16)) + + marty = Person.objects.create(name="Marty Alchin") + + # Create a book and author on the other database + dive = Book.objects.using('other').create(title="Dive into Python", + published=datetime.date(2009, 5, 4)) + + mark = Person.objects.using('other').create(name="Mark Pilgrim") + + # Save the author relations + pro.authors = [marty] + dive.authors = [mark] + + # Inspect the m2m tables directly. + # There should be 1 entry in each database + self.assertEquals(Book.authors.through.objects.using('default').count(), 1) + self.assertEquals(Book.authors.through.objects.using('other').count(), 1) + + # Check that queries work across m2m joins + self.assertEquals(list(Book.objects.using('default').filter(authors__name='Marty Alchin').values_list('title', flat=True)), + [u'Pro Django']) + self.assertEquals(list(Book.objects.using('other').filter(authors__name='Marty Alchin').values_list('title', flat=True)), + []) + + self.assertEquals(list(Book.objects.using('default').filter(authors__name='Mark Pilgrim').values_list('title', flat=True)), + []) + self.assertEquals(list(Book.objects.using('other').filter(authors__name='Mark Pilgrim').values_list('title', flat=True)), + [u'Dive into Python']) + + # Reget the objects to clear caches + dive = Book.objects.using('other').get(title="Dive into Python") + mark = Person.objects.using('other').get(name="Mark Pilgrim") + + # Retrive related object by descriptor. 
Related objects should be database-baound + self.assertEquals(list(dive.authors.all().values_list('name', flat=True)), + [u'Mark Pilgrim']) + + self.assertEquals(list(mark.book_set.all().values_list('title', flat=True)), + [u'Dive into Python']) + + def test_m2m_forward_operations(self): + "M2M forward manipulations are all constrained to a single DB" + # Create a book and author on the other database + dive = Book.objects.using('other').create(title="Dive into Python", + published=datetime.date(2009, 5, 4)) + + mark = Person.objects.using('other').create(name="Mark Pilgrim") + + # Save the author relations + dive.authors = [mark] + + # Add a second author + john = Person.objects.using('other').create(name="John Smith") + self.assertEquals(list(Book.objects.using('other').filter(authors__name='John Smith').values_list('title', flat=True)), + []) + + + dive.authors.add(john) + self.assertEquals(list(Book.objects.using('other').filter(authors__name='Mark Pilgrim').values_list('title', flat=True)), + [u'Dive into Python']) + self.assertEquals(list(Book.objects.using('other').filter(authors__name='John Smith').values_list('title', flat=True)), + [u'Dive into Python']) + + # Remove the second author + dive.authors.remove(john) + self.assertEquals(list(Book.objects.using('other').filter(authors__name='Mark Pilgrim').values_list('title', flat=True)), + [u'Dive into Python']) + self.assertEquals(list(Book.objects.using('other').filter(authors__name='John Smith').values_list('title', flat=True)), + []) + + # Clear all authors + dive.authors.clear() + self.assertEquals(list(Book.objects.using('other').filter(authors__name='Mark Pilgrim').values_list('title', flat=True)), + []) + self.assertEquals(list(Book.objects.using('other').filter(authors__name='John Smith').values_list('title', flat=True)), + []) + + # Create an author through the m2m interface + dive.authors.create(name='Jane Brown') + self.assertEquals(list(Book.objects.using('other').filter(authors__name='Mark 
Pilgrim').values_list('title', flat=True)), + []) + self.assertEquals(list(Book.objects.using('other').filter(authors__name='Jane Brown').values_list('title', flat=True)), + [u'Dive into Python']) + + def test_m2m_reverse_operations(self): + "M2M reverse manipulations are all constrained to a single DB" + # Create a book and author on the other database + dive = Book.objects.using('other').create(title="Dive into Python", + published=datetime.date(2009, 5, 4)) + + mark = Person.objects.using('other').create(name="Mark Pilgrim") + + # Save the author relations + dive.authors = [mark] + + # Create a second book on the other database + grease = Book.objects.using('other').create(title="Greasemonkey Hacks", + published=datetime.date(2005, 11, 1)) + + # Add a books to the m2m + mark.book_set.add(grease) + self.assertEquals(list(Person.objects.using('other').filter(book__title='Dive into Python').values_list('name', flat=True)), + [u'Mark Pilgrim']) + self.assertEquals(list(Person.objects.using('other').filter(book__title='Greasemonkey Hacks').values_list('name', flat=True)), + [u'Mark Pilgrim']) + + # Remove a book from the m2m + mark.book_set.remove(grease) + self.assertEquals(list(Person.objects.using('other').filter(book__title='Dive into Python').values_list('name', flat=True)), + [u'Mark Pilgrim']) + self.assertEquals(list(Person.objects.using('other').filter(book__title='Greasemonkey Hacks').values_list('name', flat=True)), + []) + + # Clear the books associated with mark + mark.book_set.clear() + self.assertEquals(list(Person.objects.using('other').filter(book__title='Dive into Python').values_list('name', flat=True)), + []) + self.assertEquals(list(Person.objects.using('other').filter(book__title='Greasemonkey Hacks').values_list('name', flat=True)), + []) + + # Create a book through the m2m interface + mark.book_set.create(title="Dive into HTML5", published=datetime.date(2020, 1, 1)) + 
self.assertEquals(list(Person.objects.using('other').filter(book__title='Dive into Python').values_list('name', flat=True)), + []) + self.assertEquals(list(Person.objects.using('other').filter(book__title='Dive into HTML5').values_list('name', flat=True)), + [u'Mark Pilgrim']) + +# def test_m2m_cross_database_protection(self): +# "Operations that involve sharing M2M objects across databases raise an error" +# # Create a book and author on the default database +# pro = Book.objects.create(title="Pro Django", +# published=datetime.date(2008, 12, 16)) + +# marty = Person.objects.create(name="Marty Alchin") + +# # Create a book and author on the other database +# dive = Book.objects.using('other').create(title="Dive into Python", +# published=datetime.date(2009, 5, 4)) + +# mark = Person.objects.using('other').create(name="Mark Pilgrim") +# # Set a foreign key set with an object from a different database +# try: +# marty.book_set = [pro, dive] +# self.fail("Shouldn't be able to assign across databases") +# except ValueError: +# pass + +# # Add to an m2m with an object from a different database +# try: +# marty.book_set.add(dive) +# self.fail("Shouldn't be able to assign across databases") +# except ValueError: +# pass + +# # Set a m2m with an object from a different database +# try: +# marty.book_set = [pro, dive] +# self.fail("Shouldn't be able to assign across databases") +# except ValueError: +# pass + +# # Add to a reverse m2m with an object from a different database +# try: +# dive.authors.add(marty) +# self.fail("Shouldn't be able to assign across databases") +# except ValueError: +# pass + +# # Set a reverse m2m with an object from a different database +# try: +# dive.authors = [mark, marty] +# self.fail("Shouldn't be able to assign across databases") +# except ValueError: +# pass + + def test_foreign_key_separation(self): + "FK fields are constrained to a single database" + # Create a book and author on the default database + pro = 
Book.objects.create(title="Pro Django", + published=datetime.date(2008, 12, 16)) + + marty = Person.objects.create(name="Marty Alchin") + george = Person.objects.create(name="George Vilches") + + # Create a book and author on the other database + dive = Book.objects.using('other').create(title="Dive into Python", + published=datetime.date(2009, 5, 4)) + + mark = Person.objects.using('other').create(name="Mark Pilgrim") + chris = Person.objects.using('other').create(name="Chris Mills") + + # Save the author's favourite books + pro.editor = george + pro.save() + + dive.editor = chris + dive.save() + + pro = Book.objects.using('default').get(title="Pro Django") + self.assertEquals(pro.editor.name, "George Vilches") + + dive = Book.objects.using('other').get(title="Dive into Python") + self.assertEquals(dive.editor.name, "Chris Mills") + + # Check that queries work across foreign key joins + self.assertEquals(list(Person.objects.using('default').filter(edited__title='Pro Django').values_list('name', flat=True)), + [u'George Vilches']) + self.assertEquals(list(Person.objects.using('other').filter(edited__title='Pro Django').values_list('name', flat=True)), + []) + + self.assertEquals(list(Person.objects.using('default').filter(edited__title='Dive into Python').values_list('name', flat=True)), + []) + self.assertEquals(list(Person.objects.using('other').filter(edited__title='Dive into Python').values_list('name', flat=True)), + [u'Chris Mills']) + + # Reget the objects to clear caches + chris = Person.objects.using('other').get(name="Chris Mills") + dive = Book.objects.using('other').get(title="Dive into Python") + + # Retrive related object by descriptor. 
Related objects should be database-baound + self.assertEquals(list(chris.edited.values_list('title', flat=True)), + [u'Dive into Python']) + + def test_foreign_key_reverse_operations(self): + "FK reverse manipulations are all constrained to a single DB" + dive = Book.objects.using('other').create(title="Dive into Python", + published=datetime.date(2009, 5, 4)) + + mark = Person.objects.using('other').create(name="Mark Pilgrim") + chris = Person.objects.using('other').create(name="Chris Mills") + + # Save the author relations + dive.editor = chris + dive.save() + + # Add a second book edited by chris + html5 = Book.objects.using('other').create(title="Dive into HTML5", published=datetime.date(2010, 3, 15)) + self.assertEquals(list(Person.objects.using('other').filter(edited__title='Dive into HTML5').values_list('name', flat=True)), + []) + + chris.edited.add(html5) + self.assertEquals(list(Person.objects.using('other').filter(edited__title='Dive into HTML5').values_list('name', flat=True)), + [u'Chris Mills']) + self.assertEquals(list(Person.objects.using('other').filter(edited__title='Dive into Python').values_list('name', flat=True)), + [u'Chris Mills']) + + # Remove the second editor + chris.edited.remove(html5) + self.assertEquals(list(Person.objects.using('other').filter(edited__title='Dive into HTML5').values_list('name', flat=True)), + []) + self.assertEquals(list(Person.objects.using('other').filter(edited__title='Dive into Python').values_list('name', flat=True)), + [u'Chris Mills']) + + # Clear all edited books + chris.edited.clear() + self.assertEquals(list(Person.objects.using('other').filter(edited__title='Dive into HTML5').values_list('name', flat=True)), + []) + self.assertEquals(list(Person.objects.using('other').filter(edited__title='Dive into Python').values_list('name', flat=True)), + []) + + # Create an author through the m2m interface + chris.edited.create(title='Dive into Water', published=datetime.date(2010, 3, 15)) + 
self.assertEquals(list(Person.objects.using('other').filter(edited__title='Dive into HTML5').values_list('name', flat=True)), + []) + self.assertEquals(list(Person.objects.using('other').filter(edited__title='Dive into Water').values_list('name', flat=True)), + [u'Chris Mills']) + self.assertEquals(list(Person.objects.using('other').filter(edited__title='Dive into Python').values_list('name', flat=True)), + []) + +# def test_foreign_key_cross_database_protection(self): +# "Operations that involve sharing FK objects across databases raise an error" +# # Create a book and author on the default database +# pro = Book.objects.create(title="Pro Django", +# published=datetime.date(2008, 12, 16)) + +# marty = Person.objects.create(name="Marty Alchin") + +# # Create a book and author on the other database +# dive = Book.objects.using('other').create(title="Dive into Python", +# published=datetime.date(2009, 5, 4)) + +# mark = Person.objects.using('other').create(name="Mark Pilgrim") + +# # Set a foreign key with an object from a different database +# try: +# dive.editor = marty +# self.fail("Shouldn't be able to assign across databases") +# except ValueError: +# pass + +# # Set a foreign key set with an object from a different database +# try: +# marty.edited = [pro, dive] +# self.fail("Shouldn't be able to assign across databases") +# except ValueError: +# pass + +# # Add to a foreign key set with an object from a different database +# try: +# marty.edited.add(dive) +# self.fail("Shouldn't be able to assign across databases") +# except ValueError: +# pass + +# # BUT! if you assign a FK object when the base object hasn't +# # been saved yet, you implicitly assign the database for the +# # base object. 
+# chris = Person(name="Chris Mills") +# html5 = Book(title="Dive into HTML5", published=datetime.date(2010, 3, 15)) +# # initially, no db assigned +# self.assertEquals(chris._state.db, None) +# self.assertEquals(html5._state.db, None) + +# # old object comes from 'other', so the new object is set to use 'other'... +# dive.editor = chris +# html5.editor = mark +# # self.assertEquals(chris._state.db, 'other') +# self.assertEquals(html5._state.db, 'other') +# # ... but it isn't saved yet +# self.assertEquals(list(Person.objects.using('other').values_list('name',flat=True)), +# [u'Mark Pilgrim']) +# self.assertEquals(list(Book.objects.using('other').values_list('title',flat=True)), +# [u'Dive into Python']) + +# # When saved (no using required), new objects goes to 'other' +# chris.save() +# html5.save() +# self.assertEquals(list(Person.objects.using('default').values_list('name',flat=True)), +# [u'Marty Alchin']) +# self.assertEquals(list(Person.objects.using('other').values_list('name',flat=True)), +# [u'Chris Mills', u'Mark Pilgrim']) +# self.assertEquals(list(Book.objects.using('default').values_list('title',flat=True)), +# [u'Pro Django']) +# self.assertEquals(list(Book.objects.using('other').values_list('title',flat=True)), +# [u'Dive into HTML5', u'Dive into Python']) + +# # This also works if you assign the FK in the constructor +# water = Book(title="Dive into Water", published=datetime.date(2001, 1, 1), editor=mark) +# self.assertEquals(water._state.db, 'other') +# # ... 
but it isn't saved yet +# self.assertEquals(list(Book.objects.using('default').values_list('title',flat=True)), +# [u'Pro Django']) +# self.assertEquals(list(Book.objects.using('other').values_list('title',flat=True)), +# [u'Dive into HTML5', u'Dive into Python']) + +# # When saved, the new book goes to 'other' +# water.save() +# self.assertEquals(list(Book.objects.using('default').values_list('title',flat=True)), +# [u'Pro Django']) +# self.assertEquals(list(Book.objects.using('other').values_list('title',flat=True)), +# [u'Dive into HTML5', u'Dive into Python', u'Dive into Water']) + + def test_generic_key_separation(self): + "Generic fields are constrained to a single database" + # Create a book and author on the default database + pro = Book.objects.create(title="Pro Django", + published=datetime.date(2008, 12, 16)) + + review1 = Review.objects.create(source="Python Monthly", content_object=pro) + + # Create a book and author on the other database + dive = Book.objects.using('other').create(title="Dive into Python", + published=datetime.date(2009, 5, 4)) + + review2 = Review.objects.using('other').create(source="Python Weekly", content_object=dive) + + review1 = Review.objects.using('default').get(source="Python Monthly") + self.assertEquals(review1.content_object.title, "Pro Django") + + review2 = Review.objects.using('other').get(source="Python Weekly") + self.assertEquals(review2.content_object.title, "Dive into Python") + + # Reget the objects to clear caches + dive = Book.objects.using('other').get(title="Dive into Python") + + # Retrive related object by descriptor. 
Related objects should be database-bound + self.assertEquals(list(dive.reviews.all().values_list('source', flat=True)), + [u'Python Weekly']) + + def test_generic_key_reverse_operations(self): + "Generic reverse manipulations are all constrained to a single DB" + dive = Book.objects.using('other').create(title="Dive into Python", + published=datetime.date(2009, 5, 4)) + + temp = Book.objects.using('other').create(title="Temp", + published=datetime.date(2009, 5, 4)) + + review1 = Review.objects.using('other').create(source="Python Weekly", content_object=dive) + review2 = Review.objects.using('other').create(source="Python Monthly", content_object=temp) + + self.assertEquals(list(Review.objects.using('default').filter(object_id=dive.pk).values_list('source', flat=True)), + []) + self.assertEquals(list(Review.objects.using('other').filter(object_id=dive.pk).values_list('source', flat=True)), + [u'Python Weekly']) + + # Add a second review + dive.reviews.add(review2) + self.assertEquals(list(Review.objects.using('default').filter(object_id=dive.pk).values_list('source', flat=True)), + []) + self.assertEquals(list(Review.objects.using('other').filter(object_id=dive.pk).values_list('source', flat=True)), + [u'Python Monthly', u'Python Weekly']) + + # Remove the second author + dive.reviews.remove(review1) + self.assertEquals(list(Review.objects.using('default').filter(object_id=dive.pk).values_list('source', flat=True)), + []) + self.assertEquals(list(Review.objects.using('other').filter(object_id=dive.pk).values_list('source', flat=True)), + [u'Python Monthly']) + + # Clear all reviews + dive.reviews.clear() + self.assertEquals(list(Review.objects.using('default').filter(object_id=dive.pk).values_list('source', flat=True)), + []) + self.assertEquals(list(Review.objects.using('other').filter(object_id=dive.pk).values_list('source', flat=True)), + []) + + # Create an author through the generic interface + dive.reviews.create(source='Python Daily') + 
self.assertEquals(list(Review.objects.using('default').filter(object_id=dive.pk).values_list('source', flat=True)), + []) + self.assertEquals(list(Review.objects.using('other').filter(object_id=dive.pk).values_list('source', flat=True)), + [u'Python Daily']) + +# def test_generic_key_cross_database_protection(self): +## "Operations that involve sharing FK objects across databases raise an error" +## # Create a book and author on the default database +## pro = Book.objects.create(title="Pro Django", +## published=datetime.date(2008, 12, 16)) + +## review1 = Review.objects.create(source="Python Monthly", content_object=pro) + +## # Create a book and author on the other database +## dive = Book.objects.using('other').create(title="Dive into Python", +## published=datetime.date(2009, 5, 4)) + +## review2 = Review.objects.using('other').create(source="Python Weekly", content_object=dive) + +## # Set a foreign key with an object from a different database +## try: +## review1.content_object = dive +## self.fail("Shouldn't be able to assign across databases") +## except ValueError: +## pass + +# # Add to a foreign key set with an object from a different database +# try: +# dive.reviews.add(review1) +# self.fail("Shouldn't be able to assign across databases") +# except ValueError: +# pass + +# # BUT! if you assign a FK object when the base object hasn't +# # been saved yet, you implicitly assign the database for the +# # base object. +# review3 = Review(source="Python Daily") +# # initially, no db assigned +# self.assertEquals(review3._state.db, None) + +# # Dive comes from 'other', so review3 is set to use 'other'... +# review3.content_object = dive +# self.assertEquals(review3._state.db, 'other') +# # ... 
but it isn't saved yet +# self.assertEquals(list(Review.objects.using('default').filter(object_id=pro.pk).values_list('source', flat=True)), +# [u'Python Monthly']) +# self.assertEquals(list(Review.objects.using('other').filter(object_id=dive.pk).values_list('source',flat=True)), +# [u'Python Weekly']) + +# # When saved, John goes to 'other' +# review3.save() +# self.assertEquals(list(Review.objects.using('default').filter(object_id=pro.pk).values_list('source', flat=True)), +# [u'Python Monthly']) +# self.assertEquals(list(Review.objects.using('other').filter(object_id=dive.pk).values_list('source',flat=True)), +# [u'Python Daily', u'Python Weekly']) + + def test_ordering(self): + "get_next_by_XXX commands stick to a single database" + pro = Book.objects.create(title="Pro Django", + published=datetime.date(2008, 12, 16)) + + dive = Book.objects.using('other').create(title="Dive into Python", + published=datetime.date(2009, 5, 4)) + + learn = Book.objects.using('other').create(title="Learning Python", + published=datetime.date(2008, 7, 16)) + + self.assertEquals(learn.get_next_by_published().title, "Dive into Python") + self.assertEquals(dive.get_previous_by_published().title, "Learning Python") + + def test_raw(self): + "test the raw() method across databases" + dive = Book.objects.using('other').create(title="Dive into Python", + published=datetime.date(2009, 5, 4)) + val = Book.objects.db_manager("other").raw('SELECT id FROM "multiple_database_book"') + self.assertEqual(map(lambda o: o.pk, val), [dive.pk]) + + val = Book.objects.raw('SELECT id FROM "multiple_database_book"').using('other') + self.assertEqual(map(lambda o: o.pk, val), [dive.pk]) + + +class UserProfileTestCase(TestCase): + def setUp(self): + self.old_auth_profile_module = getattr(settings, 'AUTH_PROFILE_MODULE', None) + settings.AUTH_PROFILE_MODULE = 'multiple_database.UserProfile' + + def tearDown(self): + settings.AUTH_PROFILE_MODULE = self.old_auth_profile_module + + def 
test_user_profiles(self): + + alice = User.objects.create_user('alice', 'alice@example.com') + bob = User.objects.db_manager('other').create_user('bob', 'bob@example.com') + + alice_profile = UserProfile(user=alice, flavor='chocolate') + alice_profile.save() + + bob_profile = UserProfile(user=bob, flavor='crunchy frog') + bob_profile.save() + + self.assertEquals(alice.get_profile().flavor, 'chocolate') + self.assertEquals(bob.get_profile().flavor, 'crunchy frog') + +class FixtureTestCase(TestCase): + multi_db = True + fixtures = ['multidb-common', 'multidb'] + + def test_fixture_loading(self): + "Multi-db fixtures are loaded correctly" + # Check that "Pro Django" exists on the default database, but not on other database + try: + Book.objects.get(title="Pro Django") + Book.objects.using('default').get(title="Pro Django") + except Book.DoesNotExist: + self.fail('"Pro Django" should exist on default database') + + self.assertRaises(Book.DoesNotExist, + Book.objects.using('other').get, + title="Pro Django" + ) + + # Check that "Dive into Python" exists on the default database, but not on other database + try: + Book.objects.using('other').get(title="Dive into Python") + except Book.DoesNotExist: + self.fail('"Dive into Python" should exist on other database') + + self.assertRaises(Book.DoesNotExist, + Book.objects.get, + title="Dive into Python" + ) + self.assertRaises(Book.DoesNotExist, + Book.objects.using('default').get, + title="Dive into Python" + ) + + # Check that "Definitive Guide" exists on the both databases + try: + Book.objects.get(title="The Definitive Guide to Django") + Book.objects.using('default').get(title="The Definitive Guide to Django") + Book.objects.using('other').get(title="The Definitive Guide to Django") + except Book.DoesNotExist: + self.fail('"The Definitive Guide to Django" should exist on both databases') + + +class PickleQuerySetTestCase(TestCase): + multi_db = True + + def test_pickling(self): + for db in connections: + 
Book.objects.using(db).create(title='Dive into Python', published=datetime.date(2009, 5, 4)) + qs = Book.objects.all() + self.assertEqual(qs.db, pickle.loads(pickle.dumps(qs)).db) diff --git a/tests/regressiontests/queries/models.py b/tests/regressiontests/queries/models.py index ddac509ede..aababffbab 100644 --- a/tests/regressiontests/queries/models.py +++ b/tests/regressiontests/queries/models.py @@ -7,7 +7,7 @@ import pickle import sys from django.conf import settings -from django.db import models +from django.db import models, DEFAULT_DB_ALIAS from django.db.models.query import Q, ITER_CHUNK_SIZE # Python 2.3 doesn't have sorted() @@ -822,8 +822,8 @@ We can do slicing beyond what is currently in the result cache, too. Bug #7045 -- extra tables used to crash SQL construction on the second use. >>> qs = Ranking.objects.extra(tables=['django_site']) ->>> s = qs.query.as_sql() ->>> s = qs.query.as_sql() # test passes if this doesn't raise an exception. +>>> s = qs.query.get_compiler(qs.db).as_sql() +>>> s = qs.query.get_compiler(qs.db).as_sql() # test passes if this doesn't raise an exception. Bug #7098 -- Make sure semi-deprecated ordering by related models syntax still works. @@ -912,9 +912,9 @@ We should also be able to pickle things that use select_related(). The only tricky thing here is to ensure that we do the related selections properly after unpickling. >>> qs = Item.objects.select_related() ->>> query = qs.query.as_sql()[0] +>>> query = qs.query.get_compiler(qs.db).as_sql()[0] >>> query2 = pickle.loads(pickle.dumps(qs.query)) ->>> query2.as_sql()[0] == query +>>> query2.get_compiler(qs.db).as_sql()[0] == query True Check pickling of deferred-loading querysets @@ -1051,7 +1051,7 @@ sufficient that this query runs without error. Calling order_by() with no parameters removes any existing ordering on the model. But it should still be possible to add new ordering after that. 
>>> qs = Author.objects.order_by().order_by('name') ->>> 'ORDER BY' in qs.query.as_sql()[0] +>>> 'ORDER BY' in qs.query.get_compiler(qs.db).as_sql()[0] True Incorrect SQL was being generated for certain types of exclude() queries that @@ -1085,7 +1085,8 @@ performance problems on backends like MySQL. Nested queries should not evaluate the inner query as part of constructing the SQL (so we should see a nested query here, indicated by two "SELECT" calls). ->>> Annotation.objects.filter(notes__in=Note.objects.filter(note="xyzzy")).query.as_sql()[0].count('SELECT') +>>> qs = Annotation.objects.filter(notes__in=Note.objects.filter(note="xyzzy")) +>>> qs.query.get_compiler(qs.db).as_sql()[0].count('SELECT') 2 Bug #10181 -- Avoid raising an EmptyResultSet if an inner query is provably @@ -1222,20 +1223,20 @@ FieldError: Infinite loop caused by ordering. # In Oracle, we expect a null CharField to return u'' instead of None. -if settings.DATABASE_ENGINE == "oracle": +if settings.DATABASES[DEFAULT_DB_ALIAS]['ENGINE'] == "django.db.backends.oracle": __test__["API_TESTS"] = __test__["API_TESTS"].replace("<NONE_OR_EMPTY_UNICODE>", "u''") else: __test__["API_TESTS"] = __test__["API_TESTS"].replace("<NONE_OR_EMPTY_UNICODE>", "None") -if settings.DATABASE_ENGINE == "mysql": +if settings.DATABASES[DEFAULT_DB_ALIAS]['ENGINE'] == "django.db.backends.mysql": __test__["API_TESTS"] += """ When grouping without specifying ordering, we add an explicit "ORDER BY NULL" portion in MySQL to prevent unnecessary sorting. 
>>> query = Tag.objects.values_list('parent_id', flat=True).order_by().query >>> query.group_by = ['parent_id'] ->>> sql = query.as_sql()[0] +>>> sql = query.get_compiler(DEFAULT_DB_ALIAS).as_sql()[0] >>> fragment = "ORDER BY " >>> pos = sql.find(fragment) >>> sql.find(fragment, pos + 1) == -1 diff --git a/tests/regressiontests/serializers_regress/tests.py b/tests/regressiontests/serializers_regress/tests.py index f0781d78b1..11b53a5277 100644 --- a/tests/regressiontests/serializers_regress/tests.py +++ b/tests/regressiontests/serializers_regress/tests.py @@ -13,7 +13,7 @@ from cStringIO import StringIO from django.utils.functional import curry from django.core import serializers -from django.db import transaction +from django.db import transaction, DEFAULT_DB_ALIAS from django.core import management from django.conf import settings @@ -260,19 +260,19 @@ The end."""), (fk_obj, 452, FKDataToField, None), (fk_obj, 460, FKDataToO2O, 300), - + (im2m_obj, 470, M2MIntermediateData, None), - + #testing post- and prereferences and extra fields (im_obj, 480, Intermediate, {'right': 300, 'left': 470}), - (im_obj, 481, Intermediate, {'right': 300, 'left': 490}), - (im_obj, 482, Intermediate, {'right': 500, 'left': 470}), - (im_obj, 483, Intermediate, {'right': 500, 'left': 490}), - (im_obj, 484, Intermediate, {'right': 300, 'left': 470, 'extra': "extra"}), - (im_obj, 485, Intermediate, {'right': 300, 'left': 490, 'extra': "extra"}), - (im_obj, 486, Intermediate, {'right': 500, 'left': 470, 'extra': "extra"}), - (im_obj, 487, Intermediate, {'right': 500, 'left': 490, 'extra': "extra"}), - + (im_obj, 481, Intermediate, {'right': 300, 'left': 490}), + (im_obj, 482, Intermediate, {'right': 500, 'left': 470}), + (im_obj, 483, Intermediate, {'right': 500, 'left': 490}), + (im_obj, 484, Intermediate, {'right': 300, 'left': 470, 'extra': "extra"}), + (im_obj, 485, Intermediate, {'right': 300, 'left': 490, 'extra': "extra"}), + (im_obj, 486, Intermediate, {'right': 500, 'left': 470, 
'extra': "extra"}), + (im_obj, 487, Intermediate, {'right': 500, 'left': 490, 'extra': "extra"}), + (im2m_obj, 490, M2MIntermediateData, []), (data_obj, 500, Anchor, "Anchor 3"), @@ -331,7 +331,7 @@ The end."""), # Because Oracle treats the empty string as NULL, Oracle is expected to fail # when field.empty_strings_allowed is True and the value is None; skip these # tests. -if settings.DATABASE_ENGINE == 'oracle': +if settings.DATABASES[DEFAULT_DB_ALIAS]['ENGINE'] == 'django.db.backends.oracle': test_data = [data for data in test_data if not (data[0] == data_obj and data[2]._meta.get_field('data').empty_strings_allowed and @@ -340,7 +340,7 @@ if settings.DATABASE_ENGINE == 'oracle': # Regression test for #8651 -- a FK to an object iwth PK of 0 # This won't work on MySQL since it won't let you create an object # with a primary key of 0, -if settings.DATABASE_ENGINE != 'mysql': +if settings.DATABASES[DEFAULT_DB_ALIAS]['ENGINE'] != 'django.db.backends.mysql': test_data.extend([ (data_obj, 0, Anchor, "Anchor 0"), (fk_obj, 465, FKData, 0), |
