summaryrefslogtreecommitdiff
path: root/django/db/backends/postgresql/compiler.py
diff options
context:
space:
mode:
authorSimon Charette <charette.s@gmail.com>2024-11-17 00:30:00 -0500
committerSarah Boyce <42296566+sarahboyce@users.noreply.github.com>2024-12-11 13:56:18 +0100
commita16eedcf9c69d8a11d94cac1811018c5b996d491 (patch)
treeff30bd7f076156079e6636f93b8a5bc05e7be9f9 /django/db/backends/postgresql/compiler.py
parent2638b75554d2624dca3062a8da113a47f855f2a2 (diff)
Fixed #35936 -- Used unnest for bulk inserts on Postgres when possible.
This should make bulk_create significantly faster on Postgres when provided only literal values. Thanks James Sewell for writing about this technique, Tom Forbes for validating the performance benefits, David Sanders and Mariusz Felisiak for the review.
Diffstat (limited to 'django/db/backends/postgresql/compiler.py')
-rw-r--r--django/db/backends/postgresql/compiler.py50
1 files changed, 50 insertions, 0 deletions
diff --git a/django/db/backends/postgresql/compiler.py b/django/db/backends/postgresql/compiler.py
new file mode 100644
index 0000000000..2394d90f55
--- /dev/null
+++ b/django/db/backends/postgresql/compiler.py
@@ -0,0 +1,50 @@
+from django.db.models.sql.compiler import (
+ SQLAggregateCompiler,
+ SQLCompiler,
+ SQLDeleteCompiler,
+)
+from django.db.models.sql.compiler import SQLInsertCompiler as BaseSQLInsertCompiler
+from django.db.models.sql.compiler import SQLUpdateCompiler
+
+__all__ = [
+ "SQLAggregateCompiler",
+ "SQLCompiler",
+ "SQLDeleteCompiler",
+ "SQLInsertCompiler",
+ "SQLUpdateCompiler",
+]
+
+
class InsertUnnest(list):
    """
    Marker type for a list of column placeholders.

    Its presence signals DatabaseOperations.bulk_insert_sql() that the
    bulk insert should be performed with the UNNEST strategy instead of
    the usual VALUES clause.
    """

    def __str__(self):
        # Render the collected per-column placeholders as a single
        # UNNEST(...) call expression.
        joined_placeholders = ", ".join(self)
        return "UNNEST(%s)" % joined_placeholders
+
+
class SQLInsertCompiler(BaseSQLInsertCompiler):
    def assemble_as_sql(self, fields, value_rows):
        """
        Build (placeholder_rows, param_rows) for an INSERT, switching to
        the UNNEST strategy when all values are plain literals.

        Bulk inserts of literal, non-array values are rewritten as
        UNNEST of per-column arrays, which reduces query planning time
        on PostgreSQL. Otherwise defer to the base implementation.
        """
        # The rewrite is pointless for a single row (same number of
        # placeholders either way), impossible when a missing field
        # stands for the DEFAULT keyword of an empty-row insert, and
        # invalid when any value is a compilable expression (has
        # as_sql), since those cannot appear inside a literal array.
        single_row = len(value_rows) <= 1
        uses_default = any(field is None for field in fields)
        has_expressions = any(
            hasattr(value, "as_sql") for row in value_rows for value in row
        )
        if single_row or uses_default or has_expressions:
            return super().assemble_as_sql(fields, value_rows)
        column_types = [field.db_type(self.connection) for field in fields]
        # UNNEST flattens array values entirely instead of peeling off a
        # single level of nesting, so array columns are not eligible.
        if any(column_type.endswith("[]") for column_type in column_types):
            return super().assemble_as_sql(fields, value_rows)
        placeholders = InsertUnnest(
            ["(%%s)::%s[]" % column_type for column_type in column_types]
        )
        # Transpose the row-major values into one list per column; the
        # whole insert collapses into a single parameter "row".
        params = [[list(column_values) for column_values in zip(*value_rows)]]
        return placeholders, params