diff options
| author | django-bot <ops@djangoproject.com> | 2025-07-22 20:41:41 -0700 |
|---|---|---|
| committer | nessita <124304+nessita@users.noreply.github.com> | 2025-07-23 20:17:55 -0300 |
| commit | 69a93a88edb56ba47f624dac7a21aacc47ea474f (patch) | |
| tree | f57507a4435d032493cae40e06ecb254790b67b2 /django/db/models | |
| parent | 55b0cc21310b76ce4018dd793ba50556eaf0af06 (diff) | |
Refs #36500 -- Rewrapped long docstrings and block comments via a script.
Rewrapped long docstrings and block comments to 79 characters + newline
using script from https://github.com/medmunds/autofix-w505.
Diffstat (limited to 'django/db/models')
| -rw-r--r-- | django/db/models/aggregates.py | 8 | ||||
| -rw-r--r-- | django/db/models/base.py | 51 | ||||
| -rw-r--r-- | django/db/models/constraints.py | 8 | ||||
| -rw-r--r-- | django/db/models/deletion.py | 9 | ||||
| -rw-r--r-- | django/db/models/expressions.py | 18 | ||||
| -rw-r--r-- | django/db/models/fields/__init__.py | 7 | ||||
| -rw-r--r-- | django/db/models/fields/files.py | 17 | ||||
| -rw-r--r-- | django/db/models/fields/related.py | 12 | ||||
| -rw-r--r-- | django/db/models/fields/related_descriptors.py | 57 | ||||
| -rw-r--r-- | django/db/models/fields/related_lookups.py | 13 | ||||
| -rw-r--r-- | django/db/models/fields/tuple_lookups.py | 3 | ||||
| -rw-r--r-- | django/db/models/functions/datetime.py | 3 | ||||
| -rw-r--r-- | django/db/models/functions/json.py | 4 | ||||
| -rw-r--r-- | django/db/models/functions/text.py | 3 | ||||
| -rw-r--r-- | django/db/models/options.py | 26 | ||||
| -rw-r--r-- | django/db/models/query.py | 51 | ||||
| -rw-r--r-- | django/db/models/query_utils.py | 9 | ||||
| -rw-r--r-- | django/db/models/sql/compiler.py | 19 | ||||
| -rw-r--r-- | django/db/models/sql/datastructures.py | 7 | ||||
| -rw-r--r-- | django/db/models/sql/query.py | 57 | ||||
| -rw-r--r-- | django/db/models/sql/subqueries.py | 6 |
21 files changed, 214 insertions(+), 174 deletions(-)
diff --git a/django/db/models/aggregates.py b/django/db/models/aggregates.py index 444d72addb..1cf82416cb 100644 --- a/django/db/models/aggregates.py +++ b/django/db/models/aggregates.py @@ -353,10 +353,10 @@ class StringAgg(Aggregate): extra_context["template"] = template c = self.copy() - # The creation of the delimiter SQL and the ordering of the parameters must be - # handled explicitly, as MySQL puts the delimiter at the end of the aggregate - # using the `SEPARATOR` declaration (rather than treating as an expression like - # other database backends). + # The creation of the delimiter SQL and the ordering of the parameters + # must be handled explicitly, as MySQL puts the delimiter at the end of + # the aggregate using the `SEPARATOR` declaration (rather than treating + # as an expression like other database backends). delimiter_params = [] if c.delimiter: delimiter_sql, delimiter_params = compiler.compile(c.delimiter) diff --git a/django/db/models/base.py b/django/db/models/base.py index 901743147d..7c20319da6 100644 --- a/django/db/models/base.py +++ b/django/db/models/base.py @@ -493,10 +493,10 @@ class Model(AltersData, metaclass=ModelBase): # Set up the storage for instance state self._state = ModelState() - # There is a rather weird disparity here; if kwargs, it's set, then args - # overrides it. It should be one or the other; don't duplicate the work - # The reason for the kwargs check is that standard iterator passes in by - # args, and instantiation for iteration is 33% faster. + # There is a rather weird disparity here; if kwargs, it's set, then + # args overrides it. It should be one or the other; don't duplicate the + # work The reason for the kwargs check is that standard iterator passes + # in by args, and instantiation for iteration is 33% faster. if len(args) > len(opts.concrete_fields): # Daft, but matches old exception sans the err msg. 
raise IndexError("Number of args exceeds number of fields") @@ -504,9 +504,9 @@ class Model(AltersData, metaclass=ModelBase): if not kwargs: fields_iter = iter(opts.concrete_fields) # The ordering of the zip calls matter - zip throws StopIteration - # when an iter throws it. So if the first iter throws it, the second - # is *not* consumed. We rely on this, so don't change the order - # without changing the logic. + # when an iter throws it. So if the first iter throws it, the + # second is *not* consumed. We rely on this, so don't change the + # order without changing the logic. for val, field in zip(args, fields_iter): if val is _DEFERRED: continue @@ -540,7 +540,8 @@ class Model(AltersData, metaclass=ModelBase): is_related_object = True except KeyError: try: - # Object instance wasn't passed in -- must be an ID. + # Object instance wasn't passed in -- must be an + # ID. val = kwargs.pop(field.attname) except KeyError: val = field.get_default() @@ -1079,7 +1080,8 @@ class Model(AltersData, metaclass=ModelBase): and all(f.has_default() or f.has_db_default() for f in meta.pk_fields) ): force_insert = True - # If possible, try an UPDATE. If that doesn't update anything, do an INSERT. + # If possible, try an UPDATE. If that doesn't update anything, do an + # INSERT. if pk_set and not force_insert: base_qs = cls._base_manager.using(using) values = [ @@ -1142,21 +1144,22 @@ class Model(AltersData, metaclass=ModelBase): if not values: # We can end up here when saving a model in inheritance chain where # update_fields doesn't target any field in current model. In that - # case we just say the update succeeded. Another case ending up here - # is a model with just PK - in that case check that the PK still - # exists. + # case we just say the update succeeded. Another case ending up + # here is a model with just PK - in that case check that the PK + # still exists. 
return update_fields is not None or filtered.exists() if self._meta.select_on_save and not forced_update: return ( filtered.exists() and - # It may happen that the object is deleted from the DB right after - # this check, causing the subsequent UPDATE to return zero matching - # rows. The same result can occur in some rare cases when the - # database returns zero despite the UPDATE being executed - # successfully (a row is matched and updated). In order to - # distinguish these two cases, the object's existence in the - # database is again checked for if the UPDATE query returns 0. + # It may happen that the object is deleted from the DB right + # after this check, causing the subsequent UPDATE to return + # zero matching rows. The same result can occur in some rare + # cases when the database returns zero despite the UPDATE being + # executed successfully (a row is matched and updated). In + # order to distinguish these two cases, the object's existence + # in the database is again checked for if the UPDATE query + # returns 0. (filtered._update(values) > 0 or filtered.exists()) ) return filtered._update(values) > 0 @@ -1347,7 +1350,8 @@ class Model(AltersData, metaclass=ModelBase): Hook for doing any extra model-wide validation after clean() has been called on every field by self.clean_fields. Any ValidationError raised by this method will not be associated with a particular field; it will - have a special-case association with the field defined by NON_FIELD_ERRORS. + have a special-case association with the field defined by + NON_FIELD_ERRORS. """ pass @@ -1878,7 +1882,9 @@ class Model(AltersData, metaclass=ModelBase): @classmethod def _check_m2m_through_same_relationship(cls): - """Check if no relationship model is used by more than one m2m field.""" + """ + Check if no relationship model is used by more than one m2m field. 
+ """ errors = [] seen_intermediary_signatures = [] @@ -2003,7 +2009,8 @@ class Model(AltersData, metaclass=ModelBase): @classmethod def _check_column_name_clashes(cls): - # Store a list of column names which have already been used by other fields. + # Store a list of column names which have already been used by other + # fields. used_column_names = [] errors = [] diff --git a/django/db/models/constraints.py b/django/db/models/constraints.py index ae2709abb8..73ab23bdfa 100644 --- a/django/db/models/constraints.py +++ b/django/db/models/constraints.py @@ -593,8 +593,8 @@ class UniqueConstraint(BaseConstraint): ].features.interprets_empty_strings_as_nulls ) ): - # A composite constraint containing NULL value cannot cause - # a violation since NULL != NULL in SQL. + # A composite constraint containing NULL value cannot + # cause a violation since NULL != NULL in SQL. return lookup_kwargs[field.name] = lookup_value lookup_args = [] @@ -646,8 +646,8 @@ class UniqueConstraint(BaseConstraint): and self.violation_error_message == self.default_violation_error_message ): - # When fields are defined, use the unique_error_message() as - # a default for backward compatibility. + # When fields are defined, use the unique_error_message() + # as a default for backward compatibility. validation_error_message = instance.unique_error_message( model, self.fields ) diff --git a/django/db/models/deletion.py b/django/db/models/deletion.py index 9221364ff4..b1939f8b35 100644 --- a/django/db/models/deletion.py +++ b/django/db/models/deletion.py @@ -82,8 +82,9 @@ def DO_NOTHING(collector, field, sub_objs, using): def get_candidate_relations_to_delete(opts): - # The candidate relations are the ones that come from N-1 and 1-1 relations. - # N-N (i.e., many-to-many) relations aren't candidates for deletion. + # The candidate relations are the ones that come from N-1 and 1-1 + # relations. N-N (i.e., many-to-many) relations aren't candidates for + # deletion. 
return ( f for f in opts.get_fields(include_hidden=True) @@ -434,8 +435,8 @@ class Collector: self.data[model] = sorted(instances, key=attrgetter("pk")) # if possible, bring the models in an order suitable for databases that - # don't support transactions or cannot defer constraint checks until the - # end of a transaction. + # don't support transactions or cannot defer constraint checks until + # the end of a transaction. self.sort() # number of objects deleted for each model label deleted_counter = Counter() diff --git a/django/db/models/expressions.py b/django/db/models/expressions.py index bf89a4f561..012a7c346b 100644 --- a/django/db/models/expressions.py +++ b/django/db/models/expressions.py @@ -289,7 +289,8 @@ class BaseExpression: in this query * reuse: a set of reusable joins for multijoins * summarize: a terminal aggregate clause - * for_save: whether this expression about to be used in a save or update + * for_save: whether this expression about to be used in a save or + update Return: an Expression to be added to the query. """ @@ -349,9 +350,9 @@ class BaseExpression: As a guess, if the output fields of all source fields match then simply infer the same type here. - If a source's output field resolves to None, exclude it from this check. - If all sources are None, then an error is raised higher up the stack in - the output_field property. + If a source's output field resolves to None, exclude it from this + check. If all sources are None, then an error is raised higher up the + stack in the output_field property. """ # This guess is mostly a bad idea, but there is quite a lot of code # (especially 3rd party Func subclasses) that depend on it, we'd need a @@ -500,7 +501,8 @@ class BaseExpression: return sql, params def get_expression_for_validation(self): - # Ignore expressions that cannot be used during a constraint validation. + # Ignore expressions that cannot be used during a constraint + # validation. 
if not getattr(self, "constraint_validation_compatible", True): try: (expression,) = self.get_source_expressions() @@ -1264,7 +1266,8 @@ class Star(Expression): class DatabaseDefault(Expression): """ - Expression to use DEFAULT keyword during insert otherwise the underlying expression. + Expression to use DEFAULT keyword during insert otherwise the underlying + expression. """ def __init__(self, expression, output_field=None): @@ -1625,7 +1628,8 @@ class When(Expression): ): c = super().resolve_expression(query, allow_joins, reuse, summarize, for_save) if for_save and c.condition is not None: - # Resolve condition with for_save=False, since it's used as a filter. + # Resolve condition with for_save=False, since it's used as a + # filter. c.condition = self.condition.resolve_expression( query, allow_joins, reuse, summarize, for_save=False ) diff --git a/django/db/models/fields/__init__.py b/django/db/models/fields/__init__.py index 69289627f0..e7add282a6 100644 --- a/django/db/models/fields/__init__.py +++ b/django/db/models/fields/__init__.py @@ -997,7 +997,8 @@ class Field(RegisterLookupMixin): def get_db_prep_value(self, value, connection, prepared=False): """ - Return field's value prepared for interacting with the database backend. + Return field's value prepared for interacting with the database + backend. Used by the default implementations of get_db_prep_save(). """ @@ -1927,8 +1928,8 @@ class EmailField(CharField): def deconstruct(self): name, path, args, kwargs = super().deconstruct() - # We do not exclude max_length if it matches default as we want to change - # the default in future. + # We do not exclude max_length if it matches default as we want to + # change the default in future. 
return name, path, args, kwargs def formfield(self, **kwargs): diff --git a/django/db/models/fields/files.py b/django/db/models/fields/files.py index 8f807b1156..5216ff565f 100644 --- a/django/db/models/fields/files.py +++ b/django/db/models/fields/files.py @@ -176,11 +176,11 @@ class FileDescriptor(DeferredAttribute): # instance.file needs to ultimately return some instance of `File`, # probably a subclass. Additionally, this returned object needs to have # the FieldFile API so that users can easily do things like - # instance.file.path and have that delegated to the file storage engine. - # Easy enough if we're strict about assignment in __set__, but if you - # peek below you can see that we're not. So depending on the current - # value of the field we have to dynamically construct some sort of - # "thing" to return. + # instance.file.path and have that delegated to the file storage + # engine. Easy enough if we're strict about assignment in __set__, but + # if you peek below you can see that we're not. So depending on the + # current value of the field we have to dynamically construct some sort + # of "thing" to return. # The instance dict contains whatever was originally assigned # in __set__. @@ -204,8 +204,8 @@ class FileDescriptor(DeferredAttribute): instance.__dict__[self.field.attname] = attr # Other types of files may be assigned as well, but they need to have - # the FieldFile interface added to them. Thus, we wrap any other type of - # File inside a FieldFile (well, the field's attr_class, which is + # the FieldFile interface added to them. Thus, we wrap any other type + # of File inside a FieldFile (well, the field's attr_class, which is # usually FieldFile). 
elif isinstance(file, File) and not isinstance(file, FieldFile): file_copy = self.field.attr_class(instance, self.field, file.name) @@ -215,7 +215,8 @@ class FileDescriptor(DeferredAttribute): # Finally, because of the (some would say boneheaded) way pickle works, # the underlying FieldFile might not actually itself have an associated - # file. So we need to reset the details of the FieldFile in those cases. + # file. So we need to reset the details of the FieldFile in those + # cases. elif isinstance(file, FieldFile) and not hasattr(file, "field"): file.instance = instance file.field = self.field diff --git a/django/db/models/fields/related.py b/django/db/models/fields/related.py index bad71a5fd6..a59dcac68c 100644 --- a/django/db/models/fields/related.py +++ b/django/db/models/fields/related.py @@ -663,8 +663,8 @@ class ForeignObject(RelatedField): frozenset(uc.fields) <= foreign_fields for uc in remote_opts.total_unique_constraints ) - # If the model defines a composite primary key and the foreign key - # refers to it, the target is unique. + # If the model defines a composite primary key and the foreign + # key refers to it, the target is unique. or ( frozenset(field.name for field in remote_opts.pk_fields) == foreign_fields @@ -746,8 +746,8 @@ class ForeignObject(RelatedField): kwargs["to"] = self.remote_field.model.lower() else: kwargs["to"] = self.remote_field.model._meta.label_lower - # If swappable is True, then see if we're actually pointing to the target - # of a swap. + # If swappable is True, then see if we're actually pointing to the + # target of a swap. 
swappable_setting = self.swappable_setting if swappable_setting is not None: # If it's already a settings reference, error @@ -1825,8 +1825,8 @@ class ManyToManyField(RelatedField): kwargs["through"] = self.remote_field.through._meta.label if through_fields := getattr(self.remote_field, "through_fields", None): kwargs["through_fields"] = through_fields - # If swappable is True, then see if we're actually pointing to the target - # of a swap. + # If swappable is True, then see if we're actually pointing to the + # target of a swap. swappable_setting = self.swappable_setting if swappable_setting is not None: # If it's already a settings reference, error. diff --git a/django/db/models/fields/related_descriptors.py b/django/db/models/fields/related_descriptors.py index 8da7aaef91..3e2150e0f6 100644 --- a/django/db/models/fields/related_descriptors.py +++ b/django/db/models/fields/related_descriptors.py @@ -309,17 +309,17 @@ class ForwardManyToOneDescriptor: ) remote_field = self.field.remote_field - # If we're setting the value of a OneToOneField to None, we need to clear - # out the cache on any old related object. Otherwise, deleting the - # previously-related object will also cause this object to be deleted, - # which is wrong. + # If we're setting the value of a OneToOneField to None, we need to + # clear out the cache on any old related object. Otherwise, deleting + # the previously-related object will also cause this object to be + # deleted, which is wrong. if value is None: - # Look up the previously-related object, which may still be available - # since we've not yet cleared out the related field. - # Use the cache directly, instead of the accessor; if we haven't + # Look up the previously-related object, which may still be + # available since we've not yet cleared out the related field. 
Use + # the cache directly, instead of the accessor; if we haven't # populated the cache, then we don't care - we're only accessing - # the object to invalidate the accessor cache, so there's no - # need to populate the cache just to expire it again. + # the object to invalidate the accessor cache, so there's no need + # to populate the cache just to expire it again. related = self.field.get_cached_value(instance, default=None) # If we've got an old related object, we need to clear out its @@ -357,7 +357,8 @@ class ForwardManyToOneDescriptor: class ForwardOneToOneDescriptor(ForwardManyToOneDescriptor): """ - Accessor to the related object on the forward side of a one-to-one relation. + Accessor to the related object on the forward side of a one-to-one + relation. In the example:: @@ -531,7 +532,8 @@ class ReverseOneToOneDescriptor: - ``self`` is the descriptor managing the ``restaurant`` attribute - ``instance`` is the ``place`` instance - - ``value`` is the ``restaurant`` instance on the right of the equal sign + - ``value`` is the ``restaurant`` instance on the right of the equal + sign Keep in mind that ``Restaurant`` holds the foreign key to ``Place``. """ @@ -586,12 +588,13 @@ class ReverseOneToOneDescriptor: for index, field in enumerate(self.related.field.local_related_fields): setattr(value, field.attname, related_pk[index]) - # Set the related instance cache used by __get__ to avoid an SQL query - # when accessing the attribute we just set. + # Set the related instance cache used by __get__ to avoid an SQL + # query when accessing the attribute we just set. self.related.set_cached_value(instance, value) - # Set the forward accessor cache on the related object to the current - # instance to avoid an extra SQL query if it's accessed later on. + # Set the forward accessor cache on the related object to the + # current instance to avoid an extra SQL query if it's accessed + # later on. 
self.related.field.set_cached_value(value, instance) def __reduce__(self): @@ -1076,8 +1079,8 @@ def create_forward_many_to_many_manager(superclass, rel, reverse): def _build_remove_filters(self, removed_vals): filters = Q.create([(self.source_field_name, self.related_val)]) - # No need to add a subquery condition if removed_vals is a QuerySet without - # filters. + # No need to add a subquery condition if removed_vals is a QuerySet + # without filters. removed_vals_filters = ( not isinstance(removed_vals, QuerySet) or removed_vals._has_filters() ) @@ -1145,8 +1148,8 @@ def create_forward_many_to_many_manager(superclass, rel, reverse): # M2M: need to annotate the query in order to get the primary model # that the secondary model was actually related to. We know that - # there will already be a join on the join table, so we can just add - # the select. + # there will already be a join on the join table, so we can just + # add the select. # For non-autocreated 'through' models, can't assume we are # dealing with PK values. @@ -1475,10 +1478,10 @@ def create_forward_many_to_many_manager(superclass, rel, reverse): def _add_items( self, source_field_name, target_field_name, *objs, through_defaults=None ): - # source_field_name: the PK fieldname in join table for the source object - # target_field_name: the PK fieldname in join table for the target object - # *objs - objects to add. Either object instances, or primary keys - # of object instances. + # source_field_name: the PK fieldname in join table for the source + # object target_field_name: the PK fieldname in join table for the + # target object *objs - objects to add. Either object instances, or + # primary keys of object instances. 
if not objs: return @@ -1544,10 +1547,10 @@ def create_forward_many_to_many_manager(superclass, rel, reverse): ) def _remove_items(self, source_field_name, target_field_name, *objs): - # source_field_name: the PK colname in join table for the source object - # target_field_name: the PK colname in join table for the target object - # *objs - objects to remove. Either object instances, or primary - # keys of object instances. + # source_field_name: the PK colname in join table for the source + # object target_field_name: the PK colname in join table for the + # target object *objs - objects to remove. Either object instances, + # or primary keys of object instances. if not objs: return diff --git a/django/db/models/fields/related_lookups.py b/django/db/models/fields/related_lookups.py index 9fc7db7c34..639c29d7ba 100644 --- a/django/db/models/fields/related_lookups.py +++ b/django/db/models/fields/related_lookups.py @@ -28,8 +28,9 @@ def get_normalized_value(value, lhs): try: value_list.append(getattr(value, source.attname)) except AttributeError: - # A case like Restaurant.objects.filter(place=restaurant_instance), - # where place is a OneToOneField and the primary key of Restaurant. + # A case like + # Restaurant.objects.filter(place=restaurant_instance), where + # place is a OneToOneField and the primary key of Restaurant. pk = value.pk return pk if isinstance(pk, tuple) else (pk,) return tuple(value_list) @@ -101,10 +102,10 @@ class RelatedLookupMixin: ): # If we get here, we are dealing with single-column relations. self.rhs = get_normalized_value(self.rhs, self.lhs)[0] - # We need to run the related field's get_prep_value(). Consider case - # ForeignKey to IntegerField given value 'abc'. The ForeignKey itself - # doesn't have validation for non-integers, so we must run validation - # using the target field. + # We need to run the related field's get_prep_value(). Consider + # case ForeignKey to IntegerField given value 'abc'. 
The ForeignKey + # itself doesn't have validation for non-integers, so we must run + # validation using the target field. if self.prepare_rhs and hasattr(self.lhs.output_field, "path_infos"): # Get the target field. We can safely assume there is only one # as we don't get to the direct value branch otherwise. diff --git a/django/db/models/fields/tuple_lookups.py b/django/db/models/fields/tuple_lookups.py index 62818a37c4..b861bbe9cc 100644 --- a/django/db/models/fields/tuple_lookups.py +++ b/django/db/models/fields/tuple_lookups.py @@ -370,7 +370,8 @@ class TupleIn(TupleLookupMixin, In): return super(TupleLookupMixin, self).as_sql(compiler, connection) # e.g.: (a, b, c) in [(x1, y1, z1), (x2, y2, z2)] as SQL: - # WHERE (a = x1 AND b = y1 AND c = z1) OR (a = x2 AND b = y2 AND c = z2) + # WHERE (a = x1 AND b = y1 AND c = z1) OR (a = x2 AND b = y2 AND c = + # z2) root = WhereNode([], connector=OR) lhs = self.lhs diff --git a/django/db/models/functions/datetime.py b/django/db/models/functions/datetime.py index 361e4ce385..b536690c8a 100644 --- a/django/db/models/functions/datetime.py +++ b/django/db/models/functions/datetime.py @@ -96,7 +96,8 @@ class Extract(TimezoneMixin, Transform): "Extract input expression must be DateField, DateTimeField, " "TimeField, or DurationField." ) - # Passing dates to functions expecting datetimes is most likely a mistake. + # Passing dates to functions expecting datetimes is most likely a + # mistake. if type(field) is DateField and copy.lookup_name in ( "hour", "minute", diff --git a/django/db/models/functions/json.py b/django/db/models/functions/json.py index 3a4c9c81b3..fee7dd05f4 100644 --- a/django/db/models/functions/json.py +++ b/django/db/models/functions/json.py @@ -98,8 +98,8 @@ class JSONObject(Func): def as_postgresql(self, compiler, connection, **extra_context): # Casting keys to text is only required when using JSONB_BUILD_OBJECT - # or when using JSON_OBJECT on PostgreSQL 16+ with server-side bindings. 
- # This is done in all cases for consistency. + # or when using JSON_OBJECT on PostgreSQL 16+ with server-side + # bindings. This is done in all cases for consistency. copy = self.copy() copy.set_source_expressions( [ diff --git a/django/db/models/functions/text.py b/django/db/models/functions/text.py index 9c48659bf9..28660c5e66 100644 --- a/django/db/models/functions/text.py +++ b/django/db/models/functions/text.py @@ -110,7 +110,8 @@ class ConcatPair(Func): ) def coalesce(self): - # null on either side results in null for expression, wrap with coalesce + # null on either side results in null for expression, wrap with + # coalesce c = self.copy() c.set_source_expressions( [ diff --git a/django/db/models/options.py b/django/db/models/options.py index 296309236f..0e229dea3a 100644 --- a/django/db/models/options.py +++ b/django/db/models/options.py @@ -348,9 +348,10 @@ class Options: # being referenced, because there will be new relationships in the # cache. Otherwise, expire the cache of references *to* this field. # The mechanism for getting at the related model is slightly odd - - # ideally, we'd just ask for field.related_model. However, related_model - # is a cached property, and all the models haven't been loaded yet, so - # we need to make sure we don't cache a string reference. + # ideally, we'd just ask for field.related_model. However, + # related_model is a cached property, and all the models haven't been + # loaded yet, so we need to make sure we don't cache a string + # reference. if ( field.is_relation and hasattr(field.remote_field, "model") @@ -427,8 +428,8 @@ class Options: except ValueError: # setting not in the format app_label.model_name # raising ImproperlyConfigured here causes problems with - # test cleanup code - instead it is raised in get_user_model - # or as part of validation. + # test cleanup code - instead it is raised in + # get_user_model or as part of validation. 
return swapped_for if ( @@ -534,10 +535,10 @@ class Options: # For legacy reasons, the fields property should only contain forward # fields that are not private or with a m2m cardinality. Therefore we # pass these three filters as filters to the generator. - # The third filter is a longwinded way of checking f.related_model - we don't - # use that property directly because related_model is a cached property, - # and all the models may not have been loaded yet; we don't want to cache - # the string reference to the related_model. + # The third filter is a longwinded way of checking f.related_model - we + # don't use that property directly because related_model is a cached + # property, and all the models may not have been loaded yet; we don't + # want to cache the string reference to the related_model. def is_not_an_m2m_field(f): return not (f.is_relation and f.many_to_many) @@ -707,7 +708,8 @@ class Options: def all_parents(self): """ Return all the ancestors of this model as a tuple ordered by MRO. - Useful for determining if something is an ancestor, regardless of lineage. + Useful for determining if something is an ancestor, regardless of + lineage. """ result = OrderedSet(self.parents) for parent in self.parents: @@ -800,8 +802,8 @@ class Options: """ This method is used by each model to find its reverse objects. As this method is very expensive and is accessed frequently (it looks up every - field in a model, in every app), it is computed on first access and then - is set as a property on every model. + field in a model, in every app), it is computed on first access and + then is set as a property on every model. """ related_objects_graph = defaultdict(list) diff --git a/django/db/models/query.py b/django/db/models/query.py index 8163b5b973..3e3753ee5a 100644 --- a/django/db/models/query.py +++ b/django/db/models/query.py @@ -755,8 +755,9 @@ class QuerySet(AltersData): Insert each of the instances into the database. 
Do *not* call save() on each of the instances, do not send any pre/post_save signals, and do not set the primary key attribute if it is an - autoincrement field (except if features.can_return_rows_from_bulk_insert=True). - Multi-table models are not supported. + autoincrement field (except if + features.can_return_rows_from_bulk_insert=True). Multi-table models are + not supported. """ # When you bulk insert you don't get the primary keys back (if it's an # autoincrement, except if can_return_rows_from_bulk_insert=True), so @@ -774,8 +775,9 @@ class QuerySet(AltersData): raise ValueError("Batch size must be a positive integer.") # Check that the parents share the same concrete model with the our # model to detect the inheritance pattern ConcreteGrandParent -> - # MultiTableParent -> ProxyChild. Simply checking self.model._meta.proxy - # would not identify that case as involving multiple tables. + # MultiTableParent -> ProxyChild. Simply checking + # self.model._meta.proxy would not identify that case as involving + # multiple tables. for parent in self.model._meta.all_parents: if parent._meta.concrete_model is not self.model._meta.concrete_model: raise ValueError("Can't bulk create a multi-table inherited model") @@ -1302,10 +1304,10 @@ class QuerySet(AltersData): def _update(self, values): """ - A version of update() that accepts field objects instead of field names. - Used primarily for model saving and not intended for use by general - code (it requires too much poking around at model internals to be - useful at that level). + A version of update() that accepts field objects instead of field + names. Used primarily for model saving and not intended for use by + general code (it requires too much poking around at model internals to + be useful at that level). 
""" if self.query.is_sliced: raise TypeError("Cannot update a query once a slice has been taken.") @@ -2365,9 +2367,9 @@ def prefetch_related_objects(model_instances, *related_lookups): # Prepare objects: good_objects = True for obj in obj_list: - # Since prefetching can re-use instances, it is possible to have - # the same instance multiple times in obj_list, so obj might - # already be prepared. + # Since prefetching can re-use instances, it is possible to + # have the same instance multiple times in obj_list, so obj + # might already be prepared. if not hasattr(obj, "_prefetched_objects_cache"): try: obj._prefetched_objects_cache = {} @@ -2376,7 +2378,8 @@ def prefetch_related_objects(model_instances, *related_lookups): # values_list(flat=True), for example (TypeError) or # a QuerySet subclass that isn't returning Model # instances (AttributeError), either in Django or a 3rd - # party. prefetch_related() doesn't make sense, so quit. + # party. prefetch_related() doesn't make sense, so + # quit. good_objects = False break if not good_objects: @@ -2384,8 +2387,9 @@ def prefetch_related_objects(model_instances, *related_lookups): # Descend down tree - # We assume that objects retrieved are homogeneous (which is the premise - # of prefetch_related), so what applies to first object applies to all. + # We assume that objects retrieved are homogeneous (which is the + # premise of prefetch_related), so what applies to first object + # applies to all. first_obj = obj_list[0] to_attr = lookup.get_current_to_attr(level)[0] prefetcher, descriptor, attr_found, is_fetched = get_prefetcher( @@ -2462,8 +2466,8 @@ def prefetch_related_objects(model_instances, *related_lookups): if new_obj is None: continue # We special-case `list` rather than something more generic - # like `Iterable` because we don't want to accidentally match - # user models that define __iter__. + # like `Iterable` because we don't want to accidentally + # match user models that define __iter__. 
if isinstance(new_obj, list): new_obj_list.extend(new_obj) else: @@ -2528,8 +2532,8 @@ def get_prefetcher(instance, through_attr, to_attr): if through_attr == to_attr: is_fetched = rel_obj_descriptor.is_cached else: - # descriptor doesn't support prefetching, so we go ahead and get - # the attribute on the instance rather than the class to + # descriptor doesn't support prefetching, so we go ahead and + # get the attribute on the instance rather than the class to # support many related managers rel_obj = getattr(instance, through_attr) if hasattr(rel_obj, "get_prefetch_querysets"): @@ -2556,12 +2560,14 @@ def prefetch_one_level(instances, prefetcher, lookup, level): # prefetcher must have a method get_prefetch_querysets() which takes a list # of instances, and returns a tuple: - # (queryset of instances of self.model that are related to passed in instances, + # (queryset of instances of self.model that are related to passed in + # instances, # callable that gets value to be matched for returned instances, # callable that gets value to be matched for passed in instances, # boolean that is True for singly related objects, # cache or field name to assign to, - # boolean that is True when the previous argument is a cache name vs a field name). + # boolean that is True when the previous argument is a cache name vs a + # field name). # The 'values to be matched' must be hashable as they will be used # in a dictionary. @@ -2601,8 +2607,9 @@ def prefetch_one_level(instances, prefetcher, lookup, level): to_attr, as_attr = lookup.get_current_to_attr(level) # Make sure `to_attr` does not conflict with a field. if as_attr and instances: - # We assume that objects retrieved are homogeneous (which is the premise - # of prefetch_related), so what applies to first object applies to all. + # We assume that objects retrieved are homogeneous (which is the + # premise of prefetch_related), so what applies to first object applies + # to all. 
model = instances[0].__class__ try: model._meta.get_field(to_attr) diff --git a/django/db/models/query_utils.py b/django/db/models/query_utils.py index 3e644a3c26..5da3d81672 100644 --- a/django/db/models/query_utils.py +++ b/django/db/models/query_utils.py @@ -302,8 +302,9 @@ class RegisterLookupMixin: @staticmethod def merge_dicts(dicts): """ - Merge dicts in reverse to preference the order of the original list. e.g., - merge_dicts([a, b]) will preference the keys in 'a' over those in 'b'. + Merge dicts in reverse to preference the order of the original list. + e.g., merge_dicts([a, b]) will preference the keys in 'a' over those in + 'b'. """ merged = {} for d in reversed(dicts): @@ -435,8 +436,8 @@ def check_rel_lookup_compatibility(model, target_opts, field): # Restaurant.objects.filter(pk__in=Restaurant.objects.all()). # If we didn't have the primary key check, then pk__in (== place__in) would # give Place's opts as the target opts, but Restaurant isn't compatible - # with that. This logic applies only to primary keys, as when doing __in=qs, - # we are going to turn this into __in=qs.values('pk') later on. + # with that. This logic applies only to primary keys, as when doing + # __in=qs, we are going to turn this into __in=qs.values('pk') later on. return check(target_opts) or ( getattr(field, "primary_key", False) and check(field.model._meta) ) diff --git a/django/db/models/sql/compiler.py b/django/db/models/sql/compiler.py index b0b2ac5583..f72ba907ad 100644 --- a/django/db/models/sql/compiler.py +++ b/django/db/models/sql/compiler.py @@ -52,10 +52,11 @@ class SQLCompiler: # they would return an empty result set. self.elide_empty = elide_empty self.quote_cache = {"*": "*"} - # The select, klass_info, and annotations are needed by QuerySet.iterator() - # these are set as a side-effect of executing the query. 
Note that we calculate - # separately a list of extra select columns needed for grammatical correctness - # of the query, but these columns are not included in self.select. + # The select, klass_info, and annotations are needed by + # QuerySet.iterator() these are set as a side-effect of executing the + # query. Note that we calculate separately a list of extra select + # columns needed for grammatical correctness of the query, but these + # columns are not included in self.select. self.select = None self.annotation_col_map = None self.klass_info = None @@ -946,9 +947,9 @@ class SQLCompiler: # If the query is used as a subquery, the extra selects would # result in more columns than the left-hand side expression is # expecting. This can happen when a subquery uses a combination - # of order_by() and distinct(), forcing the ordering expressions - # to be selected as well. Wrap the query in another subquery - # to exclude extraneous selects. + # of order_by() and distinct(), forcing the ordering + # expressions to be selected as well. Wrap the query in another + # subquery to exclude extraneous selects. sub_selects = [] sub_params = [] for index, (select, _, alias) in enumerate(self.select, start=1): @@ -2107,8 +2108,8 @@ class SQLUpdateCompiler(SQLCompiler): # If the result_type is NO_RESULTS then the aux_row_count is None. aux_row_count = query.get_compiler(self.using).execute_sql(result_type) if is_empty and aux_row_count: - # Returns the row count for any related updates as the number of - # rows updated. + # Returns the row count for any related updates as the number + # of rows updated. 
row_count = aux_row_count is_empty = False return row_count diff --git a/django/db/models/sql/datastructures.py b/django/db/models/sql/datastructures.py index be6934485c..ffdd36c0c8 100644 --- a/django/db/models/sql/datastructures.py +++ b/django/db/models/sql/datastructures.py @@ -37,8 +37,8 @@ class Join: - table_alias (possible alias for the table, can be None) - join_type (can be None for those entries that aren't joined from anything) - - parent_alias (which table is this join's parent, can be None similarly - to join_type) + - parent_alias (which table is this join's parent, can be None + similarly to join_type) - as_sql() - relabeled_clone() """ @@ -76,7 +76,8 @@ class Join: def as_sql(self, compiler, connection): """ Generate the full - LEFT OUTER JOIN sometable ON sometable.somecol = othertable.othercol, params + LEFT OUTER JOIN sometable ON sometable.somecol = + othertable.othercol, params clause for this join. """ join_conditions = [] diff --git a/django/db/models/sql/query.py b/django/db/models/sql/query.py index 20dbf7cfaa..5e87f65e7c 100644 --- a/django/db/models/sql/query.py +++ b/django/db/models/sql/query.py @@ -308,10 +308,9 @@ class Query(BaseExpression): self.alias_map = {} # Whether to provide alias to columns during reference resolving. self.alias_cols = alias_cols - # Sometimes the query contains references to aliases in outer queries (as - # a result of split_exclude). Correct alias quoting needs to know these - # aliases too. - # Map external tables to whether they are aliased. + # Sometimes the query contains references to aliases in outer queries + # (as a result of split_exclude). Correct alias quoting needs to know + # these aliases too. Map external tables to whether they are aliased. self.external_aliases = {} self.table_map = {} # Maps table names to list of aliases. 
self.used_aliases = set() @@ -593,8 +592,8 @@ class Query(BaseExpression): and not inner_query.annotation_select_mask ): # In case of Model.objects[0:3].count(), there would be no - # field selected in the inner query, yet we must use a subquery. - # So, make sure at least one field is selected. + # field selected in the inner query, yet we must use a + # subquery. So, make sure at least one field is selected. inner_query.select = ( self.model._meta.pk.get_col(inner_query.get_initial_alias()), ) @@ -932,10 +931,11 @@ class Query(BaseExpression): an outer join. If 'unconditional' is False, only promote the join if it is nullable or the parent join is an outer join. - The children promotion is done to avoid join chains that contain a LOUTER - b INNER c. So, if we have currently a INNER b INNER c and a->b is promoted, - then we must also promote b->c automatically, or otherwise the promotion - of a->b doesn't actually change anything in the query results. + The children promotion is done to avoid join chains that contain a + LOUTER b INNER c. So, if we have currently a INNER b INNER c and a->b + is promoted, then we must also promote b->c automatically, or otherwise + the promotion of a->b doesn't actually change anything in the query + results. """ aliases = list(aliases) while aliases: @@ -1228,7 +1228,8 @@ class Query(BaseExpression): if FORBIDDEN_ALIAS_PATTERN.search(alias): raise ValueError( "Column aliases cannot contain whitespace characters, quotation marks, " - # RemovedInDjango70Warning: When the deprecation ends, replace with: + # RemovedInDjango70Warning: When the deprecation ends, replace + # with: # "semicolons, percent signs, or SQL comments." "semicolons, or SQL comments." ) @@ -1256,7 +1257,8 @@ class Query(BaseExpression): def resolve_expression(self, query, *args, **kwargs): clone = self.clone() - # Subqueries need to use a different set of aliases than the outer query. + # Subqueries need to use a different set of aliases than the outer + # query. 
clone.bump_prefix(query) clone.subquery = True clone.where.resolve_expression(query, *args, **kwargs) @@ -1893,9 +1895,9 @@ class Query(BaseExpression): Return the final field involved in the joins, the target field (used for any 'where' constraint), the final 'opts' value, the joins, the field path traveled to generate the joins, and a transform function - that takes a field and alias and is equivalent to `field.get_col(alias)` - in the simple case but wraps field transforms if they were included in - names. + that takes a field and alias and is equivalent to + `field.get_col(alias)` in the simple case but wraps field transforms if + they were included in names. The target field is the field containing the concrete value. Final field can be something different, for example foreign key pointing to @@ -2052,7 +2054,8 @@ class Query(BaseExpression): # Summarize currently means we are doing an aggregate() query # which is executed as a wrapped subquery if any of the # aggregate() elements reference an existing annotation. In - # that case we need to return a Ref to the subquery's annotation. + # that case we need to return a Ref to the subquery's + # annotation. if name not in self.annotation_select: raise FieldError( "Cannot aggregate over the '%s' alias. Use annotate() " @@ -2127,8 +2130,8 @@ class Query(BaseExpression): alias = col.alias if alias in can_reuse: pk = select_field.model._meta.pk - # Need to add a restriction so that outer query's filters are in effect for - # the subquery, too. + # Need to add a restriction so that outer query's filters are in + # effect for the subquery, too. query.bump_prefix(self) lookup_class = select_field.get_lookup("exact") # Note that the query.select[0].alias is different from alias @@ -2204,7 +2207,8 @@ class Query(BaseExpression): """ Return True if adding filters to this instance is still possible. - Typically, this means no limits or offsets have been put on the results. 
+ Typically, this means no limits or offsets have been put on the + results. """ return not self.is_sliced @@ -2440,8 +2444,8 @@ class Query(BaseExpression): """ # Fields on related models are stored in the literal double-underscore # format, so that we can use a set datastructure. We do the foo__bar - # splitting and handling when computing the SQL column names (as part of - # get_columns()). + # splitting and handling when computing the SQL column names (as part + # of get_columns()). existing, defer = self.deferred_loading if defer: # Add to existing deferred names. @@ -2630,8 +2634,8 @@ class Query(BaseExpression): cols in split_exclude(). Return a lookup usable for doing outerq.filter(lookup=self) and a - boolean indicating if the joins in the prefix contain a LEFT OUTER join. - _""" + boolean indicating if the joins in the prefix contain a LEFT OUTER + join. _""" all_paths = [] for _, paths in names_with_path: all_paths.extend(paths) @@ -2678,9 +2682,10 @@ class Query(BaseExpression): if extra_restriction: self.where.add(extra_restriction, AND) else: - # TODO: It might be possible to trim more joins from the start of the - # inner query if it happens to have a longer join chain containing the - # values in select_fields. Lets punt this one for now. + # TODO: It might be possible to trim more joins from the start of + # the inner query if it happens to have a longer join chain + # containing the values in select_fields. Lets punt this one for + # now. select_fields = [r[1] for r in join_field.related_fields] select_alias = lookup_tables[trimmed_paths] # The found starting point is likely a join_class instead of a diff --git a/django/db/models/sql/subqueries.py b/django/db/models/sql/subqueries.py index 9cb971b38f..2705114a54 100644 --- a/django/db/models/sql/subqueries.py +++ b/django/db/models/sql/subqueries.py @@ -1,5 +1,6 @@ """ -Query subclasses which provide extra functionality beyond simple data retrieval. 
+Query subclasses which provide extra functionality beyond simple data +retrieval. """ from django.core.exceptions import FieldError @@ -116,7 +117,8 @@ class UpdateQuery(Query): if field.generated: continue if hasattr(val, "resolve_expression"): - # Resolve expressions here so that annotations are no longer needed + # Resolve expressions here so that annotations are no longer + # needed val = val.resolve_expression(self, allow_joins=False, for_save=True) self.values.append((field, model, val)) |
