plain.models
Model your data and store it in a database.
# app/users/models.py
from plain import models
from plain.passwords.models import PasswordField
class User(models.Model):
email = models.EmailField(unique=True)
password = PasswordField()
is_staff = models.BooleanField(default=False)
created_at = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.email
Create, update, and delete instances of your models:
from .models import User
# Create a new user
user = User.objects.create(
email="[email protected]",
password="password",
)
# Update a user
user.email = "[email protected]"
user.save()
# Delete a user
user.delete()
# Query for users
staff_users = User.objects.filter(is_staff=True)
Installation
# app/settings.py
INSTALLED_PACKAGES = [
...
"plain.models",
]
To connect to a database, you can provide a DATABASE_URL
environment variable.
DATABASE_URL=postgresql://user:password@localhost:5432/dbname
Or you can manually define the `DATABASES` setting.
# app/settings.py
DATABASES = {
"default": {
"ENGINE": "plain.models.backends.postgresql",
"NAME": "dbname",
"USER": "user",
"PASSWORD": "password",
"HOST": "localhost",
"PORT": "5432",
}
}
Multiple backends are supported, including Postgres, MySQL, and SQLite.
Querying
Migrations
Fields
Validation
Indexes and constraints
Managers
Forms
1"""
2The main QuerySet implementation. This provides the public API for the ORM.
3"""
4
5import copy
6import operator
7import warnings
8from itertools import chain, islice
9
10import plain.runtime
11from plain import exceptions
12from plain.exceptions import ValidationError
13from plain.models import (
14 sql,
15 transaction,
16)
17from plain.models.constants import LOOKUP_SEP, OnConflict
18from plain.models.db import (
19 PLAIN_VERSION_PICKLE_KEY,
20 IntegrityError,
21 NotSupportedError,
22 connections,
23 router,
24)
25from plain.models.expressions import Case, F, Value, When
26from plain.models.fields import (
27 AutoField,
28 DateField,
29 DateTimeField,
30 Field,
31)
32from plain.models.functions import Cast, Trunc
33from plain.models.query_utils import FilteredRelation, Q
34from plain.models.sql.constants import CURSOR, GET_ITERATOR_CHUNK_SIZE
35from plain.models.utils import (
36 AltersData,
37 create_namedtuple_class,
38 resolve_callables,
39)
40from plain.runtime import settings
41from plain.utils import timezone
42from plain.utils.functional import cached_property, partition
43
# The maximum number of results to fetch in a get() query. get() fetches one
# row more than it needs so its error message can say "more than 20" without
# scanning the whole result set (see QuerySet.get()).
MAX_GET_RESULTS = 21

# The maximum number of items to display in a QuerySet.__repr__().
REPR_OUTPUT_SIZE = 20
49
50
class BaseIterable:
    """Base class holding the state shared by all QuerySet row iterables.

    Subclasses implement __iter__ and decide how each database row is
    presented (model instance, dict, tuple, namedtuple, or scalar).
    """

    def __init__(
        self, queryset, chunked_fetch=False, chunk_size=GET_ITERATOR_CHUNK_SIZE
    ):
        self.queryset = queryset
        # Whether to use server-side/chunked fetching when executing the SQL.
        self.chunked_fetch = chunked_fetch
        # Number of rows fetched per round trip.
        self.chunk_size = chunk_size
58
59
class ModelIterable(BaseIterable):
    """Iterable that yields a model instance for each row."""

    def __iter__(self):
        queryset = self.queryset
        db = queryset.db
        compiler = queryset.query.get_compiler(using=db)
        # Execute the query. This will also fill compiler.select, klass_info,
        # and annotations.
        results = compiler.execute_sql(
            chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size
        )
        select, klass_info, annotation_col_map = (
            compiler.select,
            compiler.klass_info,
            compiler.annotation_col_map,
        )
        model_cls = klass_info["model"]
        select_fields = klass_info["select_fields"]
        # Columns [start:end) of each row hold the model's own concrete
        # fields; anything outside that span is annotations/related data.
        model_fields_start, model_fields_end = select_fields[0], select_fields[-1] + 1
        init_list = [
            f[0].target.attname for f in select[model_fields_start:model_fields_end]
        ]
        related_populators = get_related_populators(klass_info, select, db)
        # Precompute (field, {pk: obj}, attrgetter) triples once, outside the
        # row loop, for related objects already known to the queryset.
        known_related_objects = [
            (
                field,
                related_objs,
                operator.attrgetter(
                    *[
                        field.attname
                        if from_field == "self"
                        else queryset.model._meta.get_field(from_field).attname
                        for from_field in field.from_fields
                    ]
                ),
            )
            for field, related_objs in queryset._known_related_objects.items()
        ]
        for row in compiler.results_iter(results):
            obj = model_cls.from_db(
                db, init_list, row[model_fields_start:model_fields_end]
            )
            for rel_populator in related_populators:
                rel_populator.populate(row, obj)
            if annotation_col_map:
                for attr_name, col_pos in annotation_col_map.items():
                    setattr(obj, attr_name, row[col_pos])

            # Add the known related objects to the model.
            for field, rel_objs, rel_getter in known_related_objects:
                # Avoid overwriting objects loaded by, e.g., select_related().
                if field.is_cached(obj):
                    continue
                rel_obj_id = rel_getter(obj)
                try:
                    rel_obj = rel_objs[rel_obj_id]
                except KeyError:
                    pass  # May happen in qs1 | qs2 scenarios.
                else:
                    setattr(obj, field.name, rel_obj)

            yield obj
123
124
class RawModelIterable(BaseIterable):
    """
    Iterable that yields a model instance for each row from a raw queryset.
    """

    def __iter__(self):
        # Cache some things for performance reasons outside the loop.
        db = self.queryset.db
        query = self.queryset.query
        connection = connections[db]
        compiler = connection.ops.compiler("SQLCompiler")(query, connection, db)
        query_iterator = iter(query)

        try:
            (
                model_init_names,
                model_init_pos,
                annotation_fields,
            ) = self.queryset.resolve_model_init_order()
            model_cls = self.queryset.model
            # Without the PK the instances could not be identified/saved.
            if model_cls._meta.pk.attname not in model_init_names:
                raise exceptions.FieldDoesNotExist(
                    "Raw query must include the primary key"
                )
            # Columns with no matching model field map to None (no converter).
            fields = [self.queryset.model_fields.get(c) for c in self.queryset.columns]
            converters = compiler.get_converters(
                [f.get_col(f.model._meta.db_table) if f else None for f in fields]
            )
            if converters:
                query_iterator = compiler.apply_converters(query_iterator, converters)
            for values in query_iterator:
                # Associate fields to values
                model_init_values = [values[pos] for pos in model_init_pos]
                instance = model_cls.from_db(db, model_init_names, model_init_values)
                if annotation_fields:
                    for column, pos in annotation_fields:
                        setattr(instance, column, values[pos])
                yield instance
        finally:
            # Done iterating the Query. If it has its own cursor, close it.
            if hasattr(query, "cursor") and query.cursor:
                query.cursor.close()
167
168
class ValuesIterable(BaseIterable):
    """
    Iterable returned by QuerySet.values() that yields a dict for each row.
    """

    def __iter__(self):
        qs = self.queryset
        sql_query = qs.query
        compiler = sql_query.get_compiler(qs.db)

        # extra(select=...) cols are always at the start of the row, followed
        # by the selected values and then the annotations.
        column_names = [
            *sql_query.extra_select,
            *sql_query.values_select,
            *sql_query.annotation_select,
        ]
        rows = compiler.results_iter(
            chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size
        )
        for row in rows:
            yield {name: row[pos] for pos, name in enumerate(column_names)}
190
191
class ValuesListIterable(BaseIterable):
    """
    Iterable returned by QuerySet.values_list(flat=False) that yields a tuple
    for each row.
    """

    def __iter__(self):
        queryset = self.queryset
        query = queryset.query
        compiler = query.get_compiler(queryset.db)

        if queryset._fields:
            # extra(select=...) cols are always at the start of the row.
            names = [
                *query.extra_select,
                *query.values_select,
                *query.annotation_select,
            ]
            fields = [
                *queryset._fields,
                *(f for f in query.annotation_select if f not in queryset._fields),
            ]
            if fields != names:
                # Reorder according to fields: build an itemgetter that maps
                # each row from SQL column order to the caller's field order.
                index_map = {name: idx for idx, name in enumerate(names)}
                rowfactory = operator.itemgetter(*[index_map[f] for f in fields])
                return map(
                    rowfactory,
                    compiler.results_iter(
                        chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size
                    ),
                )
        # No reordering needed; have the compiler produce tuples directly.
        return compiler.results_iter(
            tuple_expected=True,
            chunked_fetch=self.chunked_fetch,
            chunk_size=self.chunk_size,
        )
229
230
class NamedValuesListIterable(ValuesListIterable):
    """
    Iterable returned by QuerySet.values_list(named=True) that yields a
    namedtuple for each row.
    """

    def __iter__(self):
        qs = self.queryset
        if qs._fields:
            field_names = qs._fields
        else:
            sql_query = qs.query
            field_names = [
                *sql_query.extra_select,
                *sql_query.values_select,
                *sql_query.annotation_select,
            ]
        row_class = create_namedtuple_class(*field_names)
        # tuple.__new__ skips namedtuple's per-field keyword handling, which
        # is measurably faster when constructing one instance per row.
        make_row = tuple.__new__
        for values in super().__iter__():
            yield make_row(row_class, values)
252
253
class FlatValuesListIterable(BaseIterable):
    """
    Iterable returned by QuerySet.values_list(flat=True) that yields single
    values.
    """

    def __iter__(self):
        qs = self.queryset
        compiler = qs.query.get_compiler(qs.db)
        first_column = operator.itemgetter(0)
        rows = compiler.results_iter(
            chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size
        )
        yield from map(first_column, rows)
267
268
269class QuerySet(AltersData):
270 """Represent a lazy database lookup for a set of objects."""
271
    def __init__(self, model=None, query=None, using=None, hints=None):
        self.model = model
        self._db = using  # database alias; the `db` property resolves it
        self._hints = hints or {}  # routing hints forwarded to the router
        self._query = query or sql.Query(self.model)
        self._result_cache = None  # filled by _fetch_all(); None = unevaluated
        self._sticky_filter = False
        self._for_write = False  # set before any write so db routing is right
        self._prefetch_related_lookups = ()
        self._prefetch_done = False
        self._known_related_objects = {}  # {rel_field: {pk: rel_obj}}
        self._iterable_class = ModelIterable  # swapped for values()/values_list()
        self._fields = None  # column names requested via values()/values_list()
        self._defer_next_filter = False
        self._deferred_filter = None  # (negate, args, kwargs) applied lazily
287
    @property
    def query(self):
        # Apply any lazily-stored filter (see _deferred_filter) before
        # exposing the underlying sql.Query object.
        if self._deferred_filter:
            negate, args, kwargs = self._deferred_filter
            self._filter_or_exclude_inplace(negate, args, kwargs)
            self._deferred_filter = None
        return self._query
295
    @query.setter
    def query(self, value):
        # A query with values_select produces dict rows, so keep the iterable
        # class consistent with the assigned query.
        if value.values_select:
            self._iterable_class = ValuesIterable
        self._query = value
301
    def as_manager(cls):
        """Return a Manager instance whose querysets use this class."""
        # Address the circular dependency between `Queryset` and `Manager`.
        from plain.models.manager import Manager

        manager = Manager.from_queryset(cls)()
        manager._built_with_as_manager = True
        return manager

    # Attributes are set on the plain function first, then it is wrapped as a
    # classmethod manually (a decorator would hide `queryset_only`).
    as_manager.queryset_only = True
    as_manager = classmethod(as_manager)
312
313 ########################
314 # PYTHON MAGIC METHODS #
315 ########################
316
317 def __deepcopy__(self, memo):
318 """Don't populate the QuerySet's cache."""
319 obj = self.__class__()
320 for k, v in self.__dict__.items():
321 if k == "_result_cache":
322 obj.__dict__[k] = None
323 else:
324 obj.__dict__[k] = copy.deepcopy(v, memo)
325 return obj
326
    def __getstate__(self):
        # Force the cache to be fully populated.
        self._fetch_all()
        # Embed the framework version so __setstate__ can warn on mismatch.
        return {**self.__dict__, PLAIN_VERSION_PICKLE_KEY: plain.runtime.__version__}
331
332 def __setstate__(self, state):
333 pickled_version = state.get(PLAIN_VERSION_PICKLE_KEY)
334 if pickled_version:
335 if pickled_version != plain.runtime.__version__:
336 warnings.warn(
337 "Pickled queryset instance's Plain version {} does not "
338 "match the current version {}.".format(
339 pickled_version, plain.runtime.__version__
340 ),
341 RuntimeWarning,
342 stacklevel=2,
343 )
344 else:
345 warnings.warn(
346 "Pickled queryset instance's Plain version is not specified.",
347 RuntimeWarning,
348 stacklevel=2,
349 )
350 self.__dict__.update(state)
351
352 def __repr__(self):
353 data = list(self[: REPR_OUTPUT_SIZE + 1])
354 if len(data) > REPR_OUTPUT_SIZE:
355 data[-1] = "...(remaining elements truncated)..."
356 return f"<{self.__class__.__name__} {data!r}>"
357
    def __len__(self):
        # len() evaluates the queryset and caches the full result set.
        self._fetch_all()
        return len(self._result_cache)
361
    def __iter__(self):
        """
        The queryset iterator protocol uses three nested iterators in the
        default case:
        1. sql.compiler.execute_sql()
           - Returns 100 rows at time (constants.GET_ITERATOR_CHUNK_SIZE)
             using cursor.fetchmany(). This part is responsible for
             doing some column masking, and returning the rows in chunks.
        2. sql.compiler.results_iter()
           - Returns one row at time. At this point the rows are still just
             tuples. In some cases the return values are converted to
             Python values at this location.
        3. self.iterator()
           - Responsible for turning the rows into model objects.

        Results are cached on first iteration; later iterations reuse
        self._result_cache.
        """
        self._fetch_all()
        return iter(self._result_cache)
379
    def __bool__(self):
        # Truthiness evaluates the queryset (and caches the results).
        self._fetch_all()
        return bool(self._result_cache)
383
384 def __getitem__(self, k):
385 """Retrieve an item or slice from the set of results."""
386 if not isinstance(k, int | slice):
387 raise TypeError(
388 "QuerySet indices must be integers or slices, not %s."
389 % type(k).__name__
390 )
391 if (isinstance(k, int) and k < 0) or (
392 isinstance(k, slice)
393 and (
394 (k.start is not None and k.start < 0)
395 or (k.stop is not None and k.stop < 0)
396 )
397 ):
398 raise ValueError("Negative indexing is not supported.")
399
400 if self._result_cache is not None:
401 return self._result_cache[k]
402
403 if isinstance(k, slice):
404 qs = self._chain()
405 if k.start is not None:
406 start = int(k.start)
407 else:
408 start = None
409 if k.stop is not None:
410 stop = int(k.stop)
411 else:
412 stop = None
413 qs.query.set_limits(start, stop)
414 return list(qs)[:: k.step] if k.step else qs
415
416 qs = self._chain()
417 qs.query.set_limits(k, k + 1)
418 qs._fetch_all()
419 return qs._result_cache[0]
420
    def __class_getitem__(cls, *args, **kwargs):
        # Support subscripting in annotations (e.g. QuerySet[User]) without
        # changing runtime behavior.
        return cls
423
    def __and__(self, other):
        """Combine two querysets with SQL AND; an empty operand wins."""
        self._check_operator_queryset(other, "&")
        self._merge_sanity_check(other)
        if isinstance(other, EmptyQuerySet):
            return other
        if isinstance(self, EmptyQuerySet):
            return self
        combined = self._chain()
        combined._merge_known_related_objects(other)
        combined.query.combine(other.query, sql.AND)
        return combined
435
436 def __or__(self, other):
437 self._check_operator_queryset(other, "|")
438 self._merge_sanity_check(other)
439 if isinstance(self, EmptyQuerySet):
440 return other
441 if isinstance(other, EmptyQuerySet):
442 return self
443 query = (
444 self
445 if self.query.can_filter()
446 else self.model._base_manager.filter(pk__in=self.values("pk"))
447 )
448 combined = query._chain()
449 combined._merge_known_related_objects(other)
450 if not other.query.can_filter():
451 other = other.model._base_manager.filter(pk__in=other.values("pk"))
452 combined.query.combine(other.query, sql.OR)
453 return combined
454
455 def __xor__(self, other):
456 self._check_operator_queryset(other, "^")
457 self._merge_sanity_check(other)
458 if isinstance(self, EmptyQuerySet):
459 return other
460 if isinstance(other, EmptyQuerySet):
461 return self
462 query = (
463 self
464 if self.query.can_filter()
465 else self.model._base_manager.filter(pk__in=self.values("pk"))
466 )
467 combined = query._chain()
468 combined._merge_known_related_objects(other)
469 if not other.query.can_filter():
470 other = other.model._base_manager.filter(pk__in=other.values("pk"))
471 combined.query.combine(other.query, sql.XOR)
472 return combined
473
474 ####################################
475 # METHODS THAT DO DATABASE QUERIES #
476 ####################################
477
    def _iterator(self, use_chunked_fetch, chunk_size):
        # Build the row iterable; 2000 is the default chunk size when the
        # caller didn't supply one (see iterator()).
        iterable = self._iterable_class(
            self,
            chunked_fetch=use_chunked_fetch,
            chunk_size=chunk_size or 2000,
        )
        if not self._prefetch_related_lookups or chunk_size is None:
            yield from iterable
            return

        # With prefetches, consume the results one chunk at a time so related
        # objects can be fetched per chunk instead of per row.
        iterator = iter(iterable)
        while results := list(islice(iterator, chunk_size)):
            prefetch_related_objects(results, *self._prefetch_related_lookups)
            yield from results
492
    def iterator(self, chunk_size=None):
        """
        An iterator over the results from applying this QuerySet to the
        database. chunk_size must be provided for QuerySets that prefetch
        related objects. Otherwise, a default chunk_size of 2000 is supplied.

        Raises ValueError for a non-positive chunk_size, or when prefetches
        are configured but no chunk_size was given.
        """
        if chunk_size is None:
            if self._prefetch_related_lookups:
                raise ValueError(
                    "chunk_size must be provided when using QuerySet.iterator() after "
                    "prefetch_related()."
                )
        elif chunk_size <= 0:
            raise ValueError("Chunk size must be strictly positive.")
        # Use server-side cursors unless the connection settings disable them.
        use_chunked_fetch = not connections[self.db].settings_dict.get(
            "DISABLE_SERVER_SIDE_CURSORS"
        )
        return self._iterator(use_chunked_fetch, chunk_size)
511
    def aggregate(self, *args, **kwargs):
        """
        Return a dictionary containing the calculations (aggregation)
        over the current queryset.

        If args is present the expression is passed as a kwarg using
        the Aggregate object's default alias.
        """
        if self.query.distinct_fields:
            raise NotImplementedError("aggregate() + distinct(fields) not implemented.")
        self._validate_values_are_expressions(
            (*args, *kwargs.values()), method_name="aggregate"
        )
        # Fold positional aggregates into kwargs under their default alias.
        for arg in args:
            # The default_alias property raises TypeError if default_alias
            # can't be set automatically or AttributeError if it isn't an
            # attribute.
            try:
                arg.default_alias
            except (AttributeError, TypeError):
                raise TypeError("Complex aggregates require an alias")
            kwargs[arg.default_alias] = arg

        return self.query.chain().get_aggregation(self.db, kwargs)
536
537 def count(self):
538 """
539 Perform a SELECT COUNT() and return the number of records as an
540 integer.
541
542 If the QuerySet is already fully cached, return the length of the
543 cached results set to avoid multiple SELECT COUNT(*) calls.
544 """
545 if self._result_cache is not None:
546 return len(self._result_cache)
547
548 return self.query.get_count(using=self.db)
549
    def get(self, *args, **kwargs):
        """
        Perform the query and return a single object matching the given
        keyword arguments.

        Raises the model's DoesNotExist when nothing matches and
        MultipleObjectsReturned when more than one row matches.
        """
        if self.query.combinator and (args or kwargs):
            raise NotSupportedError(
                "Calling QuerySet.get(...) with filters after %s() is not "
                "supported." % self.query.combinator
            )
        clone = self._chain() if self.query.combinator else self.filter(*args, **kwargs)
        if self.query.can_filter() and not self.query.distinct_fields:
            # Ordering is irrelevant for a single-row lookup; clearing it can
            # simplify the generated SQL.
            clone = clone.order_by()
        limit = None
        if (
            not clone.query.select_for_update
            or connections[clone.db].features.supports_select_for_update_with_limit
        ):
            # Cap the fetch at MAX_GET_RESULTS so the error message below can
            # say "more than N" without scanning the full result set.
            limit = MAX_GET_RESULTS
            clone.query.set_limits(high=limit)
        num = len(clone)
        if num == 1:
            return clone._result_cache[0]
        if not num:
            raise self.model.DoesNotExist(
                "%s matching query does not exist." % self.model._meta.object_name
            )
        raise self.model.MultipleObjectsReturned(
            "get() returned more than one {} -- it returned {}!".format(
                self.model._meta.object_name,
                num if not limit or num < limit else "more than %s" % (limit - 1),
            )
        )
583
    def create(self, **kwargs):
        """
        Create a new object with the given kwargs, saving it to the database
        and returning the created object.
        """
        obj = self.model(**kwargs)
        self._for_write = True  # route the INSERT to the write database
        obj.save(force_insert=True, using=self.db)
        return obj
593
    def _prepare_for_bulk_create(self, objs):
        # Give each object a PK up front (when the PK field can generate one)
        # and validate related fields before the bulk insert.
        for obj in objs:
            if obj.pk is None:
                # Populate new PK values.
                obj.pk = obj._meta.pk.get_pk_value_on_save(obj)
            obj._prepare_related_fields_for_save(operation_name="bulk_create")
600
    def _check_bulk_create_options(
        self, ignore_conflicts, update_conflicts, update_fields, unique_fields
    ):
        """
        Validate bulk_create()'s conflict-handling arguments against the
        backend's capabilities.

        Returns the OnConflict strategy to use: OnConflict.IGNORE,
        OnConflict.UPDATE, or None for a plain insert. Raises ValueError for
        inconsistent arguments and NotSupportedError for backend limitations.
        """
        if ignore_conflicts and update_conflicts:
            raise ValueError(
                "ignore_conflicts and update_conflicts are mutually exclusive."
            )
        db_features = connections[self.db].features
        if ignore_conflicts:
            if not db_features.supports_ignore_conflicts:
                raise NotSupportedError(
                    "This database backend does not support ignoring conflicts."
                )
            return OnConflict.IGNORE
        elif update_conflicts:
            if not db_features.supports_update_conflicts:
                raise NotSupportedError(
                    "This database backend does not support updating conflicts."
                )
            if not update_fields:
                raise ValueError(
                    "Fields that will be updated when a row insertion fails "
                    "on conflicts must be provided."
                )
            if unique_fields and not db_features.supports_update_conflicts_with_target:
                raise NotSupportedError(
                    "This database backend does not support updating "
                    "conflicts with specifying unique fields that can trigger "
                    "the upsert."
                )
            if not unique_fields and db_features.supports_update_conflicts_with_target:
                raise ValueError(
                    "Unique fields that can trigger the upsert must be provided."
                )
            # Updating primary keys and non-concrete fields is forbidden.
            if any(not f.concrete or f.many_to_many for f in update_fields):
                raise ValueError(
                    "bulk_create() can only be used with concrete fields in "
                    "update_fields."
                )
            if any(f.primary_key for f in update_fields):
                raise ValueError(
                    "bulk_create() cannot be used with primary keys in "
                    "update_fields."
                )
            if unique_fields:
                if any(not f.concrete or f.many_to_many for f in unique_fields):
                    raise ValueError(
                        "bulk_create() can only be used with concrete fields "
                        "in unique_fields."
                    )
            return OnConflict.UPDATE
        return None
654
    def bulk_create(
        self,
        objs,
        batch_size=None,
        ignore_conflicts=False,
        update_conflicts=False,
        update_fields=None,
        unique_fields=None,
    ):
        """
        Insert each of the instances into the database. Do *not* call
        save() on each of the instances, do not send any pre/post_save
        signals, and do not set the primary key attribute if it is an
        autoincrement field (except if features.can_return_rows_from_bulk_insert=True).
        Multi-table models are not supported.
        """
        # When you bulk insert you don't get the primary keys back (if it's an
        # autoincrement, except if can_return_rows_from_bulk_insert=True), so
        # you can't insert into the child tables which references this. There
        # are two workarounds:
        # 1) This could be implemented if you didn't have an autoincrement pk
        # 2) You could do it by doing O(n) normal inserts into the parent
        #    tables to get the primary keys back and then doing a single bulk
        #    insert into the childmost table.
        # We currently set the primary keys on the objects when using
        # PostgreSQL via the RETURNING ID clause. It should be possible for
        # Oracle as well, but the semantics for extracting the primary keys is
        # trickier so it's not done yet.
        if batch_size is not None and batch_size <= 0:
            raise ValueError("Batch size must be a positive integer.")
        # Check that the parents share the same concrete model with our
        # model to detect the inheritance pattern ConcreteGrandParent ->
        # MultiTableParent -> ProxyChild. Simply checking self.model._meta.proxy
        # would not identify that case as involving multiple tables.
        for parent in self.model._meta.get_parent_list():
            if parent._meta.concrete_model is not self.model._meta.concrete_model:
                raise ValueError("Can't bulk create a multi-table inherited model")
        if not objs:
            return objs
        opts = self.model._meta
        if unique_fields:
            # Primary key is allowed in unique_fields.
            unique_fields = [
                self.model._meta.get_field(opts.pk.name if name == "pk" else name)
                for name in unique_fields
            ]
        if update_fields:
            update_fields = [self.model._meta.get_field(name) for name in update_fields]
        on_conflict = self._check_bulk_create_options(
            ignore_conflicts,
            update_conflicts,
            update_fields,
            unique_fields,
        )
        self._for_write = True
        fields = opts.concrete_fields
        objs = list(objs)
        self._prepare_for_bulk_create(objs)
        with transaction.atomic(using=self.db, savepoint=False):
            # Insert objects that already have a PK separately from those that
            # don't, since the latter may need AutoField columns excluded.
            objs_with_pk, objs_without_pk = partition(lambda o: o.pk is None, objs)
            if objs_with_pk:
                returned_columns = self._batched_insert(
                    objs_with_pk,
                    fields,
                    batch_size,
                    on_conflict=on_conflict,
                    update_fields=update_fields,
                    unique_fields=unique_fields,
                )
                for obj_with_pk, results in zip(objs_with_pk, returned_columns):
                    for result, field in zip(results, opts.db_returning_fields):
                        if field != opts.pk:
                            setattr(obj_with_pk, field.attname, result)
                for obj_with_pk in objs_with_pk:
                    obj_with_pk._state.adding = False
                    obj_with_pk._state.db = self.db
            if objs_without_pk:
                fields = [f for f in fields if not isinstance(f, AutoField)]
                returned_columns = self._batched_insert(
                    objs_without_pk,
                    fields,
                    batch_size,
                    on_conflict=on_conflict,
                    update_fields=update_fields,
                    unique_fields=unique_fields,
                )
                connection = connections[self.db]
                if (
                    connection.features.can_return_rows_from_bulk_insert
                    and on_conflict is None
                ):
                    assert len(returned_columns) == len(objs_without_pk)
                for obj_without_pk, results in zip(objs_without_pk, returned_columns):
                    for result, field in zip(results, opts.db_returning_fields):
                        setattr(obj_without_pk, field.attname, result)
                    obj_without_pk._state.adding = False
                    obj_without_pk._state.db = self.db

        return objs
754
    def bulk_update(self, objs, fields, batch_size=None):
        """
        Update the given fields in each of the given objects in the database.

        Returns the total number of rows matched across all batched UPDATE
        statements. All objects must already have a primary key.
        """
        if batch_size is not None and batch_size <= 0:
            raise ValueError("Batch size must be a positive integer.")
        if not fields:
            raise ValueError("Field names must be given to bulk_update().")
        objs = tuple(objs)
        if any(obj.pk is None for obj in objs):
            raise ValueError("All bulk_update() objects must have a primary key set.")
        fields = [self.model._meta.get_field(name) for name in fields]
        if any(not f.concrete or f.many_to_many for f in fields):
            raise ValueError("bulk_update() can only be used with concrete fields.")
        if any(f.primary_key for f in fields):
            raise ValueError("bulk_update() cannot be used with primary key fields.")
        if not objs:
            return 0
        for obj in objs:
            obj._prepare_related_fields_for_save(
                operation_name="bulk_update", fields=fields
            )
        # PK is used twice in the resulting update query, once in the filter
        # and once in the WHEN. Each field will also have one CAST.
        self._for_write = True
        connection = connections[self.db]
        max_batch_size = connection.ops.bulk_batch_size(["pk", "pk"] + fields, objs)
        batch_size = min(batch_size, max_batch_size) if batch_size else max_batch_size
        requires_casting = connection.features.requires_casted_case_in_updates
        batches = (objs[i : i + batch_size] for i in range(0, len(objs), batch_size))
        updates = []
        # Build one CASE WHEN pk=... THEN value expression per field per batch
        # so each batch is a single UPDATE statement.
        for batch_objs in batches:
            update_kwargs = {}
            for field in fields:
                when_statements = []
                for obj in batch_objs:
                    attr = getattr(obj, field.attname)
                    if not hasattr(attr, "resolve_expression"):
                        attr = Value(attr, output_field=field)
                    when_statements.append(When(pk=obj.pk, then=attr))
                case_statement = Case(*when_statements, output_field=field)
                if requires_casting:
                    case_statement = Cast(case_statement, output_field=field)
                update_kwargs[field.attname] = case_statement
            updates.append(([obj.pk for obj in batch_objs], update_kwargs))
        rows_updated = 0
        queryset = self.using(self.db)
        with transaction.atomic(using=self.db, savepoint=False):
            for pks, update_kwargs in updates:
                rows_updated += queryset.filter(pk__in=pks).update(**update_kwargs)
        return rows_updated

    bulk_update.alters_data = True
808
    def get_or_create(self, defaults=None, **kwargs):
        """
        Look up an object with the given kwargs, creating one if necessary.
        Return a tuple of (object, created), where created is a boolean
        specifying whether an object was created.
        """
        # The get() needs to be targeted at the write database in order
        # to avoid potential transaction consistency problems.
        self._for_write = True
        try:
            return self.get(**kwargs), False
        except self.model.DoesNotExist:
            params = self._extract_model_params(defaults, **kwargs)
            # Try to create an object using passed params.
            try:
                with transaction.atomic(using=self.db):
                    params = dict(resolve_callables(params))
                    return self.create(**params), True
            except (IntegrityError, ValidationError):
                # Since create() also validates by default,
                # we can get any kind of ValidationError here,
                # or it can flow through and get an IntegrityError from the database.
                # The main thing we're concerned about is uniqueness failures,
                # but ValidationError could include other things too.
                # In all cases though it should be fine to try the get() again
                # and return an existing object.
                try:
                    return self.get(**kwargs), False
                except self.model.DoesNotExist:
                    pass
                # Re-raise the original create() failure, not the second miss.
                raise
840
    def update_or_create(self, defaults=None, create_defaults=None, **kwargs):
        """
        Look up an object with the given kwargs, updating one with defaults
        if it exists, otherwise create a new one. Optionally, an object can
        be created with different values than defaults by using
        create_defaults.
        Return a tuple (object, created), where created is a boolean
        specifying whether an object was created.
        """
        if create_defaults is None:
            update_defaults = create_defaults = defaults or {}
        else:
            update_defaults = defaults or {}
        self._for_write = True
        with transaction.atomic(using=self.db):
            # Lock the row so that a concurrent update is blocked until
            # update_or_create() has performed its save.
            obj, created = self.select_for_update().get_or_create(
                create_defaults, **kwargs
            )
            if created:
                return obj, created
            for k, v in resolve_callables(update_defaults):
                setattr(obj, k, v)

            update_fields = set(update_defaults)
            concrete_field_names = self.model._meta._non_pk_concrete_field_names
            # update_fields does not support non-concrete fields.
            if concrete_field_names.issuperset(update_fields):
                # Add fields which are set on pre_save(), e.g. auto_now fields.
                # This is to maintain backward compatibility as these fields
                # are not updated unless explicitly specified in the
                # update_fields list.
                for field in self.model._meta.local_concrete_fields:
                    if not (
                        field.primary_key or field.__class__.pre_save is Field.pre_save
                    ):
                        update_fields.add(field.name)
                        if field.name != field.attname:
                            update_fields.add(field.attname)
                obj.save(using=self.db, update_fields=update_fields)
            else:
                obj.save(using=self.db)
        return obj, False
885
886 def _extract_model_params(self, defaults, **kwargs):
887 """
888 Prepare `params` for creating a model instance based on the given
889 kwargs; for use by get_or_create().
890 """
891 defaults = defaults or {}
892 params = {k: v for k, v in kwargs.items() if LOOKUP_SEP not in k}
893 params.update(defaults)
894 property_names = self.model._meta._property_names
895 invalid_params = []
896 for param in params:
897 try:
898 self.model._meta.get_field(param)
899 except exceptions.FieldDoesNotExist:
900 # It's okay to use a model's property if it has a setter.
901 if not (param in property_names and getattr(self.model, param).fset):
902 invalid_params.append(param)
903 if invalid_params:
904 raise exceptions.FieldError(
905 "Invalid field name(s) for model {}: '{}'.".format(
906 self.model._meta.object_name,
907 "', '".join(sorted(invalid_params)),
908 )
909 )
910 return params
911
912 def _earliest(self, *fields):
913 """
914 Return the earliest object according to fields (if given) or by the
915 model's Meta.get_latest_by.
916 """
917 if fields:
918 order_by = fields
919 else:
920 order_by = getattr(self.model._meta, "get_latest_by")
921 if order_by and not isinstance(order_by, tuple | list):
922 order_by = (order_by,)
923 if order_by is None:
924 raise ValueError(
925 "earliest() and latest() require either fields as positional "
926 "arguments or 'get_latest_by' in the model's Meta."
927 )
928 obj = self._chain()
929 obj.query.set_limits(high=1)
930 obj.query.clear_ordering(force=True)
931 obj.query.add_ordering(*order_by)
932 return obj.get()
933
934 def earliest(self, *fields):
935 if self.query.is_sliced:
936 raise TypeError("Cannot change a query once a slice has been taken.")
937 return self._earliest(*fields)
938
939 def latest(self, *fields):
940 """
941 Return the latest object according to fields (if given) or by the
942 model's Meta.get_latest_by.
943 """
944 if self.query.is_sliced:
945 raise TypeError("Cannot change a query once a slice has been taken.")
946 return self.reverse()._earliest(*fields)
947
948 def first(self):
949 """Return the first object of a query or None if no match is found."""
950 if self.ordered:
951 queryset = self
952 else:
953 self._check_ordering_first_last_queryset_aggregation(method="first")
954 queryset = self.order_by("pk")
955 for obj in queryset[:1]:
956 return obj
957
958 def last(self):
959 """Return the last object of a query or None if no match is found."""
960 if self.ordered:
961 queryset = self.reverse()
962 else:
963 self._check_ordering_first_last_queryset_aggregation(method="last")
964 queryset = self.order_by("-pk")
965 for obj in queryset[:1]:
966 return obj
967
968 def in_bulk(self, id_list=None, *, field_name="pk"):
969 """
970 Return a dictionary mapping each of the given IDs to the object with
971 that ID. If `id_list` isn't provided, evaluate the entire QuerySet.
972 """
973 if self.query.is_sliced:
974 raise TypeError("Cannot use 'limit' or 'offset' with in_bulk().")
975 opts = self.model._meta
976 unique_fields = [
977 constraint.fields[0]
978 for constraint in opts.total_unique_constraints
979 if len(constraint.fields) == 1
980 ]
981 if (
982 field_name != "pk"
983 and not opts.get_field(field_name).unique
984 and field_name not in unique_fields
985 and self.query.distinct_fields != (field_name,)
986 ):
987 raise ValueError(
988 "in_bulk()'s field_name must be a unique field but %r isn't."
989 % field_name
990 )
991 if id_list is not None:
992 if not id_list:
993 return {}
994 filter_key = f"{field_name}__in"
995 batch_size = connections[self.db].features.max_query_params
996 id_list = tuple(id_list)
997 # If the database has a limit on the number of query parameters
998 # (e.g. SQLite), retrieve objects in batches if necessary.
999 if batch_size and batch_size < len(id_list):
1000 qs = ()
1001 for offset in range(0, len(id_list), batch_size):
1002 batch = id_list[offset : offset + batch_size]
1003 qs += tuple(self.filter(**{filter_key: batch}))
1004 else:
1005 qs = self.filter(**{filter_key: id_list})
1006 else:
1007 qs = self._chain()
1008 return {getattr(obj, field_name): obj for obj in qs}
1009
    def delete(self):
        """
        Delete the records in the current QuerySet.

        Return the two values produced by Collector.delete(). Not allowed on
        sliced, distinct, combined, or values()/values_list() querysets.
        """
        self._not_support_combined_queries("delete")
        if self.query.is_sliced:
            raise TypeError("Cannot use 'limit' or 'offset' with delete().")
        if self.query.distinct or self.query.distinct_fields:
            raise TypeError("Cannot call delete() after .distinct().")
        if self._fields is not None:
            raise TypeError("Cannot call delete() after .values() or .values_list()")

        del_query = self._chain()

        # The delete is actually 2 queries - one to find related objects,
        # and one to delete. Make sure that the discovery of related
        # objects is performed on the same database as the deletion.
        del_query._for_write = True

        # Disable non-supported fields.
        del_query.query.select_for_update = False
        del_query.query.select_related = False
        del_query.query.clear_ordering(force=True)

        # NOTE(review): local import — presumably avoids a circular import
        # between this module and plain.models.deletion.
        from plain.models.deletion import Collector

        collector = Collector(using=del_query.db, origin=self)
        collector.collect(del_query)
        deleted, _rows_count = collector.delete()

        # Clear the result cache, in case this QuerySet gets reused.
        self._result_cache = None
        return deleted, _rows_count

    delete.alters_data = True
    delete.queryset_only = True
1044
    def _raw_delete(self, using):
        """
        Delete objects found from the given queryset in single direct SQL
        query. No signals are sent and there is no protection for cascades.

        Return the number of rows the cursor reports as affected, or 0 when
        no cursor is returned.
        """
        query = self.query.clone()
        # Re-typing the cloned query turns it into a DELETE statement.
        query.__class__ = sql.DeleteQuery
        cursor = query.get_compiler(using).execute_sql(CURSOR)
        if cursor:
            with cursor:
                return cursor.rowcount
        return 0

    _raw_delete.alters_data = True
1059
    def update(self, **kwargs):
        """
        Update all elements in the current QuerySet, setting all the given
        fields to the appropriate values.

        Return the row count produced by executing the UPDATE. Raise
        TypeError on sliced querysets and FieldError when ordered by an
        aggregate annotation.
        """
        self._not_support_combined_queries("update")
        if self.query.is_sliced:
            raise TypeError("Cannot update a query once a slice has been taken.")
        self._for_write = True
        query = self.query.chain(sql.UpdateQuery)
        query.add_update_values(kwargs)

        # Inline annotations in order_by(), if possible.
        new_order_by = []
        for col in query.order_by:
            alias = col
            descending = False
            # A leading "-" on a string ordering means descending.
            if isinstance(alias, str) and alias.startswith("-"):
                alias = alias.removeprefix("-")
                descending = True
            if annotation := query.annotations.get(alias):
                if getattr(annotation, "contains_aggregate", False):
                    raise exceptions.FieldError(
                        f"Cannot update when ordering by an aggregate: {annotation}"
                    )
                if descending:
                    annotation = annotation.desc()
                new_order_by.append(annotation)
            else:
                new_order_by.append(col)
        query.order_by = tuple(new_order_by)

        # Clear any annotations so that they won't be present in subqueries.
        query.annotations = {}
        # Roll back on error so a failed UPDATE doesn't poison the
        # surrounding transaction.
        with transaction.mark_for_rollback_on_error(using=self.db):
            rows = query.get_compiler(self.db).execute_sql(CURSOR)
        self._result_cache = None
        return rows

    update.alters_data = True
1100
    def _update(self, values):
        """
        A version of update() that accepts field objects instead of field names.
        Used primarily for model saving and not intended for use by general
        code (it requires too much poking around at model internals to be
        useful at that level).
        """
        if self.query.is_sliced:
            raise TypeError("Cannot update a query once a slice has been taken.")
        query = self.query.chain(sql.UpdateQuery)
        query.add_update_fields(values)
        # Clear any annotations so that they won't be present in subqueries.
        query.annotations = {}
        self._result_cache = None
        # Unlike update(), no rollback marking or order_by rewriting happens
        # here.
        return query.get_compiler(self.db).execute_sql(CURSOR)

    _update.alters_data = True
    _update.queryset_only = False
1119
1120 def exists(self):
1121 """
1122 Return True if the QuerySet would have any results, False otherwise.
1123 """
1124 if self._result_cache is None:
1125 return self.query.has_results(using=self.db)
1126 return bool(self._result_cache)
1127
    def contains(self, obj):
        """
        Return True if the QuerySet contains the provided obj,
        False otherwise.

        Raise TypeError for values()/values_list() querysets or a non-model
        obj, and ValueError for unsaved instances.
        """
        self._not_support_combined_queries("contains")
        if self._fields is not None:
            raise TypeError(
                "Cannot call QuerySet.contains() after .values() or .values_list()."
            )
        try:
            # Instances of a different concrete model can never be contained.
            if obj._meta.concrete_model != self.model._meta.concrete_model:
                return False
        except AttributeError:
            raise TypeError("'obj' must be a model instance.")
        if obj.pk is None:
            raise ValueError("QuerySet.contains() cannot be used on unsaved objects.")
        if self._result_cache is not None:
            # Cheap in-memory membership test when results are cached.
            return obj in self._result_cache
        return self.filter(pk=obj.pk).exists()
1148
    def _prefetch_related_objects(self):
        """Run the registered prefetch lookups against the cached results."""
        # This method can only be called once the result cache has been filled.
        prefetch_related_objects(self._result_cache, *self._prefetch_related_lookups)
        self._prefetch_done = True
1153
    def explain(self, *, format=None, **options):
        """
        Runs an EXPLAIN on the SQL query this QuerySet would perform, and
        returns the results.

        format and any extra options are passed through to the query;
        supported values are backend-dependent.
        """
        return self.query.explain(using=self.db, format=format, **options)
1160
1161 ##################################################
1162 # PUBLIC METHODS THAT RETURN A QUERYSET SUBCLASS #
1163 ##################################################
1164
1165 def raw(self, raw_query, params=(), translations=None, using=None):
1166 if using is None:
1167 using = self.db
1168 qs = RawQuerySet(
1169 raw_query,
1170 model=self.model,
1171 params=params,
1172 translations=translations,
1173 using=using,
1174 )
1175 qs._prefetch_related_lookups = self._prefetch_related_lookups[:]
1176 return qs
1177
    def _values(self, *fields, **expressions):
        """
        Shared setup for values()/values_list(): annotate any expressions,
        then restrict the selected columns to `fields`.
        """
        clone = self._chain()
        if expressions:
            clone = clone.annotate(**expressions)
        clone._fields = fields
        clone.query.set_values(fields)
        return clone
1185
1186 def values(self, *fields, **expressions):
1187 fields += tuple(expressions)
1188 clone = self._values(*fields, **expressions)
1189 clone._iterable_class = ValuesIterable
1190 return clone
1191
    def values_list(self, *fields, flat=False, named=False):
        """
        Like values(), but yield row tuples. flat=True (single field only)
        yields bare values; named=True yields rows via
        NamedValuesListIterable. The two flags are mutually exclusive.
        """
        if flat and named:
            raise TypeError("'flat' and 'named' can't be used together.")
        if flat and len(fields) > 1:
            raise TypeError(
                "'flat' is not valid when values_list is called with more than one "
                "field."
            )

        field_names = {f for f in fields if not hasattr(f, "resolve_expression")}
        _fields = []
        expressions = {}
        counter = 1
        for field in fields:
            if hasattr(field, "resolve_expression"):
                # Expressions need a generated alias that does not clash
                # with any plain field name that was also requested.
                field_id_prefix = getattr(
                    field, "default_alias", field.__class__.__name__.lower()
                )
                while True:
                    field_id = field_id_prefix + str(counter)
                    counter += 1
                    if field_id not in field_names:
                        break
                expressions[field_id] = field
                _fields.append(field_id)
            else:
                _fields.append(field)

        clone = self._values(*_fields, **expressions)
        clone._iterable_class = (
            NamedValuesListIterable
            if named
            else FlatValuesListIterable
            if flat
            else ValuesListIterable
        )
        return clone
1229
    def dates(self, field_name, kind, order="ASC"):
        """
        Return a list of date objects representing all available dates for
        the given field_name, scoped to 'kind'.

        kind is one of "year", "month", "week", "day"; order is "ASC" or
        "DESC".
        """
        if kind not in ("year", "month", "week", "day"):
            raise ValueError("'kind' must be one of 'year', 'month', 'week', or 'day'.")
        if order not in ("ASC", "DESC"):
            raise ValueError("'order' must be either 'ASC' or 'DESC'.")
        return (
            self.annotate(
                # Truncate to the requested granularity; plain_field exists
                # only so NULL source values can be filtered out below.
                datefield=Trunc(field_name, kind, output_field=DateField()),
                plain_field=F(field_name),
            )
            .values_list("datefield", flat=True)
            .distinct()
            .filter(plain_field__isnull=False)
            .order_by(("-" if order == "DESC" else "") + "datefield")
        )
1249
    def datetimes(self, field_name, kind, order="ASC", tzinfo=None):
        """
        Return a list of datetime objects representing all available
        datetimes for the given field_name, scoped to 'kind'.

        kind is one of "year", "month", "week", "day", "hour", "minute",
        "second"; order is "ASC" or "DESC". tzinfo is only honored when
        settings.USE_TZ is enabled.
        """
        if kind not in ("year", "month", "week", "day", "hour", "minute", "second"):
            raise ValueError(
                "'kind' must be one of 'year', 'month', 'week', 'day', "
                "'hour', 'minute', or 'second'."
            )
        if order not in ("ASC", "DESC"):
            raise ValueError("'order' must be either 'ASC' or 'DESC'.")
        # NOTE(review): `settings` and `timezone` are resolved at module
        # level (not visible in this chunk).
        if settings.USE_TZ:
            if tzinfo is None:
                tzinfo = timezone.get_current_timezone()
        else:
            tzinfo = None
        return (
            self.annotate(
                # Truncate to the requested granularity; plain_field exists
                # only so NULL source values can be filtered out below.
                datetimefield=Trunc(
                    field_name,
                    kind,
                    output_field=DateTimeField(),
                    tzinfo=tzinfo,
                ),
                plain_field=F(field_name),
            )
            .values_list("datetimefield", flat=True)
            .distinct()
            .filter(plain_field__isnull=False)
            .order_by(("-" if order == "DESC" else "") + "datetimefield")
        )
1282
1283 def none(self):
1284 """Return an empty QuerySet."""
1285 clone = self._chain()
1286 clone.query.set_empty()
1287 return clone
1288
1289 ##################################################################
1290 # PUBLIC METHODS THAT ALTER ATTRIBUTES AND RETURN A NEW QUERYSET #
1291 ##################################################################
1292
    def all(self):
        """
        Return a new QuerySet that is a copy of the current one. This allows a
        QuerySet to proxy for a model manager in some cases.
        """
        # _chain() returns a clone that's ready for further filtering.
        return self._chain()
1299
    def filter(self, *args, **kwargs):
        """
        Return a new QuerySet instance with the args ANDed to the existing
        set.

        args are Q objects / expressions; kwargs are field lookups.
        """
        self._not_support_combined_queries("filter")
        return self._filter_or_exclude(False, args, kwargs)
1307
    def exclude(self, *args, **kwargs):
        """
        Return a new QuerySet instance with NOT (args) ANDed to the existing
        set.

        args are Q objects / expressions; kwargs are field lookups.
        """
        self._not_support_combined_queries("exclude")
        return self._filter_or_exclude(True, args, kwargs)
1315
1316 def _filter_or_exclude(self, negate, args, kwargs):
1317 if (args or kwargs) and self.query.is_sliced:
1318 raise TypeError("Cannot filter a query once a slice has been taken.")
1319 clone = self._chain()
1320 if self._defer_next_filter:
1321 self._defer_next_filter = False
1322 clone._deferred_filter = negate, args, kwargs
1323 else:
1324 clone._filter_or_exclude_inplace(negate, args, kwargs)
1325 return clone
1326
1327 def _filter_or_exclude_inplace(self, negate, args, kwargs):
1328 if negate:
1329 self._query.add_q(~Q(*args, **kwargs))
1330 else:
1331 self._query.add_q(Q(*args, **kwargs))
1332
1333 def complex_filter(self, filter_obj):
1334 """
1335 Return a new QuerySet instance with filter_obj added to the filters.
1336
1337 filter_obj can be a Q object or a dictionary of keyword lookup
1338 arguments.
1339
1340 This exists to support framework features such as 'limit_choices_to',
1341 and usually it will be more natural to use other methods.
1342 """
1343 if isinstance(filter_obj, Q):
1344 clone = self._chain()
1345 clone.query.add_q(filter_obj)
1346 return clone
1347 else:
1348 return self._filter_or_exclude(False, args=(), kwargs=filter_obj)
1349
1350 def _combinator_query(self, combinator, *other_qs, all=False):
1351 # Clone the query to inherit the select list and everything
1352 clone = self._chain()
1353 # Clear limits and ordering so they can be reapplied
1354 clone.query.clear_ordering(force=True)
1355 clone.query.clear_limits()
1356 clone.query.combined_queries = (self.query,) + tuple(
1357 qs.query for qs in other_qs
1358 )
1359 clone.query.combinator = combinator
1360 clone.query.combinator_all = all
1361 return clone
1362
    def union(self, *other_qs, all=False):
        """
        Return the union of this queryset with other_qs. With all=True,
        keep duplicates (UNION ALL).
        """
        # If the query is an EmptyQuerySet, combine all nonempty querysets.
        if isinstance(self, EmptyQuerySet):
            qs = [q for q in other_qs if not isinstance(q, EmptyQuerySet)]
            if not qs:
                return self
            if len(qs) == 1:
                # Single nonempty operand: no combination needed.
                return qs[0]
            return qs[0]._combinator_query("union", *qs[1:], all=all)
        return self._combinator_query("union", *other_qs, all=all)
1373
1374 def intersection(self, *other_qs):
1375 # If any query is an EmptyQuerySet, return it.
1376 if isinstance(self, EmptyQuerySet):
1377 return self
1378 for other in other_qs:
1379 if isinstance(other, EmptyQuerySet):
1380 return other
1381 return self._combinator_query("intersection", *other_qs)
1382
1383 def difference(self, *other_qs):
1384 # If the query is an EmptyQuerySet, return it.
1385 if isinstance(self, EmptyQuerySet):
1386 return self
1387 return self._combinator_query("difference", *other_qs)
1388
    def select_for_update(self, nowait=False, skip_locked=False, of=(), no_key=False):
        """
        Return a new QuerySet instance that will select objects with a
        FOR UPDATE lock.

        nowait and skip_locked are mutually exclusive; `of` and `no_key`
        are recorded on the query for the SQL compiler to apply
        (backend-dependent).
        """
        if nowait and skip_locked:
            raise ValueError("The nowait option cannot be used with skip_locked.")
        obj = self._chain()
        # Locking rows implies using the write database.
        obj._for_write = True
        obj.query.select_for_update = True
        obj.query.select_for_update_nowait = nowait
        obj.query.select_for_update_skip_locked = skip_locked
        obj.query.select_for_update_of = of
        obj.query.select_for_no_key_update = no_key
        return obj
1404
1405 def select_related(self, *fields):
1406 """
1407 Return a new QuerySet instance that will select related objects.
1408
1409 If fields are specified, they must be ForeignKey fields and only those
1410 related objects are included in the selection.
1411
1412 If select_related(None) is called, clear the list.
1413 """
1414 self._not_support_combined_queries("select_related")
1415 if self._fields is not None:
1416 raise TypeError(
1417 "Cannot call select_related() after .values() or .values_list()"
1418 )
1419
1420 obj = self._chain()
1421 if fields == (None,):
1422 obj.query.select_related = False
1423 elif fields:
1424 obj.query.add_select_related(fields)
1425 else:
1426 obj.query.select_related = True
1427 return obj
1428
    def prefetch_related(self, *lookups):
        """
        Return a new QuerySet instance that will prefetch the specified
        Many-To-One and Many-To-Many related objects when the QuerySet is
        evaluated.

        When prefetch_related() is called more than once, append to the list of
        prefetch lookups. If prefetch_related(None) is called, clear the list.
        """
        self._not_support_combined_queries("prefetch_related")
        clone = self._chain()
        if lookups == (None,):
            clone._prefetch_related_lookups = ()
        else:
            for lookup in lookups:
                if isinstance(lookup, Prefetch):
                    lookup = lookup.prefetch_to
                # Only the first lookup component can name a filtered
                # relation, so check just that part.
                lookup = lookup.split(LOOKUP_SEP, 1)[0]
                if lookup in self.query._filtered_relations:
                    raise ValueError(
                        "prefetch_related() is not supported with FilteredRelation."
                    )
            clone._prefetch_related_lookups = clone._prefetch_related_lookups + lookups
        return clone
1453
    def annotate(self, *args, **kwargs):
        """
        Return a query set in which the returned objects have been annotated
        with extra data or aggregations.

        Annotations added here are selected in the query (contrast alias()).
        """
        self._not_support_combined_queries("annotate")
        return self._annotate(args, kwargs, select=True)
1461
    def alias(self, *args, **kwargs):
        """
        Return a query set with added aliases for extra data or aggregations.

        Unlike annotate(), aliased expressions are not selected in the query.
        """
        self._not_support_combined_queries("alias")
        return self._annotate(args, kwargs, select=False)
1468
    def _annotate(self, args, kwargs, select=True):
        """
        Shared implementation of annotate() (select=True) and alias()
        (select=False): attach expressions to the query under aliases.
        """
        self._validate_values_are_expressions(
            args + tuple(kwargs.values()), method_name="annotate"
        )
        annotations = {}
        for arg in args:
            # The default_alias property may raise a TypeError.
            try:
                if arg.default_alias in kwargs:
                    raise ValueError(
                        "The named annotation '%s' conflicts with the "
                        "default name for another annotation." % arg.default_alias
                    )
            except TypeError:
                raise TypeError("Complex annotations require an alias")
            annotations[arg.default_alias] = arg
        annotations.update(kwargs)

        clone = self._chain()
        names = self._fields
        if names is None:
            # Collect every name the model exposes (field names plus
            # attnames) so annotation aliases cannot shadow them.
            names = set(
                chain.from_iterable(
                    (field.name, field.attname)
                    if hasattr(field, "attname")
                    else (field.name,)
                    for field in self.model._meta.get_fields()
                )
            )

        for alias, annotation in annotations.items():
            if alias in names:
                raise ValueError(
                    "The annotation '%s' conflicts with a field on "
                    "the model." % alias
                )
            if isinstance(annotation, FilteredRelation):
                clone.query.add_filtered_relation(annotation, alias)
            else:
                clone.query.add_annotation(
                    annotation,
                    alias,
                    select=select,
                )
        # Any aggregate among the new annotations forces grouping.
        for alias, annotation in clone.query.annotations.items():
            if alias in annotations and annotation.contains_aggregate:
                if clone._fields is None:
                    clone.query.group_by = True
                else:
                    clone.query.set_group_by()
                break

        return clone
1522
1523 def order_by(self, *field_names):
1524 """Return a new QuerySet instance with the ordering changed."""
1525 if self.query.is_sliced:
1526 raise TypeError("Cannot reorder a query once a slice has been taken.")
1527 obj = self._chain()
1528 obj.query.clear_ordering(force=True, clear_default=False)
1529 obj.query.add_ordering(*field_names)
1530 return obj
1531
1532 def distinct(self, *field_names):
1533 """
1534 Return a new QuerySet instance that will select only distinct results.
1535 """
1536 self._not_support_combined_queries("distinct")
1537 if self.query.is_sliced:
1538 raise TypeError(
1539 "Cannot create distinct fields once a slice has been taken."
1540 )
1541 obj = self._chain()
1542 obj.query.add_distinct_fields(*field_names)
1543 return obj
1544
    def extra(
        self,
        select=None,
        where=None,
        params=None,
        tables=None,
        order_by=None,
        select_params=None,
    ):
        """Add extra SQL fragments to the query."""
        self._not_support_combined_queries("extra")
        if self.query.is_sliced:
            raise TypeError("Cannot change a query once a slice has been taken.")
        clone = self._chain()
        # Note: add_extra's parameter order differs from this signature.
        clone.query.add_extra(select, select_params, where, params, tables, order_by)
        return clone
1561
1562 def reverse(self):
1563 """Reverse the ordering of the QuerySet."""
1564 if self.query.is_sliced:
1565 raise TypeError("Cannot reverse a query once a slice has been taken.")
1566 clone = self._chain()
1567 clone.query.standard_ordering = not clone.query.standard_ordering
1568 return clone
1569
1570 def defer(self, *fields):
1571 """
1572 Defer the loading of data for certain fields until they are accessed.
1573 Add the set of deferred fields to any existing set of deferred fields.
1574 The only exception to this is if None is passed in as the only
1575 parameter, in which case removal all deferrals.
1576 """
1577 self._not_support_combined_queries("defer")
1578 if self._fields is not None:
1579 raise TypeError("Cannot call defer() after .values() or .values_list()")
1580 clone = self._chain()
1581 if fields == (None,):
1582 clone.query.clear_deferred_loading()
1583 else:
1584 clone.query.add_deferred_loading(fields)
1585 return clone
1586
    def only(self, *fields):
        """
        Essentially, the opposite of defer(). Only the fields passed into this
        method and that are not already specified as deferred are loaded
        immediately when the queryset is evaluated.

        Raise TypeError for values() querysets or only(None), and ValueError
        when a field refers to a filtered relation.
        """
        self._not_support_combined_queries("only")
        if self._fields is not None:
            raise TypeError("Cannot call only() after .values() or .values_list()")
        if fields == (None,):
            # Can only pass None to defer(), not only(), as the rest option.
            # That won't stop people trying to do this, so let's be explicit.
            raise TypeError("Cannot pass None as an argument to only().")
        for field in fields:
            # Only the first lookup component can name a filtered relation.
            field = field.split(LOOKUP_SEP, 1)[0]
            if field in self.query._filtered_relations:
                raise ValueError("only() is not supported with FilteredRelation.")
        clone = self._chain()
        clone.query.add_immediate_loading(fields)
        return clone
1607
1608 def using(self, alias):
1609 """Select which database this QuerySet should execute against."""
1610 clone = self._chain()
1611 clone._db = alias
1612 return clone
1613
1614 ###################################
1615 # PUBLIC INTROSPECTION ATTRIBUTES #
1616 ###################################
1617
1618 @property
1619 def ordered(self):
1620 """
1621 Return True if the QuerySet is ordered -- i.e. has an order_by()
1622 clause or a default ordering on the model (or is empty).
1623 """
1624 if isinstance(self, EmptyQuerySet):
1625 return True
1626 if self.query.extra_order_by or self.query.order_by:
1627 return True
1628 elif (
1629 self.query.default_ordering
1630 and self.query.get_meta().ordering
1631 and
1632 # A default ordering doesn't affect GROUP BY queries.
1633 not self.query.group_by
1634 ):
1635 return True
1636 else:
1637 return False
1638
1639 @property
1640 def db(self):
1641 """Return the database used if this query is executed now."""
1642 if self._for_write:
1643 return self._db or router.db_for_write(self.model, **self._hints)
1644 return self._db or router.db_for_read(self.model, **self._hints)
1645
1646 ###################
1647 # PRIVATE METHODS #
1648 ###################
1649
    def _insert(
        self,
        objs,
        fields,
        returning_fields=None,
        raw=False,
        using=None,
        on_conflict=None,
        update_fields=None,
        unique_fields=None,
    ):
        """
        Insert a new record for the given model. This provides an interface to
        the InsertQuery class and is how Model.save() is implemented.

        objs/fields define the rows and columns; returning_fields asks the
        backend to return values; on_conflict/update_fields/unique_fields
        configure conflict handling (e.g. for bulk upserts).
        """
        self._for_write = True
        if using is None:
            using = self.db
        query = sql.InsertQuery(
            self.model,
            on_conflict=on_conflict,
            update_fields=update_fields,
            unique_fields=unique_fields,
        )
        query.insert_values(fields, objs, raw=raw)
        return query.get_compiler(using=using).execute_sql(returning_fields)

    _insert.alters_data = True
    _insert.queryset_only = False
1679
    def _batched_insert(
        self,
        objs,
        fields,
        batch_size,
        on_conflict=None,
        update_fields=None,
        unique_fields=None,
    ):
        """
        Helper method for bulk_create() to insert objs one batch at a time.

        Return rows reported back by the backend when it can return rows
        from bulk insert and no conflict handling is requested; otherwise
        an empty list.
        """
        connection = connections[self.db]
        ops = connection.ops
        # Respect the backend's own limit on rows per INSERT.
        max_batch_size = max(ops.bulk_batch_size(fields, objs), 1)
        batch_size = min(batch_size, max_batch_size) if batch_size else max_batch_size
        inserted_rows = []
        bulk_return = connection.features.can_return_rows_from_bulk_insert
        for item in [objs[i : i + batch_size] for i in range(0, len(objs), batch_size)]:
            if bulk_return and on_conflict is None:
                inserted_rows.extend(
                    self._insert(
                        item,
                        fields=fields,
                        using=self.db,
                        returning_fields=self.model._meta.db_returning_fields,
                    )
                )
            else:
                self._insert(
                    item,
                    fields=fields,
                    using=self.db,
                    on_conflict=on_conflict,
                    update_fields=update_fields,
                    unique_fields=unique_fields,
                )
        return inserted_rows
1718
1719 def _chain(self):
1720 """
1721 Return a copy of the current QuerySet that's ready for another
1722 operation.
1723 """
1724 obj = self._clone()
1725 if obj._sticky_filter:
1726 obj.query.filter_is_sticky = True
1727 obj._sticky_filter = False
1728 return obj
1729
    def _clone(self):
        """
        Return a copy of the current QuerySet. A lightweight alternative
        to deepcopy().
        """
        c = self.__class__(
            model=self.model,
            query=self.query.chain(),
            using=self._db,
            hints=self._hints,
        )
        # Copy queryset-level state that lives outside the Query object.
        c._sticky_filter = self._sticky_filter
        c._for_write = self._for_write
        c._prefetch_related_lookups = self._prefetch_related_lookups[:]
        c._known_related_objects = self._known_related_objects
        c._iterable_class = self._iterable_class
        c._fields = self._fields
        return c
1748
    def _fetch_all(self):
        """Materialize the result cache and run any pending prefetches."""
        if self._result_cache is None:
            self._result_cache = list(self._iterable_class(self))
        if self._prefetch_related_lookups and not self._prefetch_done:
            self._prefetch_related_objects()
1754
    def _next_is_sticky(self):
        """
        Indicate that the next filter call and the one following that should
        be treated as a single filter. This is only important when it comes to
        determining when to reuse tables for many-to-many filters. Required so
        that we can filter naturally on the results of related managers.

        This doesn't return a clone of the current QuerySet (it returns
        "self"). The method is only used internally and should be immediately
        followed by a filter() that does create a clone.
        """
        # The flag is consumed (and moved onto the query) by _chain().
        self._sticky_filter = True
        return self
1768
1769 def _merge_sanity_check(self, other):
1770 """Check that two QuerySet classes may be merged."""
1771 if self._fields is not None and (
1772 set(self.query.values_select) != set(other.query.values_select)
1773 or set(self.query.extra_select) != set(other.query.extra_select)
1774 or set(self.query.annotation_select) != set(other.query.annotation_select)
1775 ):
1776 raise TypeError(
1777 "Merging '%s' classes must involve the same values in each case."
1778 % self.__class__.__name__
1779 )
1780
1781 def _merge_known_related_objects(self, other):
1782 """
1783 Keep track of all known related objects from either QuerySet instance.
1784 """
1785 for field, objects in other._known_related_objects.items():
1786 self._known_related_objects.setdefault(field, {}).update(objects)
1787
    def resolve_expression(self, *args, **kwargs):
        """
        Resolve this queryset for use as a subquery expression, returning
        the resolved underlying Query object.
        """
        if self._fields and len(self._fields) > 1:
            # values() queryset can only be used as nested queries
            # if they are set up to select only a single field.
            raise TypeError("Cannot use multi-field values as a filter value.")
        query = self.query.resolve_expression(*args, **kwargs)
        # Carry the database alias onto the resolved query.
        query._db = self._db
        return query

    resolve_expression.queryset_only = True
1798
    def _add_hints(self, **hints):
        """
        Update hinting information for use by routers. Add new key/values or
        overwrite existing key/values.
        """
        # dict.update semantics: later calls win for duplicate keys.
        self._hints.update(hints)
1805
    def _has_filters(self):
        """
        Check if this QuerySet has any filtering going on. This isn't
        equivalent with checking if all objects are present in results, for
        example, qs[1:]._has_filters() -> False.
        """
        # Delegates entirely to the underlying Query object.
        return self.query.has_filters()
1813
1814 @staticmethod
1815 def _validate_values_are_expressions(values, method_name):
1816 invalid_args = sorted(
1817 str(arg) for arg in values if not hasattr(arg, "resolve_expression")
1818 )
1819 if invalid_args:
1820 raise TypeError(
1821 "QuerySet.{}() received non-expression(s): {}.".format(
1822 method_name,
1823 ", ".join(invalid_args),
1824 )
1825 )
1826
1827 def _not_support_combined_queries(self, operation_name):
1828 if self.query.combinator:
1829 raise NotSupportedError(
1830 "Calling QuerySet.{}() after {}() is not supported.".format(
1831 operation_name, self.query.combinator
1832 )
1833 )
1834
    def _check_operator_queryset(self, other, operator_):
        """Raise TypeError when either operand is a combined queryset."""
        if self.query.combinator or other.query.combinator:
            raise TypeError(f"Cannot use {operator_} operator with combined queryset.")
1838
1839 def _check_ordering_first_last_queryset_aggregation(self, method):
1840 if isinstance(self.query.group_by, tuple) and not any(
1841 col.output_field is self.model._meta.pk for col in self.query.group_by
1842 ):
1843 raise TypeError(
1844 f"Cannot use QuerySet.{method}() on an unordered queryset performing "
1845 f"aggregation. Add an ordering with order_by()."
1846 )
1847
1848
class InstanceCheckMeta(type):
    """Metaclass whose isinstance() check tests for an empty queryset."""

    def __instancecheck__(self, instance):
        # True for any QuerySet whose underlying query reports is_empty().
        return isinstance(instance, QuerySet) and instance.query.is_empty()
1852
1853
class EmptyQuerySet(metaclass=InstanceCheckMeta):
    """
    Marker class for checking if a queryset is empty by .none():
    isinstance(qs.none(), EmptyQuerySet) -> True
    """

    def __init__(self, *args, **kwargs):
        # Never instantiated; only used on the right-hand side of isinstance().
        raise TypeError("EmptyQuerySet can't be instantiated")
1862
1863
1864class RawQuerySet:
1865 """
1866 Provide an iterator which converts the results of raw SQL queries into
1867 annotated model instances.
1868 """
1869
    def __init__(
        self,
        raw_query,
        model=None,
        query=None,
        params=(),
        translations=None,
        using=None,
        hints=None,
    ):
        """
        Wrap a raw SQL string (plus params) for iteration as model instances.

        translations maps result-column names to model field names; using
        selects the database alias; hints are passed to the router.
        """
        self.raw_query = raw_query
        self.model = model
        self._db = using
        self._hints = hints or {}
        # NOTE(review): self.db is a property defined later in this class
        # (outside this chunk); it presumably falls back to the router when
        # `using` is None — confirm.
        self.query = query or sql.RawQuery(sql=raw_query, using=self.db, params=params)
        self.params = params
        self.translations = translations or {}
        self._result_cache = None
        self._prefetch_related_lookups = ()
        self._prefetch_done = False
1890
1891 def resolve_model_init_order(self):
1892 """Resolve the init field names and value positions."""
1893 converter = connections[self.db].introspection.identifier_converter
1894 model_init_fields = [
1895 f for f in self.model._meta.fields if converter(f.column) in self.columns
1896 ]
1897 annotation_fields = [
1898 (column, pos)
1899 for pos, column in enumerate(self.columns)
1900 if column not in self.model_fields
1901 ]
1902 model_init_order = [
1903 self.columns.index(converter(f.column)) for f in model_init_fields
1904 ]
1905 model_init_names = [f.attname for f in model_init_fields]
1906 return model_init_names, model_init_order, annotation_fields
1907
1908 def prefetch_related(self, *lookups):
1909 """Same as QuerySet.prefetch_related()"""
1910 clone = self._clone()
1911 if lookups == (None,):
1912 clone._prefetch_related_lookups = ()
1913 else:
1914 clone._prefetch_related_lookups = clone._prefetch_related_lookups + lookups
1915 return clone
1916
1917 def _prefetch_related_objects(self):
1918 prefetch_related_objects(self._result_cache, *self._prefetch_related_lookups)
1919 self._prefetch_done = True
1920
1921 def _clone(self):
1922 """Same as QuerySet._clone()"""
1923 c = self.__class__(
1924 self.raw_query,
1925 model=self.model,
1926 query=self.query,
1927 params=self.params,
1928 translations=self.translations,
1929 using=self._db,
1930 hints=self._hints,
1931 )
1932 c._prefetch_related_lookups = self._prefetch_related_lookups[:]
1933 return c
1934
1935 def _fetch_all(self):
1936 if self._result_cache is None:
1937 self._result_cache = list(self.iterator())
1938 if self._prefetch_related_lookups and not self._prefetch_done:
1939 self._prefetch_related_objects()
1940
1941 def __len__(self):
1942 self._fetch_all()
1943 return len(self._result_cache)
1944
1945 def __bool__(self):
1946 self._fetch_all()
1947 return bool(self._result_cache)
1948
1949 def __iter__(self):
1950 self._fetch_all()
1951 return iter(self._result_cache)
1952
1953 def iterator(self):
1954 yield from RawModelIterable(self)
1955
1956 def __repr__(self):
1957 return f"<{self.__class__.__name__}: {self.query}>"
1958
1959 def __getitem__(self, k):
1960 return list(self)[k]
1961
1962 @property
1963 def db(self):
1964 """Return the database used if this query is executed now."""
1965 return self._db or router.db_for_read(self.model, **self._hints)
1966
1967 def using(self, alias):
1968 """Select the database this RawQuerySet should execute against."""
1969 return RawQuerySet(
1970 self.raw_query,
1971 model=self.model,
1972 query=self.query.chain(using=alias),
1973 params=self.params,
1974 translations=self.translations,
1975 using=alias,
1976 )
1977
1978 @cached_property
1979 def columns(self):
1980 """
1981 A list of model field names in the order they'll appear in the
1982 query results.
1983 """
1984 columns = self.query.get_columns()
1985 # Adjust any column names which don't match field names
1986 for query_name, model_name in self.translations.items():
1987 # Ignore translations for nonexistent column names
1988 try:
1989 index = columns.index(query_name)
1990 except ValueError:
1991 pass
1992 else:
1993 columns[index] = model_name
1994 return columns
1995
1996 @cached_property
1997 def model_fields(self):
1998 """A dict mapping column names to model field names."""
1999 converter = connections[self.db].introspection.identifier_converter
2000 model_fields = {}
2001 for field in self.model._meta.fields:
2002 name, column = field.get_attname_column()
2003 model_fields[converter(column)] = field
2004 return model_fields
2005
2006
class Prefetch:
    """Describe a prefetch_related() lookup, optionally with a custom
    queryset and/or a `to_attr` destination attribute."""

    def __init__(self, lookup, queryset=None, to_attr=None):
        # Path traversed to perform the prefetch.
        self.prefetch_through = lookup
        # Path to the attribute where the result is stored.
        self.prefetch_to = lookup
        if queryset is not None:
            # Raw querysets and value-returning querysets can't populate
            # model-instance caches.
            if isinstance(queryset, RawQuerySet) or (
                hasattr(queryset, "_iterable_class")
                and not issubclass(queryset._iterable_class, ModelIterable)
            ):
                raise ValueError(
                    "Prefetch querysets cannot use raw(), values(), and values_list()."
                )
        if to_attr:
            # Replace the final path segment with the custom attribute name.
            path = lookup.split(LOOKUP_SEP)
            self.prefetch_to = LOOKUP_SEP.join(path[:-1] + [to_attr])

        self.queryset = queryset
        self.to_attr = to_attr

    def __getstate__(self):
        state = self.__dict__.copy()
        if self.queryset is not None:
            queryset = self.queryset._chain()
            # Mark the clone as already evaluated so pickling never
            # triggers a database query.
            queryset._result_cache = []
            queryset._prefetch_done = True
            state["queryset"] = queryset
        return state

    def add_prefix(self, prefix):
        """Prepend `prefix` to both the traversal and storage paths."""
        self.prefetch_through = LOOKUP_SEP.join([prefix, self.prefetch_through])
        self.prefetch_to = LOOKUP_SEP.join([prefix, self.prefetch_to])

    def get_current_prefetch_to(self, level):
        """Return the storage path truncated to `level` + 1 segments."""
        parts = self.prefetch_to.split(LOOKUP_SEP)
        return LOOKUP_SEP.join(parts[: level + 1])

    def get_current_to_attr(self, level):
        """Return (attribute name at `level`, whether it is a custom to_attr)."""
        parts = self.prefetch_to.split(LOOKUP_SEP)
        is_final_level = level == len(parts) - 1
        return parts[level], self.to_attr and is_final_level

    def get_current_queryset(self, level):
        """Return the custom queryset, but only at the final level."""
        if self.get_current_prefetch_to(level) != self.prefetch_to:
            return None
        return self.queryset

    def __eq__(self, other):
        if isinstance(other, Prefetch):
            return self.prefetch_to == other.prefetch_to
        return NotImplemented

    def __hash__(self):
        return hash((self.__class__, self.prefetch_to))
2066
2067
def normalize_prefetch_lookups(lookups, prefix=None):
    """Normalize lookups into Prefetch objects, optionally prefixing paths."""
    normalized = []
    for item in lookups:
        prefetch = item if isinstance(item, Prefetch) else Prefetch(item)
        if prefix:
            prefetch.add_prefix(prefix)
        normalized.append(prefetch)
    return normalized
2078
2079
def prefetch_related_objects(model_instances, *related_lookups):
    """
    Populate prefetched object caches for a list of model instances based on
    the lookups/Prefetch instances given.

    Mutates the instances in place (filling `_prefetched_objects_cache` or
    setting `to_attr` attributes) and returns None. Assumes the instances
    are homogeneous (all of the same model class).
    """
    if not model_instances:
        return  # nothing to do

    # We need to be able to dynamically add to the list of prefetch_related
    # lookups that we look up (see below). So we need some book keeping to
    # ensure we don't do duplicate work.
    done_queries = {}  # dictionary of things like 'foo__bar': [results]

    auto_lookups = set()  # we add to this as we go through.
    followed_descriptors = set()  # recursion protection

    # Treat the lookups as a stack (pop from the end); reversing keeps the
    # caller's order. Newly discovered lookups are pushed on as we go.
    all_lookups = normalize_prefetch_lookups(reversed(related_lookups))
    while all_lookups:
        lookup = all_lookups.pop()
        if lookup.prefetch_to in done_queries:
            if lookup.queryset is not None:
                raise ValueError(
                    "'%s' lookup was already seen with a different queryset. "
                    "You may need to adjust the ordering of your lookups."
                    % lookup.prefetch_to
                )

            continue

        # Top level, the list of objects to decorate is the result cache
        # from the primary QuerySet. It won't be for deeper levels.
        obj_list = model_instances

        through_attrs = lookup.prefetch_through.split(LOOKUP_SEP)
        for level, through_attr in enumerate(through_attrs):
            # Prepare main instances
            if not obj_list:
                break

            prefetch_to = lookup.get_current_prefetch_to(level)
            if prefetch_to in done_queries:
                # Skip any prefetching, and any object preparation
                obj_list = done_queries[prefetch_to]
                continue

            # Prepare objects:
            good_objects = True
            for obj in obj_list:
                # Since prefetching can re-use instances, it is possible to have
                # the same instance multiple times in obj_list, so obj might
                # already be prepared.
                if not hasattr(obj, "_prefetched_objects_cache"):
                    try:
                        obj._prefetched_objects_cache = {}
                    except (AttributeError, TypeError):
                        # Must be an immutable object from
                        # values_list(flat=True), for example (TypeError) or
                        # a QuerySet subclass that isn't returning Model
                        # instances (AttributeError), either in Plain or a 3rd
                        # party. prefetch_related() doesn't make sense, so quit.
                        good_objects = False
                        break
            if not good_objects:
                break

            # Descend down tree

            # We assume that objects retrieved are homogeneous (which is the premise
            # of prefetch_related), so what applies to first object applies to all.
            first_obj = obj_list[0]
            to_attr = lookup.get_current_to_attr(level)[0]
            prefetcher, descriptor, attr_found, is_fetched = get_prefetcher(
                first_obj, through_attr, to_attr
            )

            if not attr_found:
                raise AttributeError(
                    "Cannot find '{}' on {} object, '{}' is an invalid "
                    "parameter to prefetch_related()".format(
                        through_attr,
                        first_obj.__class__.__name__,
                        lookup.prefetch_through,
                    )
                )

            if level == len(through_attrs) - 1 and prefetcher is None:
                # Last one, this *must* resolve to something that supports
                # prefetching, otherwise there is no point adding it and the
                # developer asking for it has made a mistake.
                raise ValueError(
                    "'%s' does not resolve to an item that supports "
                    "prefetching - this is an invalid parameter to "
                    "prefetch_related()." % lookup.prefetch_through
                )

            # Only fetch objects whose relation isn't already populated
            # (e.g. via select_related or an earlier prefetch).
            obj_to_fetch = None
            if prefetcher is not None:
                obj_to_fetch = [obj for obj in obj_list if not is_fetched(obj)]

            if obj_to_fetch:
                obj_list, additional_lookups = prefetch_one_level(
                    obj_to_fetch,
                    prefetcher,
                    lookup,
                    level,
                )
                # We need to ensure we don't keep adding lookups from the
                # same relationships to stop infinite recursion. So, if we
                # are already on an automatically added lookup, don't add
                # the new lookups from relationships we've seen already.
                if not (
                    prefetch_to in done_queries
                    and lookup in auto_lookups
                    and descriptor in followed_descriptors
                ):
                    done_queries[prefetch_to] = obj_list
                    new_lookups = normalize_prefetch_lookups(
                        reversed(additional_lookups), prefetch_to
                    )
                    auto_lookups.update(new_lookups)
                    all_lookups.extend(new_lookups)
                followed_descriptors.add(descriptor)
            else:
                # Either a singly related object that has already been fetched
                # (e.g. via select_related), or hopefully some other property
                # that doesn't support prefetching but needs to be traversed.

                # We replace the current list of parent objects with the list
                # of related objects, filtering out empty or missing values so
                # that we can continue with nullable or reverse relations.
                new_obj_list = []
                for obj in obj_list:
                    if through_attr in getattr(obj, "_prefetched_objects_cache", ()):
                        # If related objects have been prefetched, use the
                        # cache rather than the object's through_attr.
                        new_obj = list(obj._prefetched_objects_cache.get(through_attr))
                    else:
                        try:
                            new_obj = getattr(obj, through_attr)
                        except exceptions.ObjectDoesNotExist:
                            continue
                    if new_obj is None:
                        continue
                    # We special-case `list` rather than something more generic
                    # like `Iterable` because we don't want to accidentally match
                    # user models that define __iter__.
                    if isinstance(new_obj, list):
                        new_obj_list.extend(new_obj)
                    else:
                        new_obj_list.append(new_obj)
                obj_list = new_obj_list
2231
2232
def get_prefetcher(instance, through_attr, to_attr):
    """
    For the attribute 'through_attr' on the given instance, find
    an object that has a get_prefetch_queryset().
    Return a 4 tuple containing:
    (the object with get_prefetch_queryset (or None),
     the descriptor object representing this relationship (or None),
     a boolean that is False if the attribute was not found at all,
     a function that takes an instance and returns a boolean that is True if
     the attribute has already been fetched for that instance)
    """

    def has_to_attr_attribute(instance):
        return hasattr(instance, to_attr)

    # Defaults: no prefetcher, and "fetched" means the to_attr attribute
    # already exists on the instance.
    prefetcher = None
    is_fetched = has_to_attr_attribute

    # For singly related objects, we have to avoid getting the attribute
    # from the object, as this will trigger the query. So we first try
    # on the class, in order to get the descriptor object.
    rel_obj_descriptor = getattr(instance.__class__, through_attr, None)
    if rel_obj_descriptor is None:
        attr_found = hasattr(instance, through_attr)
    else:
        attr_found = True
        if rel_obj_descriptor:
            # singly related object, descriptor object has the
            # get_prefetch_queryset() method.
            if hasattr(rel_obj_descriptor, "get_prefetch_queryset"):
                prefetcher = rel_obj_descriptor
                is_fetched = rel_obj_descriptor.is_cached
            else:
                # descriptor doesn't support prefetching, so we go ahead and get
                # the attribute on the instance rather than the class to
                # support many related managers
                rel_obj = getattr(instance, through_attr)
                if hasattr(rel_obj, "get_prefetch_queryset"):
                    prefetcher = rel_obj
                if through_attr != to_attr:
                    # Special case cached_property instances because hasattr
                    # triggers attribute computation and assignment.
                    if isinstance(
                        getattr(instance.__class__, to_attr, None), cached_property
                    ):

                        def has_cached_property(instance):
                            # cached_property stores its result in __dict__.
                            return to_attr in instance.__dict__

                        is_fetched = has_cached_property
                else:

                    def in_prefetched_cache(instance):
                        # Many-related managers store results keyed by attr name.
                        return through_attr in instance._prefetched_objects_cache

                    is_fetched = in_prefetched_cache
    return prefetcher, rel_obj_descriptor, attr_found, is_fetched
2290
2291
def prefetch_one_level(instances, prefetcher, lookup, level):
    """
    Helper function for prefetch_related_objects().

    Run prefetches on all instances using the prefetcher object,
    assigning results to relevant caches in instance.

    Return the prefetched objects along with any additional prefetches that
    must be done due to prefetch_related lookups found from default managers.
    """
    # prefetcher must have a method get_prefetch_queryset() which takes a list
    # of instances, and returns a tuple:

    # (queryset of instances of self.model that are related to passed in instances,
    # callable that gets value to be matched for returned instances,
    # callable that gets value to be matched for passed in instances,
    # boolean that is True for singly related objects,
    # cache or field name to assign to,
    # boolean that is True when the previous argument is a cache name vs a field name).

    # The 'values to be matched' must be hashable as they will be used
    # in a dictionary.

    (
        rel_qs,
        rel_obj_attr,
        instance_attr,
        single,
        cache_name,
        is_descriptor,
    ) = prefetcher.get_prefetch_queryset(instances, lookup.get_current_queryset(level))
    # We have to handle the possibility that the QuerySet we just got back
    # contains some prefetch_related lookups. We don't want to trigger the
    # prefetch_related functionality by evaluating the query. Rather, we need
    # to merge in the prefetch_related lookups.
    # Copy the lookups in case it is a Prefetch object which could be reused
    # later (happens in nested prefetch_related).
    additional_lookups = [
        copy.copy(additional_lookup)
        for additional_lookup in getattr(rel_qs, "_prefetch_related_lookups", ())
    ]
    if additional_lookups:
        # Don't need to clone because the manager should have given us a fresh
        # instance, so we access an internal instead of using public interface
        # for performance reasons.
        rel_qs._prefetch_related_lookups = ()

    # Evaluating the queryset runs the actual prefetch query.
    all_related_objects = list(rel_qs)

    # Bucket the related objects by the value they join back on, so each
    # instance below can look up its own results in O(1).
    rel_obj_cache = {}
    for rel_obj in all_related_objects:
        rel_attr_val = rel_obj_attr(rel_obj)
        rel_obj_cache.setdefault(rel_attr_val, []).append(rel_obj)

    to_attr, as_attr = lookup.get_current_to_attr(level)
    # Make sure `to_attr` does not conflict with a field.
    if as_attr and instances:
        # We assume that objects retrieved are homogeneous (which is the premise
        # of prefetch_related), so what applies to first object applies to all.
        model = instances[0].__class__
        try:
            model._meta.get_field(to_attr)
        except exceptions.FieldDoesNotExist:
            pass
        else:
            msg = "to_attr={} conflicts with a field on the {} model."
            raise ValueError(msg.format(to_attr, model.__name__))

    # Whether or not we're prefetching the last part of the lookup.
    leaf = len(lookup.prefetch_through.split(LOOKUP_SEP)) - 1 == level

    # Attach the fetched results onto each instance.
    for obj in instances:
        instance_attr_val = instance_attr(obj)
        vals = rel_obj_cache.get(instance_attr_val, [])

        if single:
            val = vals[0] if vals else None
            if as_attr:
                # A to_attr has been given for the prefetch.
                setattr(obj, to_attr, val)
            elif is_descriptor:
                # cache_name points to a field name in obj.
                # This field is a descriptor for a related object.
                setattr(obj, cache_name, val)
            else:
                # No to_attr has been given for this prefetch operation and the
                # cache_name does not point to a descriptor. Store the value of
                # the field in the object's field cache.
                obj._state.fields_cache[cache_name] = val
        else:
            if as_attr:
                setattr(obj, to_attr, vals)
            else:
                manager = getattr(obj, to_attr)
                if leaf and lookup.queryset is not None:
                    qs = manager._apply_rel_filters(lookup.queryset)
                else:
                    qs = manager.get_queryset()
                qs._result_cache = vals
                # We don't want the individual qs doing prefetch_related now,
                # since we have merged this into the current work.
                qs._prefetch_done = True
                obj._prefetched_objects_cache[cache_name] = qs
    return all_related_objects, additional_lookups
2396
2397
class RelatedPopulator:
    """
    RelatedPopulator is used for select_related() object instantiation.

    The idea is that each select_related() model will be populated by a
    different RelatedPopulator instance. The RelatedPopulator instances get
    klass_info and select (computed in SQLCompiler) plus the used db as
    input for initialization. That data is used to compute which columns
    to use, how to instantiate the model, and how to populate the links
    between the objects.

    The actual creation of the objects is done in populate() method. This
    method gets row and from_obj as input and populates the select_related()
    model instance.
    """

    def __init__(self, klass_info, select, db):
        """Precompute column positions and setters from SQLCompiler output."""
        self.db = db
        # Pre-compute needed attributes. The attributes are:
        # - model_cls: the possibly deferred model class to instantiate
        # - either:
        #   - cols_start, cols_end: usually the columns in the row are
        #     in the same order model_cls.__init__ expects them, so we
        #     can instantiate by model_cls(*row[cols_start:cols_end])
        #   - reorder_for_init: When select_related descends to a child
        #     class, then we want to reuse the already selected parent
        #     data. However, in this case the parent data isn't necessarily
        #     in the same order that Model.__init__ expects it to be, so
        #     we have to reorder the parent data. The reorder_for_init
        #     attribute contains a function used to reorder the field data
        #     in the order __init__ expects it.
        # - pk_idx: the index of the primary key field in the reordered
        #   model data. Used to check if a related object exists at all.
        # - init_list: the field attnames fetched from the database. For
        #   deferred models this isn't the same as all attnames of the
        #   model's fields.
        # - related_populators: a list of RelatedPopulator instances if
        #   select_related() descends to related models from this model.
        # - local_setter, remote_setter: Methods to set cached values on
        #   the object being populated and on the remote object. Usually
        #   these are Field.set_cached_value() methods.
        select_fields = klass_info["select_fields"]
        from_parent = klass_info["from_parent"]
        if not from_parent:
            # Columns are contiguous and already in __init__ order.
            self.cols_start = select_fields[0]
            self.cols_end = select_fields[-1] + 1
            self.init_list = [
                f[0].target.attname for f in select[self.cols_start : self.cols_end]
            ]
            self.reorder_for_init = None
        else:
            # Reusing parent data: build an itemgetter that pulls the
            # selected columns out of the row in __init__ order.
            attname_indexes = {
                select[idx][0].target.attname: idx for idx in select_fields
            }
            model_init_attnames = (
                f.attname for f in klass_info["model"]._meta.concrete_fields
            )
            self.init_list = [
                attname for attname in model_init_attnames if attname in attname_indexes
            ]
            self.reorder_for_init = operator.itemgetter(
                *[attname_indexes[attname] for attname in self.init_list]
            )

        self.model_cls = klass_info["model"]
        self.pk_idx = self.init_list.index(self.model_cls._meta.pk.attname)
        self.related_populators = get_related_populators(klass_info, select, self.db)
        self.local_setter = klass_info["local_setter"]
        self.remote_setter = klass_info["remote_setter"]

    def populate(self, row, from_obj):
        """Instantiate this model from `row` and link it to `from_obj`."""
        if self.reorder_for_init:
            obj_data = self.reorder_for_init(row)
        else:
            obj_data = row[self.cols_start : self.cols_end]
        if obj_data[self.pk_idx] is None:
            # A NULL pk means the outer join matched no related row.
            obj = None
        else:
            obj = self.model_cls.from_db(self.db, self.init_list, obj_data)
            for rel_iter in self.related_populators:
                rel_iter.populate(row, obj)
        self.local_setter(from_obj, obj)
        if obj is not None:
            self.remote_setter(obj, from_obj)
2482
2483
def get_related_populators(klass_info, select, db):
    """Build a RelatedPopulator for each related klass_info entry."""
    return [
        RelatedPopulator(rel_klass_info, select, db)
        for rel_klass_info in klass_info.get("related_klass_infos", [])
    ]