# Plain is headed towards 1.0! Subscribe for development updates.

   1"""
   2Create SQL statements for QuerySets.
   3
   4The code in here encapsulates all of the SQL construction so that QuerySets
   5themselves do not have to (and could be backed by things other than SQL
   6databases). The abstraction barrier only works one way: this module has to know
   7all about the internals of models in order to get the information it needs.
   8"""
   9import copy
  10import difflib
  11import functools
  12import sys
  13from collections import Counter, namedtuple
  14from collections.abc import Iterator, Mapping
  15from itertools import chain, count, product
  16from string import ascii_uppercase
  17
  18from plain.exceptions import FieldDoesNotExist, FieldError
  19from plain.models.aggregates import Count
  20from plain.models.constants import LOOKUP_SEP
  21from plain.models.db import DEFAULT_DB_ALIAS, NotSupportedError, connections
  22from plain.models.expressions import (
  23    BaseExpression,
  24    Col,
  25    Exists,
  26    F,
  27    OuterRef,
  28    Ref,
  29    ResolvedOuterRef,
  30    Value,
  31)
  32from plain.models.fields import Field
  33from plain.models.fields.related_lookups import MultiColSource
  34from plain.models.lookups import Lookup
  35from plain.models.query_utils import (
  36    Q,
  37    check_rel_lookup_compatibility,
  38    refs_expression,
  39)
  40from plain.models.sql.constants import INNER, LOUTER, ORDER_DIR, SINGLE
  41from plain.models.sql.datastructures import BaseTable, Empty, Join, MultiJoin
  42from plain.models.sql.where import AND, OR, ExtraWhere, NothingNode, WhereNode
  43from plain.utils.functional import cached_property
  44from plain.utils.regex_helper import _lazy_re_compile
  45from plain.utils.tree import Node
  46
  47__all__ = ["Query", "RawQuery"]
  48
  49# Quotation marks ('"`[]), whitespace characters, semicolons, or inline
  50# SQL comments are forbidden in column aliases.
  51FORBIDDEN_ALIAS_PATTERN = _lazy_re_compile(r"['`\"\]\[;\s]|--|/\*|\*/")
  52
  53# Inspired from
  54# https://www.postgresql.org/docs/current/sql-syntax-lexical.html#SQL-SYNTAX-IDENTIFIERS
  55EXPLAIN_OPTIONS_PATTERN = _lazy_re_compile(r"[\w\-]+")
  56
  57
  58def get_field_names_from_opts(opts):
  59    if opts is None:
  60        return set()
  61    return set(
  62        chain.from_iterable(
  63            (f.name, f.attname) if f.concrete else (f.name,) for f in opts.get_fields()
  64        )
  65    )
  66
  67
  68def get_children_from_q(q):
  69    for child in q.children:
  70        if isinstance(child, Node):
  71            yield from get_children_from_q(child)
  72        else:
  73            yield child
  74
  75
# Describes how a lookup chain resolves to SQL joins. Field meanings (as used
# by callers in this module): final_field is the last field reached by the
# lookup, targets are the fields selected on the joined table, opts is the
# Options of the final model, joins is the sequence of table aliases, path is
# the traversed relation path, and transform_function applies any registered
# transforms to the resolved column.
JoinInfo = namedtuple(
    "JoinInfo",
    ("final_field", "targets", "opts", "joins", "path", "transform_function"),
)
  80
  81
class RawQuery:
    """A single raw SQL query."""

    def __init__(self, sql, using, params=()):
        self.params = params
        self.sql = sql
        self.using = using
        self.cursor = None

        # Mirror some properties of a normal query so that
        # the compiler can be used to process results.
        self.low_mark, self.high_mark = 0, None  # Used for offset/limit
        self.extra_select = {}
        self.annotation_select = {}

    def chain(self, using):
        # Raw queries carry no chainable state beyond the SQL and params,
        # so chaining is simply cloning onto the given database alias.
        return self.clone(using)

    def clone(self, using):
        """Return a copy of this raw query bound to the database alias `using`."""
        return RawQuery(self.sql, using, params=self.params)

    def get_columns(self):
        """Return result-set column names, executing the query if needed."""
        if self.cursor is None:
            self._execute_query()
        converter = connections[self.using].introspection.identifier_converter
        return [converter(column_meta[0]) for column_meta in self.cursor.description]

    def __iter__(self):
        # Always execute a new query for a new iterator.
        # This could be optimized with a cache at the expense of RAM.
        self._execute_query()
        if not connections[self.using].features.can_use_chunked_reads:
            # If the database can't use chunked reads we need to make sure we
            # evaluate the entire query up front.
            result = list(self.cursor)
        else:
            result = self.cursor
        return iter(result)

    def __repr__(self):
        return f"<{self.__class__.__name__}: {self}>"

    @property
    def params_type(self):
        # Params may be a mapping (named placeholders), a sequence
        # (positional placeholders), or None (no interpolation at all).
        if self.params is None:
            return None
        return dict if isinstance(self.params, Mapping) else tuple

    def __str__(self):
        if self.params_type is None:
            return self.sql
        return self.sql % self.params_type(self.params)

    def _execute_query(self):
        """Open a cursor and execute the SQL with adapted parameters."""
        connection = connections[self.using]

        # Adapt parameters to the database, as much as possible considering
        # that the target type isn't known. See #17755.
        params_type = self.params_type
        adapter = connection.ops.adapt_unknown_value
        if params_type is tuple:
            params = tuple(adapter(val) for val in self.params)
        elif params_type is dict:
            params = {key: adapter(val) for key, val in self.params.items()}
        elif params_type is None:
            params = None
        else:
            raise RuntimeError("Unexpected params type: %s" % params_type)

        self.cursor = connection.cursor()
        self.cursor.execute(self.sql, params)
 153
 154
# EXPLAIN configuration attached to a query: the output `format` (or None for
# the backend default) and a dict of backend-specific `options`.
ExplainInfo = namedtuple("ExplainInfo", ("format", "options"))
 156
 157
class Query(BaseExpression):
    """A single SQL query."""

    # Prefix used when generating table aliases (T1, T2, ...).
    alias_prefix = "T"
    # Value substituted per-expression when the result set is known empty.
    empty_result_set_value = None
    # Aliases known to belong to subqueries; seeded with the bare prefix.
    subq_aliases = frozenset([alias_prefix])

    # Name of the compiler class looked up on connection.ops.
    compiler = "SQLCompiler"

    base_table_class = BaseTable
    join_class = Join

    default_cols = True
    default_ordering = True
    standard_ordering = True

    filter_is_sticky = False
    subquery = False

    # SQL-related attributes.
    # Select and related select clauses are expressions to use in the SELECT
    # clause of the query. The select is used for cases where we want to set up
    # the select clause to contain other than default fields (values(),
    # subqueries...). Note that annotations go to annotations dictionary.
    select = ()
    # The group_by attribute can have one of the following forms:
    #  - None: no group by at all in the query
    #  - A tuple of expressions: group by (at least) those expressions.
    #    String refs are also allowed for now.
    #  - True: group by all select fields of the model
    # See compiler.get_group_by() for details.
    group_by = None
    order_by = ()
    low_mark = 0  # Used for offset/limit.
    high_mark = None  # Used for offset/limit.
    distinct = False
    distinct_fields = ()
    select_for_update = False
    select_for_update_nowait = False
    select_for_update_skip_locked = False
    select_for_update_of = ()
    select_for_no_key_update = False
    select_related = False
    has_select_fields = False
    # Arbitrary limit for select_related to prevent infinite recursion.
    max_depth = 5
    # Holds the selects defined by a call to values() or values_list()
    # excluding annotation_select and extra_select.
    values_select = ()

    # SQL annotation-related attributes.
    annotation_select_mask = None
    _annotation_select_cache = None

    # Set combination attributes.
    combinator = None
    combinator_all = False
    combined_queries = ()

    # These are for extensions. The contents are more or less appended verbatim
    # to the appropriate clause.
    extra_select_mask = None
    _extra_select_cache = None

    extra_tables = ()
    extra_order_by = ()

    # A tuple that is a set of model field names and either True, if these are
    # the fields to defer, or False if these are the only fields to load.
    deferred_loading = (frozenset(), True)

    explain_info = None
 230
    def __init__(self, model, alias_cols=True):
        """
        Initialize empty query state for `model`.

        `alias_cols` controls whether resolved columns are qualified with a
        table alias during reference resolving.
        """
        self.model = model
        self.alias_refcount = {}
        # alias_map is the most important data structure regarding joins.
        # It's used for recording which joins exist in the query and what
        # types they are. The key is the alias of the joined table (possibly
        # the table name) and the value is a Join-like object (see
        # sql.datastructures.Join for more information).
        self.alias_map = {}
        # Whether to provide alias to columns during reference resolving.
        self.alias_cols = alias_cols
        # Sometimes the query contains references to aliases in outer queries (as
        # a result of split_exclude). Correct alias quoting needs to know these
        # aliases too.
        # Map external tables to whether they are aliased.
        self.external_aliases = {}
        self.table_map = {}  # Maps table names to list of aliases.
        self.used_aliases = set()

        self.where = WhereNode()
        # Maps alias -> Annotation Expression.
        self.annotations = {}
        # These are for extensions. The contents are more or less appended
        # verbatim to the appropriate clause.
        self.extra = {}  # Maps col_alias -> (col_sql, params).

        self._filtered_relations = {}
 258
 259    @property
 260    def output_field(self):
 261        if len(self.select) == 1:
 262            select = self.select[0]
 263            return getattr(select, "target", None) or select.field
 264        elif len(self.annotation_select) == 1:
 265            return next(iter(self.annotation_select.values())).output_field
 266
 267    @cached_property
 268    def base_table(self):
 269        for alias in self.alias_map:
 270            return alias
 271
 272    def __str__(self):
 273        """
 274        Return the query as a string of SQL with the parameter values
 275        substituted in (use sql_with_params() to see the unsubstituted string).
 276
 277        Parameter values won't necessarily be quoted correctly, since that is
 278        done by the database interface at execution time.
 279        """
 280        sql, params = self.sql_with_params()
 281        return sql % params
 282
 283    def sql_with_params(self):
 284        """
 285        Return the query as an SQL string and the parameters that will be
 286        substituted into the query.
 287        """
 288        return self.get_compiler(DEFAULT_DB_ALIAS).as_sql()
 289
 290    def __deepcopy__(self, memo):
 291        """Limit the amount of work when a Query is deepcopied."""
 292        result = self.clone()
 293        memo[id(self)] = result
 294        return result
 295
 296    def get_compiler(self, using=None, connection=None, elide_empty=True):
 297        if using is None and connection is None:
 298            raise ValueError("Need either using or connection")
 299        if using:
 300            connection = connections[using]
 301        return connection.ops.compiler(self.compiler)(
 302            self, connection, using, elide_empty
 303        )
 304
 305    def get_meta(self):
 306        """
 307        Return the Options instance (the model._meta) from which to start
 308        processing. Normally, this is self.model._meta, but it can be changed
 309        by subclasses.
 310        """
 311        if self.model:
 312            return self.model._meta
 313
    def clone(self):
        """
        Return a copy of the current Query. A lightweight alternative to
        deepcopy(): shares immutable state and shallow-copies only the
        mutable containers.
        """
        obj = Empty()
        obj.__class__ = self.__class__
        # Copy references to everything.
        obj.__dict__ = self.__dict__.copy()
        # Clone attributes that can't use shallow copy.
        obj.alias_refcount = self.alias_refcount.copy()
        obj.alias_map = self.alias_map.copy()
        obj.external_aliases = self.external_aliases.copy()
        obj.table_map = self.table_map.copy()
        obj.where = self.where.clone()
        obj.annotations = self.annotations.copy()
        if self.annotation_select_mask is not None:
            obj.annotation_select_mask = self.annotation_select_mask.copy()
        if self.combined_queries:
            # Combined queries are themselves Query objects and need deep cloning.
            obj.combined_queries = tuple(
                [query.clone() for query in self.combined_queries]
            )
        # _annotation_select_cache cannot be copied, as doing so breaks the
        # (necessary) state in which both annotations and
        # _annotation_select_cache point to the same underlying objects.
        # It will get re-populated in the cloned queryset the next time it's
        # used.
        obj._annotation_select_cache = None
        obj.extra = self.extra.copy()
        if self.extra_select_mask is not None:
            obj.extra_select_mask = self.extra_select_mask.copy()
        if self._extra_select_cache is not None:
            obj._extra_select_cache = self._extra_select_cache.copy()
        if self.select_related is not False:
            # Use deepcopy because select_related stores fields in nested
            # dicts.
            obj.select_related = copy.deepcopy(obj.select_related)
        if "subq_aliases" in self.__dict__:
            # Only copy when set on the instance (it's normally a class attribute).
            obj.subq_aliases = self.subq_aliases.copy()
        obj.used_aliases = self.used_aliases.copy()
        obj._filtered_relations = self._filtered_relations.copy()
        # Clear the cached_property, if it exists.
        obj.__dict__.pop("base_table", None)
        return obj
 358
 359    def chain(self, klass=None):
 360        """
 361        Return a copy of the current Query that's ready for another operation.
 362        The klass argument changes the type of the Query, e.g. UpdateQuery.
 363        """
 364        obj = self.clone()
 365        if klass and obj.__class__ != klass:
 366            obj.__class__ = klass
 367        if not obj.filter_is_sticky:
 368            obj.used_aliases = set()
 369        obj.filter_is_sticky = False
 370        if hasattr(obj, "_setup_query"):
 371            obj._setup_query()
 372        return obj
 373
 374    def relabeled_clone(self, change_map):
 375        clone = self.clone()
 376        clone.change_aliases(change_map)
 377        return clone
 378
 379    def _get_col(self, target, field, alias):
 380        if not self.alias_cols:
 381            alias = None
 382        return target.get_col(alias, field)
 383
    def get_aggregation(self, using, aggregate_exprs):
        """
        Return the dictionary with the values of the existing aggregations.

        `using` is the database alias to execute against; `aggregate_exprs`
        maps output aliases to aggregate expressions. Raises TypeError when a
        provided expression does not actually aggregate.
        """
        if not aggregate_exprs:
            return {}
        aggregates = {}
        for alias, aggregate_expr in aggregate_exprs.items():
            self.check_alias(alias)
            aggregate = aggregate_expr.resolve_expression(
                self, allow_joins=True, reuse=None, summarize=True
            )
            if not aggregate.contains_aggregate:
                raise TypeError("%s is not an aggregate expression" % alias)
            aggregates[alias] = aggregate
        # Existing usage of aggregation can be determined by the presence of
        # selected aggregates but also by filters against aliased aggregates.
        _, having, qualify = self.where.split_having_qualify()
        has_existing_aggregation = (
            any(
                getattr(annotation, "contains_aggregate", True)
                for annotation in self.annotations.values()
            )
            or having
        )
        # Decide if we need to use a subquery.
        #
        # Existing aggregations would cause incorrect results as
        # get_aggregation() must produce just one result and thus must not use
        # GROUP BY.
        #
        # If the query has limit or distinct, or uses set operations, then
        # those operations must be done in a subquery so that the query
        # aggregates on the limit and/or distinct results instead of applying
        # the distinct and limit after the aggregation.
        if (
            isinstance(self.group_by, tuple)
            or self.is_sliced
            or has_existing_aggregation
            or qualify
            or self.distinct
            or self.combinator
        ):
            # Imported here to avoid a circular import at module load time.
            from plain.models.sql.subqueries import AggregateQuery

            inner_query = self.clone()
            inner_query.subquery = True
            outer_query = AggregateQuery(self.model, inner_query)
            inner_query.select_for_update = False
            inner_query.select_related = False
            inner_query.set_annotation_mask(self.annotation_select)
            # Queries with distinct_fields need ordering and when a limit is
            # applied we must take the slice from the ordered query. Otherwise
            # no need for ordering.
            inner_query.clear_ordering(force=False)
            if not inner_query.distinct:
                # If the inner query uses default select and it has some
                # aggregate annotations, then we must make sure the inner
                # query is grouped by the main model's primary key. However,
                # clearing the select clause can alter results if distinct is
                # used.
                if inner_query.default_cols and has_existing_aggregation:
                    inner_query.group_by = (
                        self.model._meta.pk.get_col(inner_query.get_initial_alias()),
                    )
                inner_query.default_cols = False
                if not qualify:
                    # Mask existing annotations that are not referenced by
                    # aggregates to be pushed to the outer query unless
                    # filtering against window functions is involved as it
                    # requires complex realising.
                    annotation_mask = set()
                    for aggregate in aggregates.values():
                        annotation_mask |= aggregate.get_refs()
                    inner_query.set_annotation_mask(annotation_mask)

            # Add aggregates to the outer AggregateQuery. This requires making
            # sure all columns referenced by the aggregates are selected in the
            # inner query. It is achieved by retrieving all column references
            # by the aggregates, explicitly selecting them in the inner query,
            # and making sure the aggregates are repointed to them.
            col_refs = {}
            for alias, aggregate in aggregates.items():
                replacements = {}
                for col in self._gen_cols([aggregate], resolve_refs=False):
                    if not (col_ref := col_refs.get(col)):
                        index = len(col_refs) + 1
                        col_alias = f"__col{index}"
                        col_ref = Ref(col_alias, col)
                        col_refs[col] = col_ref
                        inner_query.annotations[col_alias] = col
                        inner_query.append_annotation_mask([col_alias])
                    replacements[col] = col_ref
                outer_query.annotations[alias] = aggregate.replace_expressions(
                    replacements
                )
            if (
                inner_query.select == ()
                and not inner_query.default_cols
                and not inner_query.annotation_select_mask
            ):
                # In case of Model.objects[0:3].count(), there would be no
                # field selected in the inner query, yet we must use a subquery.
                # So, make sure at least one field is selected.
                inner_query.select = (
                    self.model._meta.pk.get_col(inner_query.get_initial_alias()),
                )
        else:
            # No subquery needed: aggregate directly on this query in place.
            outer_query = self
            self.select = ()
            self.default_cols = False
            self.extra = {}
            if self.annotations:
                # Inline reference to existing annotations and mask them as
                # they are unnecessary given only the summarized aggregations
                # are requested.
                replacements = {
                    Ref(alias, annotation): annotation
                    for alias, annotation in self.annotations.items()
                }
                self.annotations = {
                    alias: aggregate.replace_expressions(replacements)
                    for alias, aggregate in aggregates.items()
                }
            else:
                self.annotations = aggregates
            self.set_annotation_mask(aggregates)

        # Per-expression fallbacks used when the result set is empty.
        empty_set_result = [
            expression.empty_result_set_value
            for expression in outer_query.annotation_select.values()
        ]
        elide_empty = not any(result is NotImplemented for result in empty_set_result)
        outer_query.clear_ordering(force=True)
        outer_query.clear_limits()
        outer_query.select_for_update = False
        outer_query.select_related = False
        compiler = outer_query.get_compiler(using, elide_empty=elide_empty)
        result = compiler.execute_sql(SINGLE)
        if result is None:
            result = empty_set_result
        else:
            converters = compiler.get_converters(outer_query.annotation_select.values())
            result = next(compiler.apply_converters((result,), converters))

        return dict(zip(outer_query.annotation_select, result))
 530
 531    def get_count(self, using):
 532        """
 533        Perform a COUNT() query using the current filter constraints.
 534        """
 535        obj = self.clone()
 536        return obj.get_aggregation(using, {"__count": Count("*")})["__count"]
 537
    def has_filters(self):
        # Return the where clause; callers use its truthiness to check
        # whether any filters have been applied.
        return self.where
 540
    def exists(self, limit=True):
        """
        Return a trimmed clone of this query suitable for an EXISTS check:
        the select clause is replaced by a constant annotation, ordering is
        cleared, and — when `limit` is true — the result set is capped at one
        row. Union members are converted recursively without the limit.
        """
        q = self.clone()
        if not (q.distinct and q.is_sliced):
            if q.group_by is True:
                q.add_fields(
                    (f.attname for f in self.model._meta.concrete_fields), False
                )
                # Disable GROUP BY aliases to avoid orphaning references to the
                # SELECT clause which is about to be cleared.
                q.set_group_by(allow_aliases=False)
            q.clear_select_clause()
        if q.combined_queries and q.combinator == "union":
            q.combined_queries = tuple(
                combined_query.exists(limit=False)
                for combined_query in q.combined_queries
            )
        q.clear_ordering(force=True)
        if limit:
            q.set_limits(high=1)
        # Select a constant so the database does no real column work.
        q.add_annotation(Value(1), "a")
        return q
 562
 563    def has_results(self, using):
 564        q = self.exists(using)
 565        compiler = q.get_compiler(using=using)
 566        return compiler.has_results()
 567
 568    def explain(self, using, format=None, **options):
 569        q = self.clone()
 570        for option_name in options:
 571            if (
 572                not EXPLAIN_OPTIONS_PATTERN.fullmatch(option_name)
 573                or "--" in option_name
 574            ):
 575                raise ValueError(f"Invalid option name: {option_name!r}.")
 576        q.explain_info = ExplainInfo(format, options)
 577        compiler = q.get_compiler(using=using)
 578        return "\n".join(compiler.explain_query())
 579
    def combine(self, rhs, connector):
        """
        Merge the 'rhs' query into the current one (with any 'rhs' effects
        being applied *after* (that is, "to the right of") anything in the
        current query. 'rhs' is not modified during a call to this function.

        The 'connector' parameter describes how to connect filters from the
        'rhs' query (AND or OR). Raises TypeError when the two queries are
        incompatible (different models, slicing, or distinct settings).
        """
        if self.model != rhs.model:
            raise TypeError("Cannot combine queries on two different base models.")
        if self.is_sliced:
            raise TypeError("Cannot combine queries once a slice has been taken.")
        if self.distinct != rhs.distinct:
            raise TypeError("Cannot combine a unique query with a non-unique query.")
        if self.distinct_fields != rhs.distinct_fields:
            raise TypeError("Cannot combine queries with different distinct fields.")

        # If lhs and rhs shares the same alias prefix, it is possible to have
        # conflicting alias changes like T4 -> T5, T5 -> T6, which might end up
        # as T4 -> T6 while combining two querysets. To prevent this, change an
        # alias prefix of the rhs and update current aliases accordingly,
        # except if the alias is the base table since it must be present in the
        # query on both sides.
        initial_alias = self.get_initial_alias()
        rhs.bump_prefix(self, exclude={initial_alias})

        # Work out how to relabel the rhs aliases, if necessary.
        change_map = {}
        conjunction = connector == AND

        # Determine which existing joins can be reused. When combining the
        # query with AND we must recreate all joins for m2m filters. When
        # combining with OR we can reuse joins. The reason is that in AND
        # case a single row can't fulfill a condition like:
        #     revrel__col=1 & revrel__col=2
        # But, there might be two different related rows matching this
        # condition. In OR case a single True is enough, so single row is
        # enough, too.
        #
        # Note that we will be creating duplicate joins for non-m2m joins in
        # the AND case. The results will be correct but this creates too many
        # joins. This is something that could be fixed later on.
        reuse = set() if conjunction else set(self.alias_map)
        joinpromoter = JoinPromoter(connector, 2, False)
        joinpromoter.add_votes(
            j for j in self.alias_map if self.alias_map[j].join_type == INNER
        )
        rhs_votes = set()
        # Now, add the joins from rhs query into the new query (skipping base
        # table).
        rhs_tables = list(rhs.alias_map)[1:]
        for alias in rhs_tables:
            join = rhs.alias_map[alias]
            # If the left side of the join was already relabeled, use the
            # updated alias.
            join = join.relabeled_clone(change_map)
            new_alias = self.join(join, reuse=reuse)
            if join.join_type == INNER:
                rhs_votes.add(new_alias)
            # We can't reuse the same join again in the query. If we have two
            # distinct joins for the same connection in rhs query, then the
            # combined query must have two joins, too.
            reuse.discard(new_alias)
            if alias != new_alias:
                change_map[alias] = new_alias
            if not rhs.alias_refcount[alias]:
                # The alias was unused in the rhs query. Unref it so that it
                # will be unused in the new query, too. We have to add and
                # unref the alias so that join promotion has information of
                # the join type for the unused alias.
                self.unref_alias(new_alias)
        joinpromoter.add_votes(rhs_votes)
        joinpromoter.update_join_types(self)

        # Combine subqueries aliases to ensure aliases relabelling properly
        # handle subqueries when combining where and select clauses.
        self.subq_aliases |= rhs.subq_aliases

        # Now relabel a copy of the rhs where-clause and add it to the current
        # one.
        w = rhs.where.clone()
        w.relabel_aliases(change_map)
        self.where.add(w, connector)

        # Selection columns and extra extensions are those provided by 'rhs'.
        if rhs.select:
            self.set_select([col.relabeled_clone(change_map) for col in rhs.select])
        else:
            self.select = ()

        if connector == OR:
            # It would be nice to be able to handle this, but the queries don't
            # really make sense (or return consistent value sets). Not worth
            # the extra complexity when you can write a real query instead.
            if self.extra and rhs.extra:
                raise ValueError(
                    "When merging querysets using 'or', you cannot have "
                    "extra(select=...) on both sides."
                )
        self.extra.update(rhs.extra)
        extra_select_mask = set()
        if self.extra_select_mask is not None:
            extra_select_mask.update(self.extra_select_mask)
        if rhs.extra_select_mask is not None:
            extra_select_mask.update(rhs.extra_select_mask)
        if extra_select_mask:
            self.set_extra_mask(extra_select_mask)
        self.extra_tables += rhs.extra_tables

        # Ordering uses the 'rhs' ordering, unless it has none, in which case
        # the current ordering is used.
        self.order_by = rhs.order_by or self.order_by
        self.extra_order_by = rhs.extra_order_by or self.extra_order_by
 694
    def _get_defer_select_mask(self, opts, mask, select_mask=None):
        """
        Build the select mask for defer()-style loading: every concrete field
        NOT named in `mask` is loaded, relational entries recurse into the
        related model, and the primary key is always included.
        """
        if select_mask is None:
            select_mask = {}
        # The primary key is always loaded.
        select_mask[opts.pk] = {}
        # All concrete fields that are not part of the defer mask must be
        # loaded. If a relational field is encountered it gets added to the
        # mask for it be considered if `select_related` and the cycle continues
        # by recursively calling this function.
        for field in opts.concrete_fields:
            field_mask = mask.pop(field.name, None)
            if field_mask is None:
                select_mask.setdefault(field, {})
            elif field_mask:
                if not field.is_relation:
                    raise FieldError(next(iter(field_mask)))
                field_select_mask = select_mask.setdefault(field, {})
                related_model = field.remote_field.model._meta.concrete_model
                self._get_defer_select_mask(
                    related_model._meta, field_mask, field_select_mask
                )
        # Remaining defer entries must be references to reverse relationships.
        # The following code is expected to raise FieldError if it encounters
        # a malformed defer entry.
        for field_name, field_mask in mask.items():
            if filtered_relation := self._filtered_relations.get(field_name):
                relation = opts.get_field(filtered_relation.relation_name)
                field_select_mask = select_mask.setdefault((field_name, relation), {})
                field = relation.field
            else:
                field = opts.get_field(field_name).field
                field_select_mask = select_mask.setdefault(field, {})
            related_model = field.model._meta.concrete_model
            self._get_defer_select_mask(
                related_model._meta, field_mask, field_select_mask
            )
        return select_mask
 731
 732    def _get_only_select_mask(self, opts, mask, select_mask=None):
 733        if select_mask is None:
 734            select_mask = {}
 735        select_mask[opts.pk] = {}
 736        # Only include fields mentioned in the mask.
 737        for field_name, field_mask in mask.items():
 738            field = opts.get_field(field_name)
 739            field_select_mask = select_mask.setdefault(field, {})
 740            if field_mask:
 741                if not field.is_relation:
 742                    raise FieldError(next(iter(field_mask)))
 743                related_model = field.remote_field.model._meta.concrete_model
 744                self._get_only_select_mask(
 745                    related_model._meta, field_mask, field_select_mask
 746                )
 747        return select_mask
 748
 749    def get_select_mask(self):
 750        """
 751        Convert the self.deferred_loading data structure to an alternate data
 752        structure, describing the field that *will* be loaded. This is used to
 753        compute the columns to select from the database and also by the
 754        QuerySet class to work out which fields are being initialized on each
 755        model. Models that have all their fields included aren't mentioned in
 756        the result, only those that have field restrictions in place.
 757        """
 758        field_names, defer = self.deferred_loading
 759        if not field_names:
 760            return {}
 761        mask = {}
 762        for field_name in field_names:
 763            part_mask = mask
 764            for part in field_name.split(LOOKUP_SEP):
 765                part_mask = part_mask.setdefault(part, {})
 766        opts = self.get_meta()
 767        if defer:
 768            return self._get_defer_select_mask(opts, mask)
 769        return self._get_only_select_mask(opts, mask)
 770
 771    def table_alias(self, table_name, create=False, filtered_relation=None):
 772        """
 773        Return a table alias for the given table_name and whether this is a
 774        new alias or not.
 775
 776        If 'create' is true, a new alias is always created. Otherwise, the
 777        most recently created alias for the table (if one exists) is reused.
 778        """
 779        alias_list = self.table_map.get(table_name)
 780        if not create and alias_list:
 781            alias = alias_list[0]
 782            self.alias_refcount[alias] += 1
 783            return alias, False
 784
 785        # Create a new alias for this table.
 786        if alias_list:
 787            alias = "%s%d" % (self.alias_prefix, len(self.alias_map) + 1)
 788            alias_list.append(alias)
 789        else:
 790            # The first occurrence of a table uses the table name directly.
 791            alias = (
 792                filtered_relation.alias if filtered_relation is not None else table_name
 793            )
 794            self.table_map[table_name] = [alias]
 795        self.alias_refcount[alias] = 1
 796        return alias, True
 797
 798    def ref_alias(self, alias):
 799        """Increases the reference count for this alias."""
 800        self.alias_refcount[alias] += 1
 801
 802    def unref_alias(self, alias, amount=1):
 803        """Decreases the reference count for this alias."""
 804        self.alias_refcount[alias] -= amount
 805
 806    def promote_joins(self, aliases):
 807        """
 808        Promote recursively the join type of given aliases and its children to
 809        an outer join. If 'unconditional' is False, only promote the join if
 810        it is nullable or the parent join is an outer join.
 811
 812        The children promotion is done to avoid join chains that contain a LOUTER
 813        b INNER c. So, if we have currently a INNER b INNER c and a->b is promoted,
 814        then we must also promote b->c automatically, or otherwise the promotion
 815        of a->b doesn't actually change anything in the query results.
 816        """
 817        aliases = list(aliases)
 818        while aliases:
 819            alias = aliases.pop(0)
 820            if self.alias_map[alias].join_type is None:
 821                # This is the base table (first FROM entry) - this table
 822                # isn't really joined at all in the query, so we should not
 823                # alter its join type.
 824                continue
 825            # Only the first alias (skipped above) should have None join_type
 826            assert self.alias_map[alias].join_type is not None
 827            parent_alias = self.alias_map[alias].parent_alias
 828            parent_louter = (
 829                parent_alias and self.alias_map[parent_alias].join_type == LOUTER
 830            )
 831            already_louter = self.alias_map[alias].join_type == LOUTER
 832            if (self.alias_map[alias].nullable or parent_louter) and not already_louter:
 833                self.alias_map[alias] = self.alias_map[alias].promote()
 834                # Join type of 'alias' changed, so re-examine all aliases that
 835                # refer to this one.
 836                aliases.extend(
 837                    join
 838                    for join in self.alias_map
 839                    if self.alias_map[join].parent_alias == alias
 840                    and join not in aliases
 841                )
 842
 843    def demote_joins(self, aliases):
 844        """
 845        Change join type from LOUTER to INNER for all joins in aliases.
 846
 847        Similarly to promote_joins(), this method must ensure no join chains
 848        containing first an outer, then an inner join are generated. If we
 849        are demoting b->c join in chain a LOUTER b LOUTER c then we must
 850        demote a->b automatically, or otherwise the demotion of b->c doesn't
 851        actually change anything in the query results. .
 852        """
 853        aliases = list(aliases)
 854        while aliases:
 855            alias = aliases.pop(0)
 856            if self.alias_map[alias].join_type == LOUTER:
 857                self.alias_map[alias] = self.alias_map[alias].demote()
 858                parent_alias = self.alias_map[alias].parent_alias
 859                if self.alias_map[parent_alias].join_type == INNER:
 860                    aliases.append(parent_alias)
 861
 862    def reset_refcounts(self, to_counts):
 863        """
 864        Reset reference counts for aliases so that they match the value passed
 865        in `to_counts`.
 866        """
 867        for alias, cur_refcount in self.alias_refcount.copy().items():
 868            unref_amount = cur_refcount - to_counts.get(alias, 0)
 869            self.unref_alias(alias, unref_amount)
 870
    def change_aliases(self, change_map):
        """
        Change the aliases in change_map (which maps old-alias -> new-alias),
        relabelling any references to them in select columns and the where
        clause.
        """
        # If keys and values of change_map were to intersect, an alias might be
        # updated twice (e.g. T4 -> T5, T5 -> T6, so also T4 -> T6) depending
        # on their order in change_map.
        assert set(change_map).isdisjoint(change_map.values())

        # 1. Update references in "select" (normal columns plus aliases),
        # "group by" and "where".
        self.where.relabel_aliases(change_map)
        if isinstance(self.group_by, tuple):
            self.group_by = tuple(
                [col.relabeled_clone(change_map) for col in self.group_by]
            )
        self.select = tuple([col.relabeled_clone(change_map) for col in self.select])
        # The `and` keeps a falsy annotations value (e.g. empty dict) as-is.
        self.annotations = self.annotations and {
            key: col.relabeled_clone(change_map)
            for key, col in self.annotations.items()
        }

        # 2. Rename the alias in the internal table/alias datastructures.
        for old_alias, new_alias in change_map.items():
            if old_alias not in self.alias_map:
                continue
            alias_data = self.alias_map[old_alias].relabeled_clone(change_map)
            self.alias_map[new_alias] = alias_data
            self.alias_refcount[new_alias] = self.alias_refcount[old_alias]
            del self.alias_refcount[old_alias]
            del self.alias_map[old_alias]

            # Keep table_map's per-table alias list in sync with the rename.
            table_aliases = self.table_map[alias_data.table_name]
            for pos, alias in enumerate(table_aliases):
                if alias == old_alias:
                    table_aliases[pos] = new_alias
                    break
        self.external_aliases = {
            # Table is aliased or it's being changed and thus is aliased.
            change_map.get(alias, alias): (aliased or alias in change_map)
            for alias, aliased in self.external_aliases.items()
        }
 915
    def bump_prefix(self, other_query, exclude=None):
        """
        Change the alias prefix to the next letter in the alphabet in a way
        that the other query's aliases and this query's aliases will not
        conflict. Even tables that previously had no alias will get an alias
        after this call. To prevent changing aliases use the exclude parameter.
        """

        def prefix_gen():
            """
            Generate a sequence of characters in alphabetical order:
                -> 'A', 'B', 'C', ...

            When the alphabet is finished, the sequence will continue with the
            Cartesian product:
                -> 'AA', 'AB', 'AC', ...
            """
            alphabet = ascii_uppercase
            prefix = chr(ord(self.alias_prefix) + 1)
            yield prefix
            for n in count(1):
                seq = alphabet[alphabet.index(prefix) :] if prefix else alphabet
                for s in product(seq, repeat=n):
                    yield "".join(s)
                prefix = None

        if self.alias_prefix != other_query.alias_prefix:
            # No clashes between self and outer query should be possible.
            return

        # Explicitly avoid infinite loop. The constant divider is based on how
        # much depth recursive subquery references add to the stack. This value
        # might need to be adjusted when adding or removing function calls from
        # the code path in charge of performing these operations.
        local_recursion_limit = sys.getrecursionlimit() // 16
        for pos, prefix in enumerate(prefix_gen()):
            if prefix not in self.subq_aliases:
                self.alias_prefix = prefix
                break
            if pos > local_recursion_limit:
                raise RecursionError(
                    "Maximum recursion depth exceeded: too many subqueries."
                )
        # Record the new prefix on both queries so later bumps avoid it too.
        self.subq_aliases = self.subq_aliases.union([self.alias_prefix])
        other_query.subq_aliases = other_query.subq_aliases.union(self.subq_aliases)
        if exclude is None:
            exclude = {}
        # Renumber every existing alias under the new prefix (T1, T2, ...),
        # leaving aliases listed in `exclude` untouched.
        self.change_aliases(
            {
                alias: "%s%d" % (self.alias_prefix, pos)
                for pos, alias in enumerate(self.alias_map)
                if alias not in exclude
            }
        )
 970
 971    def get_initial_alias(self):
 972        """
 973        Return the first alias for this query, after increasing its reference
 974        count.
 975        """
 976        if self.alias_map:
 977            alias = self.base_table
 978            self.ref_alias(alias)
 979        elif self.model:
 980            alias = self.join(self.base_table_class(self.get_meta().db_table, None))
 981        else:
 982            alias = None
 983        return alias
 984
 985    def count_active_tables(self):
 986        """
 987        Return the number of tables in this query with a non-zero reference
 988        count. After execution, the reference counts are zeroed, so tables
 989        added in compiler will not be seen by this method.
 990        """
 991        return len([1 for count in self.alias_refcount.values() if count])
 992
    def join(self, join, reuse=None, reuse_with_filtered_relation=False):
        """
        Return an alias for the 'join', either reusing an existing alias for
        that join or creating a new one. 'join' is either a base_table_class or
        join_class.

        The 'reuse' parameter can be either None which means all joins are
        reusable, or it can be a set containing the aliases that can be reused.

        The 'reuse_with_filtered_relation' parameter is used when computing
        FilteredRelation instances.

        A join is always created as LOUTER if the lhs alias is LOUTER to make
        sure chains like t1 LOUTER t2 INNER t3 aren't generated. All new
        joins are created as LOUTER if the join is nullable.
        """
        # Collect the aliases this join may reuse: filtered relations use a
        # looser equals() comparison restricted to the explicit reuse set.
        if reuse_with_filtered_relation and reuse:
            reuse_aliases = [
                a for a, j in self.alias_map.items() if a in reuse and j.equals(join)
            ]
        else:
            reuse_aliases = [
                a
                for a, j in self.alias_map.items()
                if (reuse is None or a in reuse) and j == join
            ]
        if reuse_aliases:
            if join.table_alias in reuse_aliases:
                reuse_alias = join.table_alias
            else:
                # Reuse the most recent alias of the joined table
                # (a many-to-many relation may be joined multiple times).
                reuse_alias = reuse_aliases[-1]
            self.ref_alias(reuse_alias)
            return reuse_alias

        # No reuse is possible, so we need a new alias.
        alias, _ = self.table_alias(
            join.table_name, create=True, filtered_relation=join.filtered_relation
        )
        if join.join_type:
            # Promote to LOUTER when the parent is LOUTER or the join itself
            # is nullable, so no LOUTER-then-INNER chains are produced.
            if self.alias_map[join.parent_alias].join_type == LOUTER or join.nullable:
                join_type = LOUTER
            else:
                join_type = INNER
            join.join_type = join_type
        join.table_alias = alias
        self.alias_map[alias] = join
        return alias
1042
    def join_parent_model(self, opts, model, alias, seen):
        """
        Make sure the given 'model' is joined in the query. If 'model' isn't
        a parent of 'opts' or if it is None this method is a no-op.

        The 'alias' is the root alias for starting the join, 'seen' is a dict
        of model -> alias of existing joins. It must also contain a mapping
        of None -> some alias. This will be returned in the no-op case.
        """
        if model in seen:
            return seen[model]
        chain = opts.get_base_chain(model)
        if not chain:
            return alias
        curr_opts = opts
        for int_model in chain:
            # Reuse an already-joined intermediate model if possible.
            if int_model in seen:
                curr_opts = int_model._meta
                alias = seen[int_model]
                continue
            # Proxy model have elements in base chain
            # with no parents, assign the new options
            # object and skip to the next base in that
            # case
            if not curr_opts.parents[int_model]:
                curr_opts = int_model._meta
                continue
            # Join via the parent-link field and record the resulting alias.
            link_field = curr_opts.get_ancestor_link(int_model)
            join_info = self.setup_joins([link_field.name], curr_opts, alias)
            curr_opts = int_model._meta
            alias = seen[int_model] = join_info.joins[-1]
        # Fall back to the None -> alias mapping when no usable alias remains.
        return alias or seen[None]
1075
1076    def check_alias(self, alias):
1077        if FORBIDDEN_ALIAS_PATTERN.search(alias):
1078            raise ValueError(
1079                "Column aliases cannot contain whitespace characters, quotation marks, "
1080                "semicolons, or SQL comments."
1081            )
1082
1083    def add_annotation(self, annotation, alias, select=True):
1084        """Add a single annotation expression to the Query."""
1085        self.check_alias(alias)
1086        annotation = annotation.resolve_expression(self, allow_joins=True, reuse=None)
1087        if select:
1088            self.append_annotation_mask([alias])
1089        else:
1090            self.set_annotation_mask(set(self.annotation_select).difference({alias}))
1091        self.annotations[alias] = annotation
1092
    def resolve_expression(self, query, *args, **kwargs):
        """
        Resolve this query for use as a subquery expression within ``query``.

        Return a clone marked as a subquery whose alias prefix no longer
        clashes with the outer query and whose WHERE clause, combined
        queries, and annotations are resolved against the outer query.
        """
        clone = self.clone()
        # Subqueries need to use a different set of aliases than the outer query.
        clone.bump_prefix(query)
        clone.subquery = True
        clone.where.resolve_expression(query, *args, **kwargs)
        # Resolve combined queries.
        if clone.combinator:
            clone.combined_queries = tuple(
                [
                    combined_query.resolve_expression(query, *args, **kwargs)
                    for combined_query in clone.combined_queries
                ]
            )
        for key, value in clone.annotations.items():
            resolved = value.resolve_expression(query, *args, **kwargs)
            if hasattr(resolved, "external_aliases"):
                # Propagate this clone's external references to the resolved
                # annotation so they survive further nesting.
                resolved.external_aliases.update(clone.external_aliases)
            clone.annotations[key] = resolved
        # Outer query's aliases are considered external.
        for alias, table in query.alias_map.items():
            clone.external_aliases[alias] = (
                isinstance(table, Join)
                and table.join_field.related_model._meta.db_table != alias
            ) or (
                isinstance(table, BaseTable) and table.table_name != table.table_alias
            )
        return clone
1121
1122    def get_external_cols(self):
1123        exprs = chain(self.annotations.values(), self.where.children)
1124        return [
1125            col
1126            for col in self._gen_cols(exprs, include_external=True)
1127            if col.alias in self.external_aliases
1128        ]
1129
1130    def get_group_by_cols(self, wrapper=None):
1131        # If wrapper is referenced by an alias for an explicit GROUP BY through
1132        # values() a reference to this expression and not the self must be
1133        # returned to ensure external column references are not grouped against
1134        # as well.
1135        external_cols = self.get_external_cols()
1136        if any(col.possibly_multivalued for col in external_cols):
1137            return [wrapper or self]
1138        return external_cols
1139
1140    def as_sql(self, compiler, connection):
1141        # Some backends (e.g. Oracle) raise an error when a subquery contains
1142        # unnecessary ORDER BY clause.
1143        if (
1144            self.subquery
1145            and not connection.features.ignores_unnecessary_order_by_in_subqueries
1146        ):
1147            self.clear_ordering(force=False)
1148            for query in self.combined_queries:
1149                query.clear_ordering(force=False)
1150        sql, params = self.get_compiler(connection=connection).as_sql()
1151        if self.subquery:
1152            sql = "(%s)" % sql
1153        return sql, params
1154
1155    def resolve_lookup_value(self, value, can_reuse, allow_joins):
1156        if hasattr(value, "resolve_expression"):
1157            value = value.resolve_expression(
1158                self,
1159                reuse=can_reuse,
1160                allow_joins=allow_joins,
1161            )
1162        elif isinstance(value, list | tuple):
1163            # The items of the iterable may be expressions and therefore need
1164            # to be resolved independently.
1165            values = (
1166                self.resolve_lookup_value(sub_value, can_reuse, allow_joins)
1167                for sub_value in value
1168            )
1169            type_ = type(value)
1170            if hasattr(type_, "_make"):  # namedtuple
1171                return type_(*values)
1172            return type_(values)
1173        return value
1174
1175    def solve_lookup_type(self, lookup, summarize=False):
1176        """
1177        Solve the lookup type from the lookup (e.g.: 'foobar__id__icontains').
1178        """
1179        lookup_splitted = lookup.split(LOOKUP_SEP)
1180        if self.annotations:
1181            annotation, expression_lookups = refs_expression(
1182                lookup_splitted, self.annotations
1183            )
1184            if annotation:
1185                expression = self.annotations[annotation]
1186                if summarize:
1187                    expression = Ref(annotation, expression)
1188                return expression_lookups, (), expression
1189        _, field, _, lookup_parts = self.names_to_path(lookup_splitted, self.get_meta())
1190        field_parts = lookup_splitted[0 : len(lookup_splitted) - len(lookup_parts)]
1191        if len(lookup_parts) > 1 and not field_parts:
1192            raise FieldError(
1193                'Invalid lookup "{}" for model {}".'.format(
1194                    lookup, self.get_meta().model.__name__
1195                )
1196            )
1197        return lookup_parts, field_parts, False
1198
1199    def check_query_object_type(self, value, opts, field):
1200        """
1201        Check whether the object passed while querying is of the correct type.
1202        If not, raise a ValueError specifying the wrong object.
1203        """
1204        if hasattr(value, "_meta"):
1205            if not check_rel_lookup_compatibility(value._meta.model, opts, field):
1206                raise ValueError(
1207                    f'Cannot query "{value}": Must be "{opts.object_name}" instance.'
1208                )
1209
1210    def check_related_objects(self, field, value, opts):
1211        """Check the type of object passed to query relations."""
1212        if field.is_relation:
1213            # Check that the field and the queryset use the same model in a
1214            # query like .filter(author=Author.objects.all()). For example, the
1215            # opts would be Author's (from the author field) and value.model
1216            # would be Author.objects.all() queryset's .model (Author also).
1217            # The field is the related field on the lhs side.
1218            if (
1219                isinstance(value, Query)
1220                and not value.has_select_fields
1221                and not check_rel_lookup_compatibility(value.model, opts, field)
1222            ):
1223                raise ValueError(
1224                    'Cannot use QuerySet for "{}": Use a QuerySet for "{}".'.format(
1225                        value.model._meta.object_name, opts.object_name
1226                    )
1227                )
1228            elif hasattr(value, "_meta"):
1229                self.check_query_object_type(value, opts, field)
1230            elif hasattr(value, "__iter__"):
1231                for v in value:
1232                    self.check_query_object_type(v, opts, field)
1233
1234    def check_filterable(self, expression):
1235        """Raise an error if expression cannot be used in a WHERE clause."""
1236        if hasattr(expression, "resolve_expression") and not getattr(
1237            expression, "filterable", True
1238        ):
1239            raise NotSupportedError(
1240                expression.__class__.__name__ + " is disallowed in the filter "
1241                "clause."
1242            )
1243        if hasattr(expression, "get_source_expressions"):
1244            for expr in expression.get_source_expressions():
1245                self.check_filterable(expr)
1246
    def build_lookup(self, lookups, lhs, rhs):
        """
        Try to extract transforms and lookup from given lhs.

        The lhs value is something that works like SQLExpression.
        The rhs value is what the lookup is going to compare against.
        The lookups is a list of names to extract using get_lookup()
        and get_transform().

        Return a Lookup instance, or None when the final name is neither a
        lookup nor a transform on the lhs.
        """
        # __exact is the default lookup if one isn't given.
        *transforms, lookup_name = lookups or ["exact"]
        # All names except the last are applied as transforms on the lhs.
        for name in transforms:
            lhs = self.try_transform(lhs, name)
        # First try get_lookup() so that the lookup takes precedence if the lhs
        # supports both transform and lookup for the name.
        lookup_class = lhs.get_lookup(lookup_name)
        if not lookup_class:
            # A lookup wasn't found. Try to interpret the name as a transform
            # and do an Exact lookup against it.
            lhs = self.try_transform(lhs, lookup_name)
            lookup_name = "exact"
            lookup_class = lhs.get_lookup(lookup_name)
            if not lookup_class:
                return

        lookup = lookup_class(lhs, rhs)
        # Interpret '__exact=None' as the sql 'is NULL'; otherwise, reject all
        # uses of None as a query value unless the lookup supports it.
        if lookup.rhs is None and not lookup.can_use_none_as_rhs:
            if lookup_name not in ("exact", "iexact"):
                raise ValueError("Cannot use None as a query value")
            return lhs.get_lookup("isnull")(lhs, True)

        # For Oracle '' is equivalent to null. The check must be done at this
        # stage because join promotion can't be done in the compiler. Using
        # DEFAULT_DB_ALIAS isn't nice but it's the best that can be done here.
        # A similar thing is done in is_nullable(), too.
        if (
            lookup_name == "exact"
            and lookup.rhs == ""
            and connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls
        ):
            return lhs.get_lookup("isnull")(lhs, True)

        return lookup
1292
1293    def try_transform(self, lhs, name):
1294        """
1295        Helper method for build_lookup(). Try to fetch and initialize
1296        a transform for name parameter from lhs.
1297        """
1298        transform_class = lhs.get_transform(name)
1299        if transform_class:
1300            return transform_class(lhs)
1301        else:
1302            output_field = lhs.output_field.__class__
1303            suggested_lookups = difflib.get_close_matches(
1304                name, output_field.get_lookups()
1305            )
1306            if suggested_lookups:
1307                suggestion = ", perhaps you meant %s?" % " or ".join(suggested_lookups)
1308            else:
1309                suggestion = "."
1310            raise FieldError(
1311                "Unsupported lookup '{}' for {} or join on the field not "
1312                "permitted{}".format(name, output_field.__name__, suggestion)
1313            )
1314
1315    def build_filter(
1316        self,
1317        filter_expr,
1318        branch_negated=False,
1319        current_negated=False,
1320        can_reuse=None,
1321        allow_joins=True,
1322        split_subq=True,
1323        reuse_with_filtered_relation=False,
1324        check_filterable=True,
1325        summarize=False,
1326    ):
1327        """
1328        Build a WhereNode for a single filter clause but don't add it
1329        to this Query. Query.add_q() will then add this filter to the where
1330        Node.
1331
1332        The 'branch_negated' tells us if the current branch contains any
1333        negations. This will be used to determine if subqueries are needed.
1334
1335        The 'current_negated' is used to determine if the current filter is
1336        negated or not and this will be used to determine if IS NULL filtering
1337        is needed.
1338
1339        The difference between current_negated and branch_negated is that
1340        branch_negated is set on first negation, but current_negated is
1341        flipped for each negation.
1342
1343        Note that add_filter will not do any negating itself, that is done
1344        upper in the code by add_q().
1345
1346        The 'can_reuse' is a set of reusable joins for multijoins.
1347
1348        If 'reuse_with_filtered_relation' is True, then only joins in can_reuse
1349        will be reused.
1350
1351        The method will create a filter clause that can be added to the current
1352        query. However, if the filter isn't added to the query then the caller
1353        is responsible for unreffing the joins used.
1354        """
1355        if isinstance(filter_expr, dict):
1356            raise FieldError("Cannot parse keyword query as dict")
1357        if isinstance(filter_expr, Q):
1358            return self._add_q(
1359                filter_expr,
1360                branch_negated=branch_negated,
1361                current_negated=current_negated,
1362                used_aliases=can_reuse,
1363                allow_joins=allow_joins,
1364                split_subq=split_subq,
1365                check_filterable=check_filterable,
1366                summarize=summarize,
1367            )
1368        if hasattr(filter_expr, "resolve_expression"):
1369            if not getattr(filter_expr, "conditional", False):
1370                raise TypeError("Cannot filter against a non-conditional expression.")
1371            condition = filter_expr.resolve_expression(
1372                self, allow_joins=allow_joins, summarize=summarize
1373            )
1374            if not isinstance(condition, Lookup):
1375                condition = self.build_lookup(["exact"], condition, True)
1376            return WhereNode([condition], connector=AND), []
1377        arg, value = filter_expr
1378        if not arg:
1379            raise FieldError("Cannot parse keyword query %r" % arg)
1380        lookups, parts, reffed_expression = self.solve_lookup_type(arg, summarize)
1381
1382        if check_filterable:
1383            self.check_filterable(reffed_expression)
1384
1385        if not allow_joins and len(parts) > 1:
1386            raise FieldError("Joined field references are not permitted in this query")
1387
1388        pre_joins = self.alias_refcount.copy()
1389        value = self.resolve_lookup_value(value, can_reuse, allow_joins)
1390        used_joins = {
1391            k for k, v in self.alias_refcount.items() if v > pre_joins.get(k, 0)
1392        }
1393
1394        if check_filterable:
1395            self.check_filterable(value)
1396
1397        if reffed_expression:
1398            condition = self.build_lookup(lookups, reffed_expression, value)
1399            return WhereNode([condition], connector=AND), []
1400
1401        opts = self.get_meta()
1402        alias = self.get_initial_alias()
1403        allow_many = not branch_negated or not split_subq
1404
1405        try:
1406            join_info = self.setup_joins(
1407                parts,
1408                opts,
1409                alias,
1410                can_reuse=can_reuse,
1411                allow_many=allow_many,
1412                reuse_with_filtered_relation=reuse_with_filtered_relation,
1413            )
1414
1415            # Prevent iterator from being consumed by check_related_objects()
1416            if isinstance(value, Iterator):
1417                value = list(value)
1418            self.check_related_objects(join_info.final_field, value, join_info.opts)
1419
1420            # split_exclude() needs to know which joins were generated for the
1421            # lookup parts
1422            self._lookup_joins = join_info.joins
1423        except MultiJoin as e:
1424            return self.split_exclude(filter_expr, can_reuse, e.names_with_path)
1425
1426        # Update used_joins before trimming since they are reused to determine
1427        # which joins could be later promoted to INNER.
1428        used_joins.update(join_info.joins)
1429        targets, alias, join_list = self.trim_joins(
1430            join_info.targets, join_info.joins, join_info.path
1431        )
1432        if can_reuse is not None:
1433            can_reuse.update(join_list)
1434
1435        if join_info.final_field.is_relation:
1436            if len(targets) == 1:
1437                col = self._get_col(targets[0], join_info.final_field, alias)
1438            else:
1439                col = MultiColSource(
1440                    alias, targets, join_info.targets, join_info.final_field
1441                )
1442        else:
1443            col = self._get_col(targets[0], join_info.final_field, alias)
1444
1445        condition = self.build_lookup(lookups, col, value)
1446        lookup_type = condition.lookup_name
1447        clause = WhereNode([condition], connector=AND)
1448
1449        require_outer = (
1450            lookup_type == "isnull" and condition.rhs is True and not current_negated
1451        )
1452        if (
1453            current_negated
1454            and (lookup_type != "isnull" or condition.rhs is False)
1455            and condition.rhs is not None
1456        ):
1457            require_outer = True
1458            if lookup_type != "isnull":
1459                # The condition added here will be SQL like this:
1460                # NOT (col IS NOT NULL), where the first NOT is added in
1461                # upper layers of code. The reason for addition is that if col
1462                # is null, then col != someval will result in SQL "unknown"
1463                # which isn't the same as in Python. The Python None handling
1464                # is wanted, and it can be gotten by
1465                # (col IS NULL OR col != someval)
1466                #   <=>
1467                # NOT (col IS NOT NULL AND col = someval).
1468                if (
1469                    self.is_nullable(targets[0])
1470                    or self.alias_map[join_list[-1]].join_type == LOUTER
1471                ):
1472                    lookup_class = targets[0].get_lookup("isnull")
1473                    col = self._get_col(targets[0], join_info.targets[0], alias)
1474                    clause.add(lookup_class(col, False), AND)
1475                # If someval is a nullable column, someval IS NOT NULL is
1476                # added.
1477                if isinstance(value, Col) and self.is_nullable(value.target):
1478                    lookup_class = value.target.get_lookup("isnull")
1479                    clause.add(lookup_class(value, False), AND)
1480        return clause, used_joins if not require_outer else ()
1481
    def add_filter(self, filter_lhs, filter_rhs):
        """Add a single (lookup, value) constraint by wrapping it in a Q object."""
        self.add_q(Q((filter_lhs, filter_rhs)))
1484
    def add_q(self, q_object):
        """
        A preprocessor for the internal _add_q(). Responsible for doing final
        join promotion.
        """
        # For join promotion this case is doing an AND for the added q_object
        # and existing conditions. So, any existing inner join forces the join
        # type to remain inner. Existing outer joins can however be demoted.
        # (Consider case where rel_a is LOUTER and rel_a__col=1 is added - if
        # rel_a doesn't produce any rows, then the whole condition must fail.
        # So, demotion is OK.
        existing_inner = {
            a for a in self.alias_map if self.alias_map[a].join_type == INNER
        }
        clause, _ = self._add_q(q_object, self.used_aliases)
        if clause:
            self.where.add(clause, AND)
        # Any alias that was INNER before this call must stay INNER, even if
        # _add_q() promoted it while processing the children.
        self.demote_joins(existing_inner)
1503
    def build_where(self, filter_expr):
        """Build and return a where clause for filter_expr, disallowing joins."""
        return self.build_filter(filter_expr, allow_joins=False)[0]
1506
    def clear_where(self):
        """Discard all existing WHERE conditions by installing a fresh node."""
        self.where = WhereNode()
1509
1510    def _add_q(
1511        self,
1512        q_object,
1513        used_aliases,
1514        branch_negated=False,
1515        current_negated=False,
1516        allow_joins=True,
1517        split_subq=True,
1518        check_filterable=True,
1519        summarize=False,
1520    ):
1521        """Add a Q-object to the current filter."""
1522        connector = q_object.connector
1523        current_negated ^= q_object.negated
1524        branch_negated = branch_negated or q_object.negated
1525        target_clause = WhereNode(connector=connector, negated=q_object.negated)
1526        joinpromoter = JoinPromoter(
1527            q_object.connector, len(q_object.children), current_negated
1528        )
1529        for child in q_object.children:
1530            child_clause, needed_inner = self.build_filter(
1531                child,
1532                can_reuse=used_aliases,
1533                branch_negated=branch_negated,
1534                current_negated=current_negated,
1535                allow_joins=allow_joins,
1536                split_subq=split_subq,
1537                check_filterable=check_filterable,
1538                summarize=summarize,
1539            )
1540            joinpromoter.add_votes(needed_inner)
1541            if child_clause:
1542                target_clause.add(child_clause, connector)
1543        needed_inner = joinpromoter.update_join_types(self)
1544        return target_clause, needed_inner
1545
    def build_filtered_relation_q(
        self, q_object, reuse, branch_negated=False, current_negated=False
    ):
        """Add a FilteredRelation object to the current filter."""
        connector = q_object.connector
        current_negated ^= q_object.negated
        branch_negated = branch_negated or q_object.negated
        target_clause = WhereNode(connector=connector, negated=q_object.negated)
        for child in q_object.children:
            if isinstance(child, Node):
                # Nested Q object: recurse, carrying the negation state down.
                child_clause = self.build_filtered_relation_q(
                    child,
                    reuse=reuse,
                    branch_negated=branch_negated,
                    current_negated=current_negated,
                )
            else:
                # Leaf (lookup, value) pair: build a normal filter, forcing
                # reuse of the filtered relation's joins and disabling the
                # subquery split used for excludes.
                child_clause, _ = self.build_filter(
                    child,
                    can_reuse=reuse,
                    branch_negated=branch_negated,
                    current_negated=current_negated,
                    allow_joins=True,
                    split_subq=False,
                    reuse_with_filtered_relation=True,
                )
            target_clause.add(child_clause, connector)
        return target_clause
1574
1575    def add_filtered_relation(self, filtered_relation, alias):
1576        filtered_relation.alias = alias
1577        lookups = dict(get_children_from_q(filtered_relation.condition))
1578        relation_lookup_parts, relation_field_parts, _ = self.solve_lookup_type(
1579            filtered_relation.relation_name
1580        )
1581        if relation_lookup_parts:
1582            raise ValueError(
1583                "FilteredRelation's relation_name cannot contain lookups "
1584                "(got %r)." % filtered_relation.relation_name
1585            )
1586        for lookup in chain(lookups):
1587            lookup_parts, lookup_field_parts, _ = self.solve_lookup_type(lookup)
1588            shift = 2 if not lookup_parts else 1
1589            lookup_field_path = lookup_field_parts[:-shift]
1590            for idx, lookup_field_part in enumerate(lookup_field_path):
1591                if len(relation_field_parts) > idx:
1592                    if relation_field_parts[idx] != lookup_field_part:
1593                        raise ValueError(
1594                            "FilteredRelation's condition doesn't support "
1595                            "relations outside the {!r} (got {!r}).".format(
1596                                filtered_relation.relation_name, lookup
1597                            )
1598                        )
1599                else:
1600                    raise ValueError(
1601                        "FilteredRelation's condition doesn't support nested "
1602                        f"relations deeper than the relation_name (got {lookup!r} for "
1603                        f"{filtered_relation.relation_name!r})."
1604                    )
1605        self._filtered_relations[filtered_relation.alias] = filtered_relation
1606
    def names_to_path(self, names, opts, allow_many=True, fail_on_missing=False):
        """
        Walk the list of names and turns them into PathInfo tuples. A single
        name in 'names' can generate multiple PathInfos (m2m, for example).

        'names' is the path of names to travel, 'opts' is the model Options we
        start the name resolving from, 'allow_many' is as for setup_joins().
        If fail_on_missing is set to True, then a name that can't be resolved
        will generate a FieldError.

        Return a list of PathInfo tuples. In addition return the final field
        (the last used join field) and target (which is a field guaranteed to
        contain the same value as the final field). Finally, return those names
        that weren't found (which are likely transforms and the final lookup).
        """
        path, names_with_path = [], []
        for pos, name in enumerate(names):
            cur_names_with_path = (name, [])
            # "pk" is an alias for the model's primary key field.
            if name == "pk":
                name = opts.pk.name

            field = None
            filtered_relation = None
            try:
                if opts is None:
                    raise FieldDoesNotExist
                field = opts.get_field(name)
            except FieldDoesNotExist:
                # Not a model field: the name may refer to an annotation, or
                # (only in first position) to a FilteredRelation alias.
                if name in self.annotation_select:
                    field = self.annotation_select[name].output_field
                elif name in self._filtered_relations and pos == 0:
                    filtered_relation = self._filtered_relations[name]
                    if LOOKUP_SEP in filtered_relation.relation_name:
                        # Multi-hop relation_name: resolve it recursively and
                        # splice in all but the last hop (the final hop is
                        # handled through `field` below).
                        parts = filtered_relation.relation_name.split(LOOKUP_SEP)
                        filtered_relation_path, field, _, _ = self.names_to_path(
                            parts,
                            opts,
                            allow_many,
                            fail_on_missing,
                        )
                        path.extend(filtered_relation_path[:-1])
                    else:
                        field = opts.get_field(filtered_relation.relation_name)
            if field is not None:
                # Fields that contain one-to-many relations with a generic
                # model (like a GenericForeignKey) cannot generate reverse
                # relations and therefore cannot be used for reverse querying.
                if field.is_relation and not field.related_model:
                    raise FieldError(
                        "Field %r does not generate an automatic reverse "
                        "relation and therefore cannot be used for reverse "
                        "querying. If it is a GenericForeignKey, consider "
                        "adding a GenericRelation." % name
                    )
                try:
                    model = field.model._meta.concrete_model
                except AttributeError:
                    # QuerySet.annotate() may introduce fields that aren't
                    # attached to a model.
                    model = None
            else:
                # We didn't find the current field, so move position back
                # one step.
                pos -= 1
                if pos == -1 or fail_on_missing:
                    available = sorted(
                        [
                            *get_field_names_from_opts(opts),
                            *self.annotation_select,
                            *self._filtered_relations,
                        ]
                    )
                    raise FieldError(
                        "Cannot resolve keyword '{}' into field. "
                        "Choices are: {}".format(name, ", ".join(available))
                    )
                # Remaining names are presumed to be transforms/lookups and
                # are returned to the caller untouched.
                break
            # Check if we need any joins for concrete inheritance cases (the
            # field lives in parent, but we are currently in one of its
            # children)
            if opts is not None and model is not opts.model:
                path_to_parent = opts.get_path_to_parent(model)
                if path_to_parent:
                    path.extend(path_to_parent)
                    cur_names_with_path[1].extend(path_to_parent)
                    opts = path_to_parent[-1].to_opts
            if hasattr(field, "path_infos"):
                # Relational field: expand into one or more PathInfo hops.
                if filtered_relation:
                    pathinfos = field.get_path_info(filtered_relation)
                else:
                    pathinfos = field.path_infos
                if not allow_many:
                    # A multi-valued (m2m) hop is forbidden; report how far we
                    # got so split_exclude() can build a subquery instead.
                    for inner_pos, p in enumerate(pathinfos):
                        if p.m2m:
                            cur_names_with_path[1].extend(pathinfos[0 : inner_pos + 1])
                            names_with_path.append(cur_names_with_path)
                            raise MultiJoin(pos + 1, names_with_path)
                last = pathinfos[-1]
                path.extend(pathinfos)
                final_field = last.join_field
                opts = last.to_opts
                targets = last.target_fields
                cur_names_with_path[1].extend(pathinfos)
                names_with_path.append(cur_names_with_path)
            else:
                # Local non-relational field.
                final_field = field
                targets = (field,)
                if fail_on_missing and pos + 1 != len(names):
                    raise FieldError(
                        "Cannot resolve keyword {!r} into field. Join on '{}'"
                        " not permitted.".format(names[pos + 1], name)
                    )
                break
        return path, final_field, targets, names[pos + 1 :]
1722
    def setup_joins(
        self,
        names,
        opts,
        alias,
        can_reuse=None,
        allow_many=True,
        reuse_with_filtered_relation=False,
    ):
        """
        Compute the necessary table joins for the passage through the fields
        given in 'names'. 'opts' is the Options class for the current model
        (which gives the table we are starting from), 'alias' is the alias for
        the table to start the joining from.

        The 'can_reuse' defines the reverse foreign key joins we can reuse. It
        can be None in which case all joins are reusable or a set of aliases
        that can be reused. Note that non-reverse foreign keys are always
        reusable when using setup_joins().

        The 'reuse_with_filtered_relation' can be used to force 'can_reuse'
        parameter and force the relation on the given connections.

        If 'allow_many' is False, then any reverse foreign key seen will
        generate a MultiJoin exception.

        Return the final field involved in the joins, the target field (used
        for any 'where' constraint), the final 'opts' value, the joins, the
        field path traveled to generate the joins, and a transform function
        that takes a field and alias and is equivalent to `field.get_col(alias)`
        in the simple case but wraps field transforms if they were included in
        names.

        The target field is the field containing the concrete value. Final
        field can be something different, for example foreign key pointing to
        that value. Final field is needed for example in some value
        conversions (convert 'obj' in fk__id=obj to pk val using the foreign
        key field for example).
        """
        joins = [alias]
        # The transform can't be applied yet, as joins must be trimmed later.
        # To avoid making every caller of this method look up transforms
        # directly, compute transforms here and create a partial that converts
        # fields to the appropriate wrapped version.

        def final_transformer(field, alias):
            if not self.alias_cols:
                alias = None
            return field.get_col(alias)

        # Try resolving all the names as fields first. If there's an error,
        # treat trailing names as lookups until a field can be resolved.
        last_field_exception = None
        for pivot in range(len(names), 0, -1):
            try:
                path, final_field, targets, rest = self.names_to_path(
                    names[:pivot],
                    opts,
                    allow_many,
                    fail_on_missing=True,
                )
            except FieldError as exc:
                if pivot == 1:
                    # The first item cannot be a lookup, so it's safe
                    # to raise the field error here.
                    raise
                else:
                    last_field_exception = exc
            else:
                # The transforms are the remaining items that couldn't be
                # resolved into fields.
                transforms = names[pivot:]
                break
        # Wrap the column getter in each transform, innermost first, by
        # chaining partials around the previous transformer.
        for name in transforms:

            def transform(field, alias, *, name, previous):
                try:
                    wrapped = previous(field, alias)
                    return self.try_transform(wrapped, name)
                except FieldError:
                    # FieldError is raised if the transform doesn't exist.
                    if isinstance(final_field, Field) and last_field_exception:
                        raise last_field_exception
                    else:
                        raise
            # Bind `name` via keyword defaults to avoid the late-binding
            # closure pitfall inside this loop.
            final_transformer = functools.partial(
                transform, name=name, previous=final_transformer
            )
            final_transformer.has_transforms = True
        # Then, add the path to the query's joins. Note that we can't trim
        # joins at this stage - we will need the information about join type
        # of the trimmed joins.
        for join in path:
            if join.filtered_relation:
                filtered_relation = join.filtered_relation.clone()
                table_alias = filtered_relation.alias
            else:
                filtered_relation = None
                table_alias = None
            opts = join.to_opts
            if join.direct:
                nullable = self.is_nullable(join.join_field)
            else:
                # Reverse joins can always produce missing rows, so treat
                # them as nullable.
                nullable = True
            connection = self.join_class(
                opts.db_table,
                alias,
                table_alias,
                INNER,
                join.join_field,
                nullable,
                filtered_relation=filtered_relation,
            )
            reuse = can_reuse if join.m2m or reuse_with_filtered_relation else None
            alias = self.join(
                connection,
                reuse=reuse,
                reuse_with_filtered_relation=reuse_with_filtered_relation,
            )
            joins.append(alias)
            if filtered_relation:
                filtered_relation.path = joins[:]
        return JoinInfo(final_field, targets, opts, joins, path, final_transformer)
1847
    def trim_joins(self, targets, joins, path):
        """
        The 'target' parameter is the final field being joined to, 'joins'
        is the full list of join aliases. The 'path' contain the PathInfos
        used to create the joins.

        Return the final target field and table alias and the new active
        joins.

        Always trim any direct join if the target column is already in the
        previous table. Can't trim reverse joins as it's unknown if there's
        anything on the other side of the join.
        """
        joins = joins[:]
        # Walk the path backwards, peeling off trailing joins while they are
        # trimmable.
        for pos, info in enumerate(reversed(path)):
            if len(joins) == 1 or not info.direct:
                break
            if info.filtered_relation:
                break
            # The join is only trimmable if every target column is also
            # present on the near side of the join.
            join_targets = {t.column for t in info.join_field.foreign_related_fields}
            cur_targets = {t.column for t in targets}
            if not cur_targets.issubset(join_targets):
                break
            # Remap each target to the equivalent field on the previous table.
            targets_dict = {
                r[1].column: r[0]
                for r in info.join_field.related_fields
                if r[1].column in cur_targets
            }
            targets = tuple(targets_dict[t.column] for t in targets)
            # Drop the now-unused trailing alias and its refcount.
            self.unref_alias(joins.pop())
        return targets, joins[-1], joins
1879
1880    @classmethod
1881    def _gen_cols(cls, exprs, include_external=False, resolve_refs=True):
1882        for expr in exprs:
1883            if isinstance(expr, Col):
1884                yield expr
1885            elif include_external and callable(
1886                getattr(expr, "get_external_cols", None)
1887            ):
1888                yield from expr.get_external_cols()
1889            elif hasattr(expr, "get_source_expressions"):
1890                if not resolve_refs and isinstance(expr, Ref):
1891                    continue
1892                yield from cls._gen_cols(
1893                    expr.get_source_expressions(),
1894                    include_external=include_external,
1895                    resolve_refs=resolve_refs,
1896                )
1897
1898    @classmethod
1899    def _gen_col_aliases(cls, exprs):
1900        yield from (expr.alias for expr in cls._gen_cols(exprs))
1901
    def resolve_ref(self, name, allow_joins=True, reuse=None, summarize=False):
        """
        Resolve a name used in an expression (e.g. F('name')) into either an
        existing annotation, a Ref to one, or a column obtained by setting up
        the necessary joins.
        """
        annotation = self.annotations.get(name)
        if annotation is not None:
            if not allow_joins:
                # The annotation itself may reference joined columns, which
                # are forbidden here just like direct joined references.
                for alias in self._gen_col_aliases([annotation]):
                    if isinstance(self.alias_map[alias], Join):
                        raise FieldError(
                            "Joined field references are not permitted in this query"
                        )
            if summarize:
                # Summarize currently means we are doing an aggregate() query
                # which is executed as a wrapped subquery if any of the
                # aggregate() elements reference an existing annotation. In
                # that case we need to return a Ref to the subquery's annotation.
                if name not in self.annotation_select:
                    raise FieldError(
                        "Cannot aggregate over the '%s' alias. Use annotate() "
                        "to promote it." % name
                    )
                return Ref(name, self.annotation_select[name])
            else:
                return annotation
        else:
            field_list = name.split(LOOKUP_SEP)
            # The first part may still name an annotation, with the remaining
            # parts being transforms applied to it.
            annotation = self.annotations.get(field_list[0])
            if annotation is not None:
                for transform in field_list[1:]:
                    annotation = self.try_transform(annotation, transform)
                return annotation
            join_info = self.setup_joins(
                field_list, self.get_meta(), self.get_initial_alias(), can_reuse=reuse
            )
            targets, final_alias, join_list = self.trim_joins(
                join_info.targets, join_info.joins, join_info.path
            )
            if not allow_joins and len(join_list) > 1:
                raise FieldError(
                    "Joined field references are not permitted in this query"
                )
            if len(targets) > 1:
                raise FieldError(
                    "Referencing multicolumn fields with F() objects isn't supported"
                )
            # Verify that the last lookup in name is a field or a transform:
            # transform_function() raises FieldError if not.
            transform = join_info.transform_function(targets[0], final_alias)
            if reuse is not None:
                reuse.update(join_list)
            return transform
1951
    def split_exclude(self, filter_expr, can_reuse, names_with_path):
        """
        When doing an exclude against any kind of N-to-many relation, we need
        to use a subquery. This method constructs the nested query, given the
        original exclude filter (filter_expr) and the portion up to the first
        N-to-many relation field.

        For example, if the origin filter is ~Q(child__name='foo'), filter_expr
        is ('child__name', 'foo') and can_reuse is a set of joins usable for
        filters in the original query.

        We will turn this into equivalent of:
            WHERE NOT EXISTS(
                SELECT 1
                FROM child
                WHERE name = 'foo' AND child.parent_id = parent.id
                LIMIT 1
            )
        """
        # Generate the inner query.
        query = self.__class__(self.model)
        query._filtered_relations = self._filtered_relations
        filter_lhs, filter_rhs = filter_expr
        if isinstance(filter_rhs, OuterRef):
            # Re-wrap so the reference is resolved against the outer query
            # again from inside the new subquery.
            filter_rhs = OuterRef(filter_rhs)
        elif isinstance(filter_rhs, F):
            # An F() in the RHS refers to the outer query from the subquery's
            # point of view.
            filter_rhs = OuterRef(filter_rhs.name)
        query.add_filter(filter_lhs, filter_rhs)
        query.clear_ordering(force=True)
        # Try to have as simple as possible subquery -> trim leading joins from
        # the subquery.
        trimmed_prefix, contains_louter = query.trim_start(names_with_path)

        col = query.select[0]
        select_field = col.target
        alias = col.alias
        if alias in can_reuse:
            pk = select_field.model._meta.pk
            # Need to add a restriction so that outer query's filters are in effect for
            # the subquery, too.
            query.bump_prefix(self)
            lookup_class = select_field.get_lookup("exact")
            # Note that the query.select[0].alias is different from alias
            # due to bump_prefix above.
            lookup = lookup_class(pk.get_col(query.select[0].alias), pk.get_col(alias))
            query.where.add(lookup, AND)
            query.external_aliases[alias] = True

        # Correlate the subquery's selected column with the trimmed prefix of
        # the outer query.
        lookup_class = select_field.get_lookup("exact")
        lookup = lookup_class(col, ResolvedOuterRef(trimmed_prefix))
        query.where.add(lookup, AND)
        condition, needed_inner = self.build_filter(Exists(query))

        if contains_louter:
            or_null_condition, _ = self.build_filter(
                ("%s__isnull" % trimmed_prefix, True),
                current_negated=True,
                branch_negated=True,
                can_reuse=can_reuse,
            )
            condition.add(or_null_condition, OR)
            # Note that the end result will be:
            # (outercol NOT IN innerq AND outercol IS NOT NULL) OR outercol IS NULL.
            # This might look crazy but due to how IN works, this seems to be
            # correct. If the IS NOT NULL check is removed then outercol NOT
            # IN will return UNKNOWN. If the IS NULL check is removed, then if
            # outercol IS NULL we will not match the row.
        return condition, needed_inner
2020
    def set_empty(self):
        """Force this query (and any combined queries) to match no rows."""
        self.where.add(NothingNode(), AND)
        for query in self.combined_queries:
            query.set_empty()
2025
2026    def is_empty(self):
2027        return any(isinstance(c, NothingNode) for c in self.where.children)
2028
2029    def set_limits(self, low=None, high=None):
2030        """
2031        Adjust the limits on the rows retrieved. Use low/high to set these,
2032        as it makes it more Pythonic to read and write. When the SQL query is
2033        created, convert them to the appropriate offset and limit values.
2034
2035        Apply any limits passed in here to the existing constraints. Add low
2036        to the current low value and clamp both to any existing high value.
2037        """
2038        if high is not None:
2039            if self.high_mark is not None:
2040                self.high_mark = min(self.high_mark, self.low_mark + high)
2041            else:
2042                self.high_mark = self.low_mark + high
2043        if low is not None:
2044            if self.high_mark is not None:
2045                self.low_mark = min(self.high_mark, self.low_mark + low)
2046            else:
2047                self.low_mark = self.low_mark + low
2048
2049        if self.low_mark == self.high_mark:
2050            self.set_empty()
2051
2052    def clear_limits(self):
2053        """Clear any existing limits."""
2054        self.low_mark, self.high_mark = 0, None
2055
2056    @property
2057    def is_sliced(self):
2058        return self.low_mark != 0 or self.high_mark is not None
2059
2060    def has_limit_one(self):
2061        return self.high_mark is not None and (self.high_mark - self.low_mark) == 1
2062
    def can_filter(self):
        """
        Return True if adding filters to this instance is still possible.

        Typically, this means no limits or offsets have been put on the results.
        """
        # Filtering after slicing would silently change which rows the slice
        # refers to, so it is disallowed.
        return not self.is_sliced
2070
2071    def clear_select_clause(self):
2072        """Remove all fields from SELECT clause."""
2073        self.select = ()
2074        self.default_cols = False
2075        self.select_related = False
2076        self.set_extra_mask(())
2077        self.set_annotation_mask(())
2078
2079    def clear_select_fields(self):
2080        """
2081        Clear the list of fields to select (but not extra_select columns).
2082        Some queryset types completely replace any existing list of select
2083        columns.
2084        """
2085        self.select = ()
2086        self.values_select = ()
2087
2088    def add_select_col(self, col, name):
2089        self.select += (col,)
2090        self.values_select += (name,)
2091
2092    def set_select(self, cols):
2093        self.default_cols = False
2094        self.select = tuple(cols)
2095
2096    def add_distinct_fields(self, *field_names):
2097        """
2098        Add and resolve the given fields to the query's "distinct on" clause.
2099        """
2100        self.distinct_fields = field_names
2101        self.distinct = True
2102
    def add_fields(self, field_names, allow_m2m=True):
        """
        Add the given (model) fields to the select set. Add the field names in
        the order specified.

        field_names may contain LOOKUP_SEP-separated paths that span
        relations; the required joins are set up (and trimmed) as needed.
        If allow_m2m is False, a path crossing a multi-valued relation
        raises FieldError.
        """
        alias = self.get_initial_alias()
        opts = self.get_meta()

        try:
            cols = []
            for name in field_names:
                # Join promotion note - we must not remove any rows here, so
                # if there is no existing joins, use outer join.
                join_info = self.setup_joins(
                    name.split(LOOKUP_SEP), opts, alias, allow_many=allow_m2m
                )
                # Drop any joins that aren't needed to reach the targets.
                targets, final_alias, joins = self.trim_joins(
                    join_info.targets,
                    join_info.joins,
                    join_info.path,
                )
                for target in targets:
                    cols.append(join_info.transform_function(target, final_alias))
            if cols:
                self.set_select(cols)
        except MultiJoin:
            # setup_joins() raises MultiJoin when allow_many=False and the
            # path crosses a multi-valued relation; `name` is the offending
            # entry left over from the loop above.
            raise FieldError("Invalid field name: '%s'" % name)
        except FieldError:
            if LOOKUP_SEP in name:
                # For lookups spanning over relationships, show the error
                # from the model on which the lookup failed.
                raise
            elif name in self.annotations:
                raise FieldError(
                    "Cannot select the '%s' alias. Use annotate() to promote "
                    "it." % name
                )
            else:
                # Re-raise with the full list of valid choices so the caller
                # can spot typos.
                names = sorted(
                    [
                        *get_field_names_from_opts(opts),
                        *self.extra,
                        *self.annotation_select,
                        *self._filtered_relations,
                    ]
                )
                raise FieldError(
                    "Cannot resolve keyword {!r} into field. Choices are: {}".format(
                        name, ", ".join(names)
                    )
                )
2154
    def add_ordering(self, *ordering):
        """
        Add items from the 'ordering' sequence to the query's "order by"
        clause. These items are either field names (not column names) --
        possibly with a direction prefix ('-' or '?') -- or OrderBy
        expressions.

        If 'ordering' is empty, clear all ordering from the query.

        Raise FieldError for aggregates not promoted via annotate(), for
        unknown field names, and for items that are neither strings nor
        expressions.
        """
        errors = []
        for item in ordering:
            if isinstance(item, str):
                if item == "?":
                    # Random ordering needs no validation.
                    continue
                item = item.removeprefix("-")
                if item in self.annotations:
                    continue
                if self.extra and item in self.extra:
                    continue
                # names_to_path() validates the lookup. A descriptive
                # FieldError will be raised if it's not.
                self.names_to_path(item.split(LOOKUP_SEP), self.model._meta)
            elif not hasattr(item, "resolve_expression"):
                # Neither a string nor an expression; collected and reported
                # in one error after the loop.
                errors.append(item)
            if getattr(item, "contains_aggregate", False):
                raise FieldError(
                    "Using an aggregate in order_by() without also including "
                    "it in annotate() is not allowed: %s" % item
                )
        if errors:
            raise FieldError("Invalid order_by arguments: %s" % errors)
        if ordering:
            self.order_by += ordering
        else:
            # An empty order_by() call disables the model's default ordering.
            self.default_ordering = False
2190
2191    def clear_ordering(self, force=False, clear_default=True):
2192        """
2193        Remove any ordering settings if the current query allows it without
2194        side effects, set 'force' to True to clear the ordering regardless.
2195        If 'clear_default' is True, there will be no ordering in the resulting
2196        query (not even the model's default).
2197        """
2198        if not force and (
2199            self.is_sliced or self.distinct_fields or self.select_for_update
2200        ):
2201            return
2202        self.order_by = ()
2203        self.extra_order_by = ()
2204        if clear_default:
2205            self.default_ordering = False
2206
    def set_group_by(self, allow_aliases=True):
        """
        Expand the GROUP BY clause required by the query.

        This will usually be the set of all non-aggregate fields in the
        return data. If the database backend supports grouping by the
        primary key, and the query would be equivalent, the optimization
        will be made automatically.
        """
        if allow_aliases and self.values_select:
            # If grouping by aliases is allowed assign selected value aliases
            # by moving them to annotations.
            group_by_annotations = {}
            values_select = {}
            for alias, expr in zip(self.values_select, self.select):
                if isinstance(expr, Col):
                    # Plain column references can stay in the SELECT list.
                    values_select[alias] = expr
                else:
                    # Non-column expressions become annotations so GROUP BY
                    # can reference them by alias.
                    group_by_annotations[alias] = expr
            self.annotations = {**group_by_annotations, **self.annotations}
            self.append_annotation_mask(group_by_annotations)
            self.select = tuple(values_select.values())
            self.values_select = tuple(values_select)
        group_by = list(self.select)
        for alias, annotation in self.annotation_select.items():
            if not (group_by_cols := annotation.get_group_by_cols()):
                # Annotations with no group-by columns don't participate in
                # the GROUP BY clause.
                continue
            if allow_aliases and not annotation.contains_aggregate:
                # Reference the annotation by alias instead of repeating the
                # full expression.
                group_by.append(Ref(alias, annotation))
            else:
                group_by.extend(group_by_cols)
        self.group_by = tuple(group_by)
2239
2240    def add_select_related(self, fields):
2241        """
2242        Set up the select_related data structure so that we only select
2243        certain related models (as opposed to all models, when
2244        self.select_related=True).
2245        """
2246        if isinstance(self.select_related, bool):
2247            field_dict = {}
2248        else:
2249            field_dict = self.select_related
2250        for field in fields:
2251            d = field_dict
2252            for part in field.split(LOOKUP_SEP):
2253                d = d.setdefault(part, {})
2254        self.select_related = field_dict
2255
    def add_extra(self, select, select_params, where, params, tables, order_by):
        """
        Add data to the various extra_* attributes for user-created additions
        to the query.

        select maps alias names to raw SQL snippets, with select_params
        supplying values for their '%s' placeholders. where/params extend the
        WHERE clause, tables the FROM list, and order_by replaces the extra
        ORDER BY clause.
        """
        if select:
            # We need to pair any placeholder markers in the 'select'
            # dictionary with their parameters in 'select_params' so that
            # subsequent updates to the select dictionary also adjust the
            # parameters appropriately.
            select_pairs = {}
            if select_params:
                param_iter = iter(select_params)
            else:
                param_iter = iter([])
            for name, entry in select.items():
                self.check_alias(name)
                entry = str(entry)
                entry_params = []
                pos = entry.find("%s")
                while pos != -1:
                    # Only a bare '%s' consumes a parameter; '%%s' is an
                    # escaped literal and is skipped.
                    if pos == 0 or entry[pos - 1] != "%":
                        entry_params.append(next(param_iter))
                    pos = entry.find("%s", pos + 2)
                select_pairs[name] = (entry, entry_params)
            self.extra.update(select_pairs)
        if where or params:
            self.where.add(ExtraWhere(where, params), AND)
        if tables:
            self.extra_tables += tuple(tables)
        if order_by:
            self.extra_order_by = order_by
2288
2289    def clear_deferred_loading(self):
2290        """Remove any fields from the deferred loading set."""
2291        self.deferred_loading = (frozenset(), True)
2292
    def add_deferred_loading(self, field_names):
        """
        Add the given list of model field names to the set of fields to
        exclude from loading from the database when automatic column selection
        is done. Add the new field names to any existing field names that
        are deferred (or removed from any existing field names that are marked
        as the only ones for immediate loading).
        """
        # Fields on related models are stored in the literal double-underscore
        # format, so that we can use a set datastructure. We do the foo__bar
        # splitting and handling when computing the SQL column names (as part of
        # get_columns()).
        existing, defer = self.deferred_loading
        if defer:
            # Add to existing deferred names.
            self.deferred_loading = existing.union(field_names), True
        else:
            # Remove names from the set of any existing "immediate load" names.
            if new_existing := existing.difference(field_names):
                self.deferred_loading = new_existing, False
            else:
                # All "immediate load" names were deferred away; switch back
                # to deferral mode with whatever wasn't already immediate.
                self.clear_deferred_loading()
                if new_only := set(field_names).difference(existing):
                    self.deferred_loading = new_only, True
2317
2318    def add_immediate_loading(self, field_names):
2319        """
2320        Add the given list of model field names to the set of fields to
2321        retrieve when the SQL is executed ("immediate loading" fields). The
2322        field names replace any existing immediate loading field names. If
2323        there are field names already specified for deferred loading, remove
2324        those names from the new field_names before storing the new names
2325        for immediate loading. (That is, immediate loading overrides any
2326        existing immediate values, but respects existing deferrals.)
2327        """
2328        existing, defer = self.deferred_loading
2329        field_names = set(field_names)
2330        if "pk" in field_names:
2331            field_names.remove("pk")
2332            field_names.add(self.get_meta().pk.name)
2333
2334        if defer:
2335            # Remove any existing deferred names from the current set before
2336            # setting the new names.
2337            self.deferred_loading = field_names.difference(existing), False
2338        else:
2339            # Replace any existing "immediate load" field names.
2340            self.deferred_loading = frozenset(field_names), False
2341
2342    def set_annotation_mask(self, names):
2343        """Set the mask of annotations that will be returned by the SELECT."""
2344        if names is None:
2345            self.annotation_select_mask = None
2346        else:
2347            self.annotation_select_mask = set(names)
2348        self._annotation_select_cache = None
2349
2350    def append_annotation_mask(self, names):
2351        if self.annotation_select_mask is not None:
2352            self.set_annotation_mask(self.annotation_select_mask.union(names))
2353
2354    def set_extra_mask(self, names):
2355        """
2356        Set the mask of extra select items that will be returned by SELECT.
2357        Don't remove them from the Query since they might be used later.
2358        """
2359        if names is None:
2360            self.extra_select_mask = None
2361        else:
2362            self.extra_select_mask = set(names)
2363        self._extra_select_cache = None
2364
    def set_values(self, fields):
        """
        Configure the query for a values()/values_list() style projection over
        the given field names (model fields, extra select aliases, or
        annotation aliases). An empty fields sequence selects all concrete
        model fields.
        """
        self.select_related = False
        self.clear_deferred_loading()
        self.clear_select_fields()
        self.has_select_fields = True

        if fields:
            field_names = []
            extra_names = []
            annotation_names = []
            if not self.extra and not self.annotations:
                # Shortcut - if there are no extra or annotations, then
                # the values() clause must be just field names.
                field_names = list(fields)
            else:
                self.default_cols = False
                # Partition the requested names by where they come from.
                for f in fields:
                    if f in self.extra_select:
                        extra_names.append(f)
                    elif f in self.annotation_select:
                        annotation_names.append(f)
                    else:
                        field_names.append(f)
            self.set_extra_mask(extra_names)
            self.set_annotation_mask(annotation_names)
            selected = frozenset(field_names + extra_names + annotation_names)
        else:
            field_names = [f.attname for f in self.model._meta.concrete_fields]
            selected = frozenset(field_names)
        # Selected annotations must be known before setting the GROUP BY
        # clause.
        if self.group_by is True:
            self.add_fields(
                (f.attname for f in self.model._meta.concrete_fields), False
            )
            # Disable GROUP BY aliases to avoid orphaning references to the
            # SELECT clause which is about to be cleared.
            self.set_group_by(allow_aliases=False)
            self.clear_select_fields()
        elif self.group_by:
            # Resolve GROUP BY annotation references if they are not part of
            # the selected fields anymore.
            group_by = []
            for expr in self.group_by:
                if isinstance(expr, Ref) and expr.refs not in selected:
                    expr = self.annotations[expr.refs]
                group_by.append(expr)
            self.group_by = tuple(group_by)

        self.values_select = tuple(field_names)
        self.add_fields(field_names, True)
2416
2417    @property
2418    def annotation_select(self):
2419        """
2420        Return the dictionary of aggregate columns that are not masked and
2421        should be used in the SELECT clause. Cache this result for performance.
2422        """
2423        if self._annotation_select_cache is not None:
2424            return self._annotation_select_cache
2425        elif not self.annotations:
2426            return {}
2427        elif self.annotation_select_mask is not None:
2428            self._annotation_select_cache = {
2429                k: v
2430                for k, v in self.annotations.items()
2431                if k in self.annotation_select_mask
2432            }
2433            return self._annotation_select_cache
2434        else:
2435            return self.annotations
2436
2437    @property
2438    def extra_select(self):
2439        if self._extra_select_cache is not None:
2440            return self._extra_select_cache
2441        if not self.extra:
2442            return {}
2443        elif self.extra_select_mask is not None:
2444            self._extra_select_cache = {
2445                k: v for k, v in self.extra.items() if k in self.extra_select_mask
2446            }
2447            return self._extra_select_cache
2448        else:
2449            return self.extra
2450
    def trim_start(self, names_with_path):
        """
        Trim joins from the start of the join path. The candidates for trim
        are the PathInfos in names_with_path structure that are m2m joins.

        Also set the select column so the start matches the join.

        This method is meant to be used for generating the subquery joins &
        cols in split_exclude().

        Return a lookup usable for doing outerq.filter(lookup=self) and a
        boolean indicating if the joins in the prefix contain a LEFT OUTER join.
        """
        all_paths = []
        for _, paths in names_with_path:
            all_paths.extend(paths)
        contains_louter = False
        # Trim and operate only on tables that were generated for
        # the lookup part of the query. That is, avoid trimming
        # joins generated for F() expressions.
        lookup_tables = [
            t for t in self.alias_map if t in self._lookup_joins or t == self.base_table
        ]
        # Walk forward until the first m2m join; everything before it can be
        # dropped from this (inner) query.
        for trimmed_paths, path in enumerate(all_paths):
            if path.m2m:
                break
            if self.alias_map[lookup_tables[trimmed_paths + 1]].join_type == LOUTER:
                contains_louter = True
            alias = lookup_tables[trimmed_paths]
            self.unref_alias(alias)
        # The path.join_field is a Rel, lets get the other side's field
        join_field = path.join_field.field
        # Build the filter prefix.
        paths_in_prefix = trimmed_paths
        trimmed_prefix = []
        for name, path in names_with_path:
            if paths_in_prefix - len(path) < 0:
                break
            trimmed_prefix.append(name)
            paths_in_prefix -= len(path)
        trimmed_prefix.append(join_field.foreign_related_fields[0].name)
        trimmed_prefix = LOOKUP_SEP.join(trimmed_prefix)
        # Lets still see if we can trim the first join from the inner query
        # (that is, self). We can't do this for:
        # - LEFT JOINs because we would miss those rows that have nothing on
        #   the outer side,
        # - INNER JOINs from filtered relations because we would miss their
        #   filters.
        first_join = self.alias_map[lookup_tables[trimmed_paths + 1]]
        if first_join.join_type != LOUTER and not first_join.filtered_relation:
            select_fields = [r[0] for r in join_field.related_fields]
            select_alias = lookup_tables[trimmed_paths + 1]
            self.unref_alias(lookup_tables[trimmed_paths])
            extra_restriction = join_field.get_extra_restriction(
                None, lookup_tables[trimmed_paths + 1]
            )
            if extra_restriction:
                self.where.add(extra_restriction, AND)
        else:
            # TODO: It might be possible to trim more joins from the start of the
            # inner query if it happens to have a longer join chain containing the
            # values in select_fields. Lets punt this one for now.
            select_fields = [r[1] for r in join_field.related_fields]
            select_alias = lookup_tables[trimmed_paths]
        # The found starting point is likely a join_class instead of a
        # base_table_class reference. But the first entry in the query's FROM
        # clause must not be a JOIN.
        for table in self.alias_map:
            if self.alias_refcount[table] > 0:
                # Rewrite the first still-referenced alias as a plain table
                # reference so the generated FROM clause is valid.
                self.alias_map[table] = self.base_table_class(
                    self.alias_map[table].table_name,
                    table,
                )
                break
        self.set_select([f.get_col(select_alias) for f in select_fields])
        return trimmed_prefix, contains_louter
2527
2528    def is_nullable(self, field):
2529        """
2530        Check if the given field should be treated as nullable.
2531
2532        Some backends treat '' as null and Plain treats such fields as
2533        nullable for those backends. In such situations field.null can be
2534        False even if we should treat the field as nullable.
2535        """
2536        # We need to use DEFAULT_DB_ALIAS here, as QuerySet does not have
2537        # (nor should it have) knowledge of which connection is going to be
2538        # used. The proper fix would be to defer all decisions where
2539        # is_nullable() is needed to the compiler stage, but that is not easy
2540        # to do currently.
2541        return field.null or (
2542            field.empty_strings_allowed
2543            and connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls
2544        )
2545
2546
def get_order_dir(field, default="ASC"):
    """
    Return the field name and direction for an order specification. For
    example, '-foo' is returned as ('foo', 'DESC').

    The 'default' param is used to indicate which way no prefix (or a '+'
    prefix) should sort. The '-' prefix always sorts the opposite way.
    """
    dirn = ORDER_DIR[default]
    # startswith() instead of field[0] so an empty field name doesn't raise
    # IndexError; it sorts in the default direction.
    # NOTE(review): a leading '+' is not stripped here — callers appear to
    # pass bare names or '-'-prefixed names only.
    if field.startswith("-"):
        return field[1:], dirn[1]
    return field, dirn[0]
2559
2560
class JoinPromoter:
    """
    Abstract away the join-promotion bookkeeping needed when combining
    complex filter conditions.
    """

    def __init__(self, connector, num_children, negated):
        self.connector = connector
        self.negated = negated
        # Under negation, De Morgan flips the connector for promotion
        # purposes: NOT (a AND b) is treated like (NOT a) OR (NOT b).
        if negated:
            self.effective_connector = OR if connector == AND else AND
        else:
            self.effective_connector = connector
        self.num_children = num_children
        # Maps of table alias to how many times it is seen as required for
        # inner and/or outer joins.
        self.votes = Counter()

    def __repr__(self):
        return (
            f"{self.__class__.__qualname__}(connector={self.connector!r}, "
            f"num_children={self.num_children!r}, negated={self.negated!r})"
        )

    def add_votes(self, votes):
        """
        Add single vote per item to self.votes. Parameter can be any
        iterable.
        """
        self.votes.update(votes)

    def update_join_types(self, query):
        """
        Change join types so that the generated query is as efficient as
        possible, but still correct. So, change as many joins as possible
        to INNER, but don't make OUTER joins INNER if that could remove
        results from the query.
        """
        promote = set()
        demote = set()
        # The effective_connector is used so that NOT (a AND b) is treated
        # similarly to (a OR b) for join promotion.
        for alias, vote_count in self.votes.items():
            # OR case: if a join isn't required by every child, an INNER
            # join could drop valid rows. E.g. for rel_a__col=1 |
            # rel_b__col=2, a row with no rel_a match but a rel_b match
            # with col=2 must survive, so rel_a's INNER must become LOUTER
            # (it may be demoted again below).
            if self.effective_connector == OR and vote_count < self.num_children:
                promote.add(alias)
            # AND case: any filter that needs a joinable row forces INNER —
            # with rel_a__col=1 & rel_b__col=2, a NULL join output can never
            # satisfy col=1 or col=2 (NULL=anything is false). In the OR
            # case, INNER is safe only when *every* child voted for it:
            # e.g. rel_a__col__icontains=Alex | rel_a__col__icontains=Russell
            # can't match at all if rel_a produces no rows.
            if self.effective_connector == AND or (
                self.effective_connector == OR and vote_count == self.num_children
            ):
                demote.add(alias)
            # Mixed case, e.g. (rel_a__col=1|rel_b__col=2) & rel_a__col__gte=0:
            # the OR clause promotes rel_a and rel_b to LOUTER; the AND-ed
            # __gte clause then votes rel_a back to INNER. That demotion is
            # correct — if rel_a yields no rows, rel_a__col__gte=0 is false
            # and the whole condition fails anyway. The same reasoning holds
            # with the clauses swapped, or with the __gte clause replaced by
            # an OR of rel_a-only conditions.
        query.promote_joins(promote)
        query.demote_joins(demote)
        return demote